author		Alex Auvolat <alex@adnab.me>	2024-02-20 17:08:31 +0100
committer	Alex Auvolat <alex@adnab.me>	2024-02-20 17:08:31 +0100
commit		75e591727d9cfda0133200604872a38419c178a1 (patch)
tree		8cd321f0114d1c235a65ffe2ac418c8a29114b27
parent		643d1aabd8f229545991217faebd09445aa9b523 (diff)
[next-0.10] cluster node status metrics: report nodes of all active layout versions
-rw-r--r--	src/rpc/system_metrics.rs	68
1 file changed, 34 insertions, 34 deletions
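
With this change, cluster_layout_node_connected reports a data point for every node present in any active layout version, not only for nodes holding a role in the current version; the role attributes (zone, capacity, gateway) are attached only when the node does have a role in the current version. A minimal sketch of the per-node value, using hypothetical simplified types rather than the actual Garage structs:

// Hypothetical simplified stand-ins for the layout node list and the
// known-nodes list; the real code uses Garage's own types.
struct KnownNode {
    id: u64,
    is_up: bool,
}

// For each node id present in any active layout version, compute the value
// reported by cluster_layout_node_connected: 1 if the node is known and up,
// 0 otherwise (unknown or down).
fn connected_values(layout_nodes: &[u64], known_nodes: &[KnownNode]) -> Vec<(u64, u64)> {
    layout_nodes
        .iter()
        .map(|id| {
            let value = match known_nodes.iter().find(|n| n.id == *id) {
                Some(node) if node.is_up => 1,
                _ => 0,
            };
            (*id, value)
        })
        .collect()
}

fn main() {
    let layout_nodes = [1, 2, 3];
    let known = [
        KnownNode { id: 1, is_up: true },
        KnownNode { id: 2, is_up: false },
    ];
    // Node 3 is in a layout version but not among the known nodes: it now
    // gets a data point, reported as disconnected (0), instead of being skipped.
    assert_eq!(connected_values(&layout_nodes, &known), vec![(1, 1), (2, 0), (3, 0)]);
}
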
diff --git a/src/rpc/system_metrics.rs b/src/rpc/system_metrics.rs
index fb3c983c..0bb55bf3 100644
--- a/src/rpc/system_metrics.rs
+++ b/src/rpc/system_metrics.rs
@@ -216,12 +216,12 @@ impl SystemMetrics {
 				.u64_value_observer("cluster_layout_node_connected", move |observer| {
 					let layout = system.cluster_layout();
 					let nodes = system.get_known_nodes();
-					for (id, _, config) in layout.current().roles.items().iter() {
-						if let Some(role) = &config.0 {
-							let mut kv = vec![
-								KeyValue::new("id", format!("{:?}", id)),
-								KeyValue::new("role_zone", role.zone.clone()),
-							];
+					for id in layout.all_nodes().iter() {
+						let mut kv = vec![KeyValue::new("id", format!("{:?}", id))];
+						if let Some(role) =
+							layout.current().roles.get(id).and_then(|r| r.0.as_ref())
+						{
+							kv.push(KeyValue::new("role_zone", role.zone.clone()));
 							match role.capacity {
 								Some(cap) => {
 									kv.push(KeyValue::new("role_capacity", cap as i64));
@@ -231,24 +231,24 @@ impl SystemMetrics {
 									kv.push(KeyValue::new("role_gateway", 1));
 								}
 							}
+						}
 
-							let value;
-							if let Some(node) = nodes.iter().find(|n| n.id == *id) {
-								value = if node.is_up { 1 } else { 0 };
+						let value;
+						if let Some(node) = nodes.iter().find(|n| n.id == *id) {
 							// TODO: if we add address and hostname, and those change, we
 							// get duplicate metrics, due to bad otel aggregation :(
 							// Can probably be fixed when we upgrade opentelemetry
 							// kv.push(KeyValue::new("address", node.addr.to_string()));
 							// kv.push(KeyValue::new(
-								// "hostname",
-								// node.status.hostname.clone(),
+							// "hostname",
+							// node.status.hostname.clone(),
 							// ));
-							} else {
-								value = 0;
-							}
-
-							observer.observe(value, &kv);
+							value = if node.is_up { 1 } else { 0 };
+						} else {
+							value = 0;
 						}
+
+						observer.observe(value, &kv);
 					}
 				})
 				.with_description("Connection status for nodes in the cluster layout")
@@ -260,12 +260,12 @@ impl SystemMetrics {
 				.u64_value_observer("cluster_layout_node_disconnected_time", move |observer| {
 					let layout = system.cluster_layout();
 					let nodes = system.get_known_nodes();
-					for (id, _, config) in layout.current().roles.items().iter() {
-						if let Some(role) = &config.0 {
-							let mut kv = vec![
-								KeyValue::new("id", format!("{:?}", id)),
-								KeyValue::new("role_zone", role.zone.clone()),
-							];
+					for id in layout.all_nodes().iter() {
+						let mut kv = vec![KeyValue::new("id", format!("{:?}", id))];
+						if let Some(role) =
+							layout.current().roles.get(id).and_then(|r| r.0.as_ref())
+						{
+							kv.push(KeyValue::new("role_zone", role.zone.clone()));
 							match role.capacity {
 								Some(cap) => {
 									kv.push(KeyValue::new("role_capacity", cap as i64));
@@ -275,19 +275,19 @@ impl SystemMetrics {
 									kv.push(KeyValue::new("role_gateway", 1));
 								}
 							}
+						}
 
-							if let Some(node) = nodes.iter().find(|n| n.id == *id) {
-								// TODO: see comment above
-								// kv.push(KeyValue::new("address", node.addr.to_string()));
-								// kv.push(KeyValue::new(
-								// "hostname",
-								// node.status.hostname.clone(),
-								// ));
-								if node.is_up {
-									observer.observe(0, &kv);
-								} else if let Some(secs) = node.last_seen_secs_ago {
-									observer.observe(secs, &kv);
-								}
+						if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+							// TODO: see comment above
+							// kv.push(KeyValue::new("address", node.addr.to_string()));
+							// kv.push(KeyValue::new(
+							// "hostname",
+							// node.status.hostname.clone(),
+							// ));
+							if node.is_up {
+								observer.observe(0, &kv);
+							} else if let Some(secs) = node.last_seen_secs_ago {
+								observer.observe(secs, &kv);
 							}
 						}
 					}
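
The last two hunks apply the same iteration change to cluster_layout_node_disconnected_time: every node of the active layout versions is considered, but a data point is only emitted when the node is among the known nodes (0 if it is up, otherwise the number of seconds since it was last seen). A minimal sketch of that per-node value, again with hypothetical simplified types rather than the actual Garage structs:

// Hypothetical simplified stand-in for an entry of the known-nodes list;
// the real code uses Garage's own types.
struct KnownNode {
    id: u64,
    is_up: bool,
    last_seen_secs_ago: Option<u64>,
}

// Value reported by cluster_layout_node_disconnected_time for one layout node:
// Some(0) if the node is known and up, Some(secs) if it is known, down and has
// a last-seen timestamp, and None (no data point) otherwise.
fn disconnected_time_value(id: u64, known_nodes: &[KnownNode]) -> Option<u64> {
    let node = known_nodes.iter().find(|n| n.id == id)?;
    if node.is_up {
        Some(0)
    } else {
        node.last_seen_secs_ago
    }
}

fn main() {
    let known = [
        KnownNode { id: 1, is_up: true, last_seen_secs_ago: None },
        KnownNode { id: 2, is_up: false, last_seen_secs_ago: Some(120) },
    ];
    assert_eq!(disconnected_time_value(1, &known), Some(0)); // up
    assert_eq!(disconnected_time_value(2, &known), Some(120)); // down for 120 s
    assert_eq!(disconnected_time_value(3, &known), None); // not a known node
}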