about summary refs log tree commit diff
path: root/src/garage/cli_v2/cluster.rs
diff options
context:
space:
mode:
author: Alex Auvolat <lx@deuxfleurs.fr> 2025-01-30 16:40:07 +0100
committer: Alex Auvolat <lx@deuxfleurs.fr> 2025-01-30 16:45:59 +0100
commit: 5a89350b382f9a24d4e81b056f88dc16a5daa080 (patch)
tree: bfea9c13130481a3463f8bba2c2fe85fe3be6cd4 /src/garage/cli_v2/cluster.rs
parent: 3caea5fc06a36b9e2f446c263b29948de431f30f (diff)
download: garage-5a89350b382f9a24d4e81b056f88dc16a5daa080.tar.gz
download: garage-5a89350b382f9a24d4e81b056f88dc16a5daa080.zip
cli_v2: fix garage status
Diffstat (limited to 'src/garage/cli_v2/cluster.rs')
-rw-r--r-- src/garage/cli_v2/cluster.rs 96
1 file changed, 41 insertions, 55 deletions
diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs
index e6ba2428..34a28674 100644
--- a/src/garage/cli_v2/cluster.rs
+++ b/src/garage/cli_v2/cluster.rs
@@ -12,11 +12,12 @@ impl Cli {
pub async fn cmd_status(&self) -> Result<(), Error> {
let status = self.api_request(GetClusterStatusRequest).await?;
let layout = self.api_request(GetClusterLayoutRequest).await?;
- // TODO: layout history
println!("==== HEALTHY NODES ====");
+
let mut healthy_nodes =
vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
+
for adv in status.nodes.iter().filter(|adv| adv.is_up) {
let host = adv.hostname.as_deref().unwrap_or("?");
let addr = match adv.addr {
@@ -43,78 +44,43 @@ impl Cli {
capacity = capacity_string(cfg.capacity),
data_avail = data_avail,
));
- } else if adv.draining {
- healthy_nodes.push(format!(
- "{id:.16}\t{host}\t{addr}\t\t\tdraining metadata...",
- id = adv.id,
- host = host,
- addr = addr,
- ));
} else {
- let new_role = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
- Some(_) => "pending...",
+ let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
+ Some(NodeRoleChange {
+ action: NodeRoleChangeEnum::Update { .. },
+ ..
+ }) => "pending...",
+ _ if adv.draining => "draining metadata..",
_ => "NO ROLE ASSIGNED",
};
healthy_nodes.push(format!(
- "{id:.16}\t{h}\t{addr}\t\t\t{new_role}",
+ "{id:.16}\t{h}\t{addr}\t\t\t{status}",
id = adv.id,
h = host,
addr = addr,
- new_role = new_role,
+ status = status,
));
}
}
format_table(healthy_nodes);
- // Determine which nodes are unhealthy and print that to stdout
- // TODO: do we need this, or can it be done in the GetClusterStatus handler?
- let status_map = status
- .nodes
- .iter()
- .map(|adv| (&adv.id, adv))
- .collect::<HashMap<_, _>>();
-
let tf = timeago::Formatter::new();
let mut drain_msg = false;
let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
- let mut listed = HashSet::new();
- //for ver in layout.versions.iter().rev() {
- for ver in [&layout].iter() {
- for cfg in ver.roles.iter() {
- let node = &cfg.id;
- if listed.contains(node.as_str()) {
- continue;
- }
- listed.insert(node.as_str());
-
- let adv = status_map.get(node);
- if adv.map(|x| x.is_up).unwrap_or(false) {
- continue;
- }
-
- // Node is in a layout version, is not a gateway node, and is not up:
- // it is in a failed state, add proper line to the output
- let (host, last_seen) = match adv {
- Some(adv) => (
- adv.hostname.as_deref().unwrap_or("?"),
- adv.last_seen_secs_ago
- .map(|s| tf.convert(Duration::from_secs(s)))
- .unwrap_or_else(|| "never seen".into()),
- ),
- None => ("??", "never seen".into()),
- };
- /*
- let capacity = if ver.version == layout.current().version {
- cfg.capacity_string()
- } else {
- drain_msg = true;
- "draining metadata...".to_string()
- };
- */
+ for adv in status.nodes.iter().filter(|x| !x.is_up) {
+ let node = &adv.id;
+
+ let host = adv.hostname.as_deref().unwrap_or("?");
+ let last_seen = adv
+ .last_seen_secs_ago
+ .map(|s| tf.convert(Duration::from_secs(s)))
+ .unwrap_or_else(|| "never seen".into());
+
+ if let Some(cfg) = &adv.role {
let capacity = capacity_string(cfg.capacity);
failed_nodes.push(format!(
- "{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
+ "{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
id = node,
host = host,
tags = cfg.tags.join(","),
@@ -122,6 +88,26 @@ impl Cli {
capacity = capacity,
last_seen = last_seen,
));
+ } else {
+ let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
+ Some(NodeRoleChange {
+ action: NodeRoleChangeEnum::Update { .. },
+ ..
+ }) => "pending...",
+ _ if adv.draining => {
+ drain_msg = true;
+ "draining metadata.."
+ }
+ _ => unreachable!(),
+ };
+
+ failed_nodes.push(format!(
+ "{id:.16}\t{host}\t\t\t{status}\t{last_seen}",
+ id = node,
+ host = host,
+ status = status,
+ last_seen = last_seen,
+ ));
}
}