From 145130481eac30793c6c08caa4d208ddddfc30e8 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 10:44:08 +0100 Subject: wip: proxy admin api requests through admin rpc, prepare new cli --- src/garage/cli_v2/mod.rs | 63 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 src/garage/cli_v2/mod.rs (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs new file mode 100644 index 00000000..6cf068c6 --- /dev/null +++ b/src/garage/cli_v2/mod.rs @@ -0,0 +1,63 @@ +use std::collections::{HashMap, HashSet}; +use std::convert::TryFrom; +use std::sync::Arc; +use std::time::Duration; + +use format_table::format_table; +use garage_util::error::*; + +use garage_rpc::layout::*; +use garage_rpc::system::*; +use garage_rpc::*; + +use garage_api::admin::api::*; +use garage_api::admin::EndpointHandler as AdminApiEndpoint; + +use crate::admin::*; +use crate::cli::*; + +pub struct Cli { + pub system_rpc_endpoint: Arc>, + pub admin_rpc_endpoint: Arc>, + pub rpc_host: NodeID, +} + +impl Cli { + pub async fn handle(&self, cmd: Command) -> Result<(), Error> { + println!("{:?}", self.api_request(GetClusterStatusRequest).await?); + Ok(()) + /* + match cmd { + _ => todo!(), + } + */ + } + + pub async fn api_request(&self, req: T) -> Result<::Response, Error> + where + T: AdminApiEndpoint, + AdminApiRequest: From, + ::Response: TryFrom, + { + let req = AdminApiRequest::from(req); + let req_name = req.name(); + match self + .admin_rpc_endpoint + .call(&self.rpc_host, AdminRpc::ApiRequest(req), PRIO_NORMAL) + .await? + .ok_or_message("xoxo")? + { + AdminRpc::ApiOkResponse(resp) => ::Response::try_from(resp) + .map_err(|_| Error::Message(format!("{} returned unexpected response", req_name))), + AdminRpc::ApiErrorResponse { + http_code, + error_code, + message, + } => Err(Error::Message(format!( + "{} returned {} ({}): {}", + req_name, error_code, http_code, message + ))), + m => Err(Error::unexpected_rpc_message(m)), + } + } +} -- cgit v1.2.3 From 69ddaafc6061d06d277fe772dfaa7fe64ecafcc1 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 12:07:12 +0100 Subject: wip: migrate garage status and garage layout assign --- src/garage/cli_v2/cluster.rs | 188 +++++++++++++++++++++++++++++++++++++++++++ src/garage/cli_v2/layout.rs | 119 +++++++++++++++++++++++++++ src/garage/cli_v2/mod.rs | 72 +++++++++++++++-- src/garage/cli_v2/util.rs | 115 ++++++++++++++++++++++++++ 4 files changed, 486 insertions(+), 8 deletions(-) create mode 100644 src/garage/cli_v2/cluster.rs create mode 100644 src/garage/cli_v2/layout.rs create mode 100644 src/garage/cli_v2/util.rs (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs new file mode 100644 index 00000000..0b5b9559 --- /dev/null +++ b/src/garage/cli_v2/cluster.rs @@ -0,0 +1,188 @@ +use format_table::format_table; + +use garage_util::error::*; + +use garage_api::admin::api::*; + +use crate::cli::structs::*; +use crate::cli_v2::util::*; +use crate::cli_v2::*; + +impl Cli { + pub async fn cmd_status(&self) -> Result<(), Error> { + let status = self.api_request(GetClusterStatusRequest).await?; + let layout = self.api_request(GetClusterLayoutRequest).await?; + // TODO: layout history + + println!("==== HEALTHY NODES ===="); + let mut healthy_nodes = + vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()]; + for adv in status.nodes.iter().filter(|adv| adv.is_up) { + let host = 
adv.hostname.as_deref().unwrap_or("?"); + let addr = match adv.addr { + Some(addr) => addr.to_string(), + None => "N/A".to_string(), + }; + if let Some(cfg) = &adv.role { + let data_avail = match &adv.data_partition { + _ if cfg.capacity.is_none() => "N/A".into(), + Some(FreeSpaceResp { available, total }) => { + let pct = (*available as f64) / (*total as f64) * 100.; + let avail_str = bytesize::ByteSize::b(*available); + format!("{} ({:.1}%)", avail_str, pct) + } + None => "?".into(), + }; + healthy_nodes.push(format!( + "{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}", + id = adv.id, + host = host, + addr = addr, + tags = cfg.tags.join(","), + zone = cfg.zone, + capacity = capacity_string(cfg.capacity), + data_avail = data_avail, + )); + } else { + /* + let prev_role = layout + .versions + .iter() + .rev() + .find_map(|x| match x.roles.get(&adv.id) { + Some(NodeRoleV(Some(cfg))) => Some(cfg), + _ => None, + }); + */ + let prev_role = Option::::None; //TODO + if let Some(cfg) = prev_role { + healthy_nodes.push(format!( + "{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...", + id = adv.id, + host = host, + addr = addr, + tags = cfg.tags.join(","), + zone = cfg.zone, + )); + } else { + let new_role = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) + { + Some(_) => "pending...", + _ => "NO ROLE ASSIGNED", + }; + healthy_nodes.push(format!( + "{id:?}\t{h}\t{addr}\t\t\t{new_role}", + id = adv.id, + h = host, + addr = addr, + new_role = new_role, + )); + } + } + } + format_table(healthy_nodes); + + // Determine which nodes are unhealthy and print that to stdout + // TODO: do we need this, or can it be done in the GetClusterStatus handler? + let status_map = status + .nodes + .iter() + .map(|adv| (&adv.id, adv)) + .collect::>(); + + let tf = timeago::Formatter::new(); + let mut drain_msg = false; + let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()]; + let mut listed = HashSet::new(); + //for ver in layout.versions.iter().rev() { + for ver in [&layout].iter() { + for cfg in ver.roles.iter() { + let node = &cfg.id; + if listed.contains(node.as_str()) { + continue; + } + listed.insert(node.as_str()); + + let adv = status_map.get(node); + if adv.map(|x| x.is_up).unwrap_or(false) { + continue; + } + + // Node is in a layout version, is not a gateway node, and is not up: + // it is in a failed state, add proper line to the output + let (host, last_seen) = match adv { + Some(adv) => ( + adv.hostname.as_deref().unwrap_or("?"), + adv.last_seen_secs_ago + .map(|s| tf.convert(Duration::from_secs(s))) + .unwrap_or_else(|| "never seen".into()), + ), + None => ("??", "never seen".into()), + }; + /* + let capacity = if ver.version == layout.current().version { + cfg.capacity_string() + } else { + drain_msg = true; + "draining metadata...".to_string() + }; + */ + let capacity = capacity_string(cfg.capacity); + + failed_nodes.push(format!( + "{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}", + id = node, + host = host, + tags = cfg.tags.join(","), + zone = cfg.zone, + capacity = capacity, + last_seen = last_seen, + )); + } + } + + if failed_nodes.len() > 1 { + println!("\n==== FAILED NODES ===="); + format_table(failed_nodes); + if drain_msg { + println!(); + println!("Your cluster is expecting to drain data from nodes that are currently unavailable."); + println!( + "If these nodes are definitely dead, please review the layout history with" + ); + println!( + "`garage layout history` and use `garage layout 
skip-dead-nodes` to force progress." + ); + } + } + + if print_staging_role_changes(&layout) { + println!(); + println!( + "Please use `garage layout show` to check the proposed new layout and apply it." + ); + println!(); + } + + Ok(()) + } + + pub async fn cmd_connect(&self, opt: ConnectNodeOpt) -> Result<(), Error> { + let res = self + .api_request(ConnectClusterNodesRequest(vec![opt.node])) + .await?; + if res.0.len() != 1 { + return Err(Error::Message(format!("unexpected response: {:?}", res))); + } + let res = res.0.into_iter().next().unwrap(); + if res.success { + println!("Success."); + Ok(()) + } else { + Err(Error::Message(format!( + "Failure: {}", + res.error.unwrap_or_default() + ))) + } + } +} diff --git a/src/garage/cli_v2/layout.rs b/src/garage/cli_v2/layout.rs new file mode 100644 index 00000000..ccd1886f --- /dev/null +++ b/src/garage/cli_v2/layout.rs @@ -0,0 +1,119 @@ +use bytesize::ByteSize; +use format_table::format_table; + +use garage_util::error::*; + +use garage_api::admin::api::*; + +use crate::cli::layout as cli_v1; +use crate::cli::structs::*; +use crate::cli_v2::util::*; +use crate::cli_v2::*; + +impl Cli { + pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> { + match cmd { + LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await, + + // TODO + LayoutOperation::Remove(remove_opt) => { + cli_v1::cmd_remove_role(&self.system_rpc_endpoint, self.rpc_host, remove_opt).await + } + LayoutOperation::Show => { + cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await + } + LayoutOperation::Apply(apply_opt) => { + cli_v1::cmd_apply_layout(&self.system_rpc_endpoint, self.rpc_host, apply_opt).await + } + LayoutOperation::Revert(revert_opt) => { + cli_v1::cmd_revert_layout(&self.system_rpc_endpoint, self.rpc_host, revert_opt) + .await + } + LayoutOperation::Config(config_opt) => { + cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt) + .await + } + LayoutOperation::History => { + cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await + } + LayoutOperation::SkipDeadNodes(assume_sync_opt) => { + cli_v1::cmd_layout_skip_dead_nodes( + &self.system_rpc_endpoint, + self.rpc_host, + assume_sync_opt, + ) + .await + } + } + } + + pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> { + let status = self.api_request(GetClusterStatusRequest).await?; + let layout = self.api_request(GetClusterLayoutRequest).await?; + + let all_node_ids_iter = status + .nodes + .iter() + .map(|x| x.id.as_str()) + .chain(layout.roles.iter().map(|x| x.id.as_str())); + + let mut actions = vec![]; + + for node in opt.replace.iter() { + let id = find_matching_node(all_node_ids_iter.clone(), &node)?; + + actions.push(NodeRoleChange { + id, + action: NodeRoleChangeEnum::Remove { remove: true }, + }); + } + + for node in opt.node_ids.iter() { + let id = find_matching_node(all_node_ids_iter.clone(), &node)?; + + let current = get_staged_or_current_role(&id, &layout); + + let zone = opt + .zone + .clone() + .or_else(|| current.as_ref().map(|c| c.zone.clone())) + .ok_or_message("Please specify a zone with the -z flag")?; + + let capacity = if opt.gateway { + if opt.capacity.is_some() { + return Err(Error::Message("Please specify only -c or -g".into())); + } + None + } else if let Some(cap) = opt.capacity { + Some(cap.as_u64()) + } else { + current.as_ref().ok_or_message("Please specify a capacity with the -c flag, or set node explicitly as gateway with -g")?.capacity + 
}; + + let tags = if !opt.tags.is_empty() { + opt.tags.clone() + } else if let Some(cur) = current.as_ref() { + cur.tags.clone() + } else { + vec![] + }; + + actions.push(NodeRoleChange { + id, + action: NodeRoleChangeEnum::Update { + zone, + capacity, + tags, + }, + }); + } + + self.api_request(UpdateClusterLayoutRequest(actions)) + .await?; + + println!("Role changes are staged but not yet committed."); + println!("Use `garage layout show` to view staged role changes,"); + println!("and `garage layout apply` to enact staged changes."); + Ok(()) + } +} diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs index 6cf068c6..2fe45e29 100644 --- a/src/garage/cli_v2/mod.rs +++ b/src/garage/cli_v2/mod.rs @@ -1,12 +1,15 @@ +pub mod util; + +pub mod cluster; +pub mod layout; + use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::sync::Arc; use std::time::Duration; -use format_table::format_table; use garage_util::error::*; -use garage_rpc::layout::*; use garage_rpc::system::*; use garage_rpc::*; @@ -14,7 +17,9 @@ use garage_api::admin::api::*; use garage_api::admin::EndpointHandler as AdminApiEndpoint; use crate::admin::*; -use crate::cli::*; +use crate::cli as cli_v1; +use crate::cli::structs::*; +use crate::cli::Command; pub struct Cli { pub system_rpc_endpoint: Arc>, @@ -24,13 +29,64 @@ pub struct Cli { impl Cli { pub async fn handle(&self, cmd: Command) -> Result<(), Error> { - println!("{:?}", self.api_request(GetClusterStatusRequest).await?); - Ok(()) - /* match cmd { - _ => todo!(), + Command::Status => self.cmd_status().await, + Command::Node(NodeOperation::Connect(connect_opt)) => { + self.cmd_connect(connect_opt).await + } + Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await, + + // TODO + Command::Bucket(bo) => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::BucketOperation(bo), + ) + .await + .ok_or_message("xoxo"), + Command::Key(ko) => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::KeyOperation(ko), + ) + .await + .ok_or_message("xoxo"), + Command::Repair(ro) => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::LaunchRepair(ro), + ) + .await + .ok_or_message("xoxo"), + Command::Stats(so) => { + cli_v1::cmd_admin(&self.admin_rpc_endpoint, self.rpc_host, AdminRpc::Stats(so)) + .await + .ok_or_message("xoxo") + } + Command::Worker(wo) => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::Worker(wo), + ) + .await + .ok_or_message("xoxo"), + Command::Block(bo) => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::BlockOperation(bo), + ) + .await + .ok_or_message("xoxo"), + Command::Meta(mo) => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::MetaOperation(mo), + ) + .await + .ok_or_message("xoxo"), + + _ => unreachable!(), } - */ } pub async fn api_request(&self, req: T) -> Result<::Response, Error> diff --git a/src/garage/cli_v2/util.rs b/src/garage/cli_v2/util.rs new file mode 100644 index 00000000..78399b0d --- /dev/null +++ b/src/garage/cli_v2/util.rs @@ -0,0 +1,115 @@ +use bytesize::ByteSize; +use format_table::format_table; + +use garage_util::error::Error; + +use garage_api::admin::api::*; + +pub fn capacity_string(v: Option) -> String { + match v { + Some(c) => ByteSize::b(c).to_string_as(false), + None => "gateway".to_string(), + } +} + +pub fn get_staged_or_current_role( + id: &str, + layout: &GetClusterLayoutResponse, +) -> Option { + for node in 
layout.staged_role_changes.iter() { + if node.id == id { + return match &node.action { + NodeRoleChangeEnum::Remove { .. } => None, + NodeRoleChangeEnum::Update { + zone, + capacity, + tags, + } => Some(NodeRoleResp { + id: id.to_string(), + zone: zone.to_string(), + capacity: *capacity, + tags: tags.clone(), + }), + }; + } + } + + for node in layout.roles.iter() { + if node.id == id { + return Some(node.clone()); + } + } + + None +} + +pub fn find_matching_node<'a>( + cand: impl std::iter::Iterator, + pattern: &'a str, +) -> Result { + let mut candidates = vec![]; + for c in cand { + if c.starts_with(pattern) && !candidates.contains(&c) { + candidates.push(c); + } + } + if candidates.len() != 1 { + Err(Error::Message(format!( + "{} nodes match '{}'", + candidates.len(), + pattern, + ))) + } else { + Ok(candidates[0].to_string()) + } +} + +pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool { + let has_role_changes = !layout.staged_role_changes.is_empty(); + + // TODO!! Layout parameters + let has_layout_changes = false; + + if has_role_changes || has_layout_changes { + println!(); + println!("==== STAGED ROLE CHANGES ===="); + if has_role_changes { + let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; + for change in layout.staged_role_changes.iter() { + match &change.action { + NodeRoleChangeEnum::Update { + tags, + zone, + capacity, + } => { + let tags = tags.join(","); + table.push(format!( + "{:.16}\t{}\t{}\t{}", + change.id, + tags, + zone, + capacity_string(*capacity), + )); + } + NodeRoleChangeEnum::Remove { .. } => { + table.push(format!("{:.16}\tREMOVED", change.id)); + } + } + } + format_table(table); + println!(); + } + //TODO + /* + if has_layout_changes { + println!( + "Zone redundancy: {}", + staging.parameters.get().zone_redundancy + ); + } + */ + true + } else { + false + } +} -- cgit v1.2.3 From 819f4f00509a57097d0ee8291e1556829e982e14 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 12:19:23 +0100 Subject: cli: migrate layout remove, apply, revert --- src/garage/cli_v2/layout.rs | 77 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 65 insertions(+), 12 deletions(-) (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/layout.rs b/src/garage/cli_v2/layout.rs index ccd1886f..8088f019 100644 --- a/src/garage/cli_v2/layout.rs +++ b/src/garage/cli_v2/layout.rs @@ -1,5 +1,5 @@ -use bytesize::ByteSize; -use format_table::format_table; +//use bytesize::ByteSize; +//use format_table::format_table; use garage_util::error::*; @@ -14,21 +14,14 @@ impl Cli { pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> { match cmd { LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await, + LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await, + LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await, + LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await, // TODO - LayoutOperation::Remove(remove_opt) => { - cli_v1::cmd_remove_role(&self.system_rpc_endpoint, self.rpc_host, remove_opt).await - } LayoutOperation::Show => { cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await } - LayoutOperation::Apply(apply_opt) => { - cli_v1::cmd_apply_layout(&self.system_rpc_endpoint, self.rpc_host, apply_opt).await - } - LayoutOperation::Revert(revert_opt) => { - cli_v1::cmd_revert_layout(&self.system_rpc_endpoint, self.rpc_host, revert_opt) - .await - } LayoutOperation::Config(config_opt) 
=> { cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt) .await @@ -116,4 +109,64 @@ impl Cli { println!("and `garage layout apply` to enact staged changes."); Ok(()) } + + pub async fn cmd_remove_role(&self, opt: RemoveRoleOpt) -> Result<(), Error> { + let status = self.api_request(GetClusterStatusRequest).await?; + let layout = self.api_request(GetClusterLayoutRequest).await?; + + let all_node_ids_iter = status + .nodes + .iter() + .map(|x| x.id.as_str()) + .chain(layout.roles.iter().map(|x| x.id.as_str())); + + let id = find_matching_node(all_node_ids_iter.clone(), &opt.node_id)?; + + let actions = vec![NodeRoleChange { + id, + action: NodeRoleChangeEnum::Remove { remove: true }, + }]; + + self.api_request(UpdateClusterLayoutRequest(actions)) + .await?; + + println!("Role removal is staged but not yet committed."); + println!("Use `garage layout show` to view staged role changes,"); + println!("and `garage layout apply` to enact staged changes."); + Ok(()) + } + + pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> { + let missing_version_error = r#" +Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout. +To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes. + "#; + + let req = ApplyClusterLayoutRequest { + version: apply_opt.version.ok_or_message(missing_version_error)?, + }; + let res = self.api_request(req).await?; + + for line in res.message.iter() { + println!("{}", line); + } + + println!("New cluster layout with updated role assignment has been applied in cluster."); + println!("Data will now be moved around between nodes accordingly."); + + Ok(()) + } + + pub async fn cmd_revert_layout(&self, revert_opt: RevertLayoutOpt) -> Result<(), Error> { + if !revert_opt.yes { + return Err(Error::Message( + "Please add the --yes flag to run the layout revert operation".into(), + )); + } + + self.api_request(RevertClusterLayoutRequest).await?; + + println!("All proposed role changes in cluster layout have been canceled."); + Ok(()) + } } -- cgit v1.2.3 From f37d5d2b08b008eba7b1ee8d84b08d5fddeabf78 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 13:36:25 +0100 Subject: admin api: convert most bucket operations --- src/garage/cli_v2/bucket.rs | 523 ++++++++++++++++++++++++++++++++++++++++++++ src/garage/cli_v2/mod.rs | 9 +- 2 files changed, 525 insertions(+), 7 deletions(-) create mode 100644 src/garage/cli_v2/bucket.rs (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/bucket.rs b/src/garage/cli_v2/bucket.rs new file mode 100644 index 00000000..837ce783 --- /dev/null +++ b/src/garage/cli_v2/bucket.rs @@ -0,0 +1,523 @@ +//use bytesize::ByteSize; +use format_table::format_table; + +use garage_util::error::*; + +use garage_api::admin::api::*; + +use crate::cli as cli_v1; +use crate::cli::structs::*; +use crate::cli_v2::*; + +impl Cli { + pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> { + match cmd { + BucketOperation::List => self.cmd_list_buckets().await, + BucketOperation::Info(query) => self.cmd_bucket_info(query).await, + BucketOperation::Create(query) => self.cmd_create_bucket(query).await, + BucketOperation::Delete(query) => self.cmd_delete_bucket(query).await, + BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await, + BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await, + BucketOperation::Allow(query) => 
self.cmd_bucket_allow(query).await, + BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await, + BucketOperation::Website(query) => self.cmd_bucket_website(query).await, + BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await, + + // TODO + x => cli_v1::cmd_admin( + &self.admin_rpc_endpoint, + self.rpc_host, + AdminRpc::BucketOperation(x), + ) + .await + .ok_or_message("old error"), + } + } + + pub async fn cmd_list_buckets(&self) -> Result<(), Error> { + let buckets = self.api_request(ListBucketsRequest).await?; + + println!("List of buckets:"); + + let mut table = vec![]; + for bucket in buckets.0.iter() { + let local_aliases_n = match &bucket.local_aliases[..] { + [] => "".into(), + [alias] => format!("{}:{}", alias.access_key_id, alias.alias), + s => format!("[{} local aliases]", s.len()), + }; + + table.push(format!( + "\t{}\t{}\t{}", + bucket.global_aliases.join(","), + local_aliases_n, + bucket.id, + )); + } + format_table(table); + + Ok(()) + } + + pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.name), + }) + .await?; + + println!("Bucket: {}", bucket.id); + + let size = bytesize::ByteSize::b(bucket.bytes as u64); + println!( + "\nSize: {} ({})", + size.to_string_as(true), + size.to_string_as(false) + ); + println!("Objects: {}", bucket.objects); + println!( + "Unfinished uploads (multipart and non-multipart): {}", + bucket.unfinished_uploads, + ); + println!( + "Unfinished multipart uploads: {}", + bucket.unfinished_multipart_uploads + ); + let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_uploads as u64); + println!( + "Size of unfinished multipart uploads: {} ({})", + mpu_size.to_string_as(true), + mpu_size.to_string_as(false), + ); + + println!("\nWebsite access: {}", bucket.website_access); + + if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() { + println!("\nQuotas:"); + if let Some(ms) = bucket.quotas.max_size { + let ms = bytesize::ByteSize::b(ms); + println!( + " maximum size: {} ({})", + ms.to_string_as(true), + ms.to_string_as(false) + ); + } + if let Some(mo) = bucket.quotas.max_objects { + println!(" maximum number of objects: {}", mo); + } + } + + println!("\nGlobal aliases:"); + for alias in bucket.global_aliases { + println!(" {}", alias); + } + + println!("\nKey-specific aliases:"); + let mut table = vec![]; + for key in bucket.keys.iter() { + for alias in key.bucket_local_aliases.iter() { + table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias)); + } + } + format_table(table); + + println!("\nAuthorized keys:"); + let mut table = vec![]; + for key in bucket.keys.iter() { + if !(key.permissions.read || key.permissions.write || key.permissions.owner) { + continue; + } + let rflag = if key.permissions.read { "R" } else { " " }; + let wflag = if key.permissions.write { "W" } else { " " }; + let oflag = if key.permissions.owner { "O" } else { " " }; + table.push(format!( + "\t{}{}{}\t{}\t{}", + rflag, wflag, oflag, key.access_key_id, key.name + )); + } + format_table(table); + + Ok(()) + } + + pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> { + self.api_request(CreateBucketRequest { + global_alias: Some(opt.name.clone()), + local_alias: None, + }) + .await?; + + println!("Bucket {} was created.", opt.name); + + Ok(()) + } + + pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> { + let 
bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.name.clone()), + }) + .await?; + + // CLI-only checks: the bucket must not have other aliases + if bucket + .global_aliases + .iter() + .find(|a| **a != opt.name) + .is_some() + { + return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name))); + } + + if bucket + .keys + .iter() + .any(|k| !k.bucket_local_aliases.is_empty()) + { + return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name))); + } + + if !opt.yes { + println!("About to delete bucket {}.", bucket.id); + return Err(Error::Message( + "Add --yes flag to really perform this operation".to_string(), + )); + } + + self.api_request(DeleteBucketRequest { + id: bucket.id.clone(), + }) + .await?; + + println!("Bucket {} has been deleted.", bucket.id); + + Ok(()) + } + + pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.existing_bucket.clone()), + }) + .await?; + + if let Some(key_pat) = &opt.local { + let key = self + .api_request(GetKeyInfoRequest { + search: Some(key_pat.clone()), + id: None, + show_secret_key: false, + }) + .await?; + + self.api_request(AddBucketAliasRequest { + bucket_id: bucket.id.clone(), + alias: BucketAliasEnum::Local { + local_alias: opt.new_name.clone(), + access_key_id: key.access_key_id.clone(), + }, + }) + .await?; + + println!( + "Alias {} now points to bucket {:.16} in namespace of key {}", + opt.new_name, bucket.id, key.access_key_id + ) + } else { + self.api_request(AddBucketAliasRequest { + bucket_id: bucket.id.clone(), + alias: BucketAliasEnum::Global { + global_alias: opt.new_name.clone(), + }, + }) + .await?; + + println!( + "Alias {} now points to bucket {:.16}", + opt.new_name, bucket.id + ) + } + + Ok(()) + } + + pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> { + if let Some(key_pat) = &opt.local { + let key = self + .api_request(GetKeyInfoRequest { + search: Some(key_pat.clone()), + id: None, + show_secret_key: false, + }) + .await?; + + let bucket = key + .buckets + .iter() + .find(|x| x.local_aliases.contains(&opt.name)) + .ok_or_message(format!( + "No bucket called {} in namespace of key {}", + opt.name, key.access_key_id + ))?; + + self.api_request(RemoveBucketAliasRequest { + bucket_id: bucket.id.clone(), + alias: BucketAliasEnum::Local { + access_key_id: key.access_key_id.clone(), + local_alias: opt.name.clone(), + }, + }) + .await?; + + println!( + "Alias {} no longer points to bucket {:.16} in namespace of key {}", + &opt.name, bucket.id, key.access_key_id + ) + } else { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: Some(opt.name.clone()), + search: None, + }) + .await?; + + self.api_request(RemoveBucketAliasRequest { + bucket_id: bucket.id.clone(), + alias: BucketAliasEnum::Global { + global_alias: opt.name.clone(), + }, + }) + .await?; + + println!( + "Alias {} no longer points to bucket {:.16}", + opt.name, bucket.id + ) + } + + Ok(()) + } + + pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.bucket.clone()), + }) + .await?; + + let key = self + .api_request(GetKeyInfoRequest { 
+ id: None, + search: Some(opt.key_pattern.clone()), + show_secret_key: false, + }) + .await?; + + self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest { + bucket_id: bucket.id.clone(), + access_key_id: key.access_key_id.clone(), + permissions: ApiBucketKeyPerm { + read: opt.read, + write: opt.write, + owner: opt.owner, + }, + })) + .await?; + + let new_bucket = self + .api_request(GetBucketInfoRequest { + id: Some(bucket.id), + global_alias: None, + search: None, + }) + .await?; + + if let Some(new_key) = new_bucket + .keys + .iter() + .find(|k| k.access_key_id == key.access_key_id) + { + println!( + "New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}", + key.access_key_id, + new_bucket.id, + new_key.permissions.read, + new_key.permissions.write, + new_key.permissions.owner + ); + } else { + println!( + "Access key {} has no permissions on bucket {:.16}", + key.access_key_id, new_bucket.id + ); + } + + Ok(()) + } + + pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.bucket.clone()), + }) + .await?; + + let key = self + .api_request(GetKeyInfoRequest { + id: None, + search: Some(opt.key_pattern.clone()), + show_secret_key: false, + }) + .await?; + + self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest { + bucket_id: bucket.id.clone(), + access_key_id: key.access_key_id.clone(), + permissions: ApiBucketKeyPerm { + read: opt.read, + write: opt.write, + owner: opt.owner, + }, + })) + .await?; + + let new_bucket = self + .api_request(GetBucketInfoRequest { + id: Some(bucket.id), + global_alias: None, + search: None, + }) + .await?; + + if let Some(new_key) = new_bucket + .keys + .iter() + .find(|k| k.access_key_id == key.access_key_id) + { + println!( + "New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}", + key.access_key_id, + new_bucket.id, + new_key.permissions.read, + new_key.permissions.write, + new_key.permissions.owner + ); + } else { + println!( + "Access key {} no longer has permissions on bucket {:.16}", + key.access_key_id, new_bucket.id + ); + } + + Ok(()) + } + + pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.bucket.clone()), + }) + .await?; + + if !(opt.allow ^ opt.deny) { + return Err(Error::Message( + "You must specify exactly one flag, either --allow or --deny".to_string(), + )); + } + + let wa = if opt.allow { + UpdateBucketWebsiteAccess { + enabled: true, + index_document: Some(opt.index_document.clone()), + error_document: opt + .error_document + .or(bucket.website_config.and_then(|x| x.error_document.clone())), + } + } else { + UpdateBucketWebsiteAccess { + enabled: false, + index_document: None, + error_document: None, + } + }; + + self.api_request(UpdateBucketRequest { + id: bucket.id, + body: UpdateBucketRequestBody { + website_access: Some(wa), + quotas: None, + }, + }) + .await?; + + if opt.allow { + println!("Website access allowed for {}", &opt.bucket); + } else { + println!("Website access denied for {}", &opt.bucket); + } + + Ok(()) + } + + pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> { + let bucket = self + .api_request(GetBucketInfoRequest { + id: None, + global_alias: None, + search: Some(opt.bucket.clone()), + }) + .await?; + + if opt.max_size.is_none() && 
opt.max_objects.is_none() { + return Err(Error::Message( + "You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(), + )); + } + + let new_quotas = ApiBucketQuotas { + max_size: match opt.max_size.as_deref() { + Some("none") => None, + Some(v) => Some( + v.parse::() + .ok_or_message(format!("Invalid size specified: {}", v))? + .as_u64(), + ), + None => bucket.quotas.max_size, + }, + max_objects: match opt.max_objects.as_deref() { + Some("none") => None, + Some(v) => Some( + v.parse::() + .ok_or_message(format!("Invalid number: {}", v))?, + ), + None => bucket.quotas.max_objects, + }, + }; + + self.api_request(UpdateBucketRequest { + id: bucket.id.clone(), + body: UpdateBucketRequestBody { + website_access: None, + quotas: Some(new_quotas), + }, + }) + .await?; + + println!("Quotas updated for bucket {:.16}", bucket.id); + + Ok(()) + } +} diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs index 2fe45e29..24ff6f72 100644 --- a/src/garage/cli_v2/mod.rs +++ b/src/garage/cli_v2/mod.rs @@ -1,5 +1,6 @@ pub mod util; +pub mod bucket; pub mod cluster; pub mod layout; @@ -35,15 +36,9 @@ impl Cli { self.cmd_connect(connect_opt).await } Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await, + Command::Bucket(bo) => self.cmd_bucket(bo).await, // TODO - Command::Bucket(bo) => cli_v1::cmd_admin( - &self.admin_rpc_endpoint, - self.rpc_host, - AdminRpc::BucketOperation(bo), - ) - .await - .ok_or_message("xoxo"), Command::Key(ko) => cli_v1::cmd_admin( &self.admin_rpc_endpoint, self.rpc_host, -- cgit v1.2.3 From 076ce04fe53123c5046f356e2b164a8093be2dfe Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 15:38:22 +0100 Subject: fix garage status output --- src/garage/cli_v2/cluster.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs index 0b5b9559..fa63960d 100644 --- a/src/garage/cli_v2/cluster.rs +++ b/src/garage/cli_v2/cluster.rs @@ -71,7 +71,7 @@ impl Cli { _ => "NO ROLE ASSIGNED", }; healthy_nodes.push(format!( - "{id:?}\t{h}\t{addr}\t\t\t{new_role}", + "{id:.16}\t{h}\t{addr}\t\t\t{new_role}", id = adv.id, h = host, addr = addr, -- cgit v1.2.3 From f8c6a8373d630311a18e9af011724181be68e5e1 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 16:12:16 +0100 Subject: convert cli key operations to admin rpc --- src/garage/cli_v2/cluster.rs | 52 ++++------ src/garage/cli_v2/key.rs | 227 +++++++++++++++++++++++++++++++++++++++++++ src/garage/cli_v2/mod.rs | 9 +- 3 files changed, 247 insertions(+), 41 deletions(-) create mode 100644 src/garage/cli_v2/key.rs (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs index fa63960d..adaf9a25 100644 --- a/src/garage/cli_v2/cluster.rs +++ b/src/garage/cli_v2/cluster.rs @@ -43,41 +43,25 @@ impl Cli { capacity = capacity_string(cfg.capacity), data_avail = data_avail, )); + } else if adv.draining { + healthy_nodes.push(format!( + "{id:.16}\t{host}\t{addr}\t\t\tdraining metadata...", + id = adv.id, + host = host, + addr = addr, + )); } else { - /* - let prev_role = layout - .versions - .iter() - .rev() - .find_map(|x| match x.roles.get(&adv.id) { - Some(NodeRoleV(Some(cfg))) => Some(cfg), - _ => None, - }); - */ - let prev_role = Option::::None; //TODO - if let Some(cfg) = prev_role { - healthy_nodes.push(format!( - "{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...", - id = adv.id, 
- host = host, - addr = addr, - tags = cfg.tags.join(","), - zone = cfg.zone, - )); - } else { - let new_role = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) - { - Some(_) => "pending...", - _ => "NO ROLE ASSIGNED", - }; - healthy_nodes.push(format!( - "{id:.16}\t{h}\t{addr}\t\t\t{new_role}", - id = adv.id, - h = host, - addr = addr, - new_role = new_role, - )); - } + let new_role = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) { + Some(_) => "pending...", + _ => "NO ROLE ASSIGNED", + }; + healthy_nodes.push(format!( + "{id:.16}\t{h}\t{addr}\t\t\t{new_role}", + id = adv.id, + h = host, + addr = addr, + new_role = new_role, + )); } } format_table(healthy_nodes); diff --git a/src/garage/cli_v2/key.rs b/src/garage/cli_v2/key.rs new file mode 100644 index 00000000..ff403a9a --- /dev/null +++ b/src/garage/cli_v2/key.rs @@ -0,0 +1,227 @@ +use format_table::format_table; + +use garage_util::error::*; + +use garage_api::admin::api::*; + +use crate::cli::structs::*; +use crate::cli_v2::*; + +impl Cli { + pub async fn cmd_key(&self, cmd: KeyOperation) -> Result<(), Error> { + match cmd { + KeyOperation::List => self.cmd_list_keys().await, + KeyOperation::Info(query) => self.cmd_key_info(query).await, + KeyOperation::Create(query) => self.cmd_create_key(query).await, + KeyOperation::Rename(query) => self.cmd_rename_key(query).await, + KeyOperation::Delete(query) => self.cmd_delete_key(query).await, + KeyOperation::Allow(query) => self.cmd_allow_key(query).await, + KeyOperation::Deny(query) => self.cmd_deny_key(query).await, + KeyOperation::Import(query) => self.cmd_import_key(query).await, + } + } + + pub async fn cmd_list_keys(&self) -> Result<(), Error> { + let keys = self.api_request(ListKeysRequest).await?; + + println!("List of keys:"); + let mut table = vec![]; + for key in keys.0.iter() { + table.push(format!("\t{}\t{}", key.id, key.name)); + } + format_table(table); + + Ok(()) + } + + pub async fn cmd_key_info(&self, opt: KeyInfoOpt) -> Result<(), Error> { + let key = self + .api_request(GetKeyInfoRequest { + id: None, + search: Some(opt.key_pattern), + show_secret_key: opt.show_secret, + }) + .await?; + + print_key_info(&key); + + Ok(()) + } + + pub async fn cmd_create_key(&self, opt: KeyNewOpt) -> Result<(), Error> { + let key = self + .api_request(CreateKeyRequest { + name: Some(opt.name), + }) + .await?; + + print_key_info(&key.0); + + Ok(()) + } + + pub async fn cmd_rename_key(&self, opt: KeyRenameOpt) -> Result<(), Error> { + let key = self + .api_request(GetKeyInfoRequest { + id: None, + search: Some(opt.key_pattern), + show_secret_key: false, + }) + .await?; + + let new_key = self + .api_request(UpdateKeyRequest { + id: key.access_key_id, + body: UpdateKeyRequestBody { + name: Some(opt.new_name), + allow: None, + deny: None, + }, + }) + .await?; + + print_key_info(&new_key.0); + + Ok(()) + } + + pub async fn cmd_delete_key(&self, opt: KeyDeleteOpt) -> Result<(), Error> { + let key = self + .api_request(GetKeyInfoRequest { + id: None, + search: Some(opt.key_pattern), + show_secret_key: false, + }) + .await?; + + if !opt.yes { + println!("About to delete key {}...", key.access_key_id); + return Err(Error::Message( + "Add --yes flag to really perform this operation".to_string(), + )); + } + + self.api_request(DeleteKeyRequest { + id: key.access_key_id.clone(), + }) + .await?; + + println!("Access key {} has been deleted.", key.access_key_id); + + Ok(()) + } + + pub async fn cmd_allow_key(&self, opt: KeyPermOpt) -> Result<(), Error> { + let key = self 
+ .api_request(GetKeyInfoRequest { + id: None, + search: Some(opt.key_pattern), + show_secret_key: false, + }) + .await?; + + let new_key = self + .api_request(UpdateKeyRequest { + id: key.access_key_id, + body: UpdateKeyRequestBody { + name: None, + allow: Some(KeyPerm { + create_bucket: opt.create_bucket, + }), + deny: None, + }, + }) + .await?; + + print_key_info(&new_key.0); + + Ok(()) + } + + pub async fn cmd_deny_key(&self, opt: KeyPermOpt) -> Result<(), Error> { + let key = self + .api_request(GetKeyInfoRequest { + id: None, + search: Some(opt.key_pattern), + show_secret_key: false, + }) + .await?; + + let new_key = self + .api_request(UpdateKeyRequest { + id: key.access_key_id, + body: UpdateKeyRequestBody { + name: None, + allow: None, + deny: Some(KeyPerm { + create_bucket: opt.create_bucket, + }), + }, + }) + .await?; + + print_key_info(&new_key.0); + + Ok(()) + } + + pub async fn cmd_import_key(&self, opt: KeyImportOpt) -> Result<(), Error> { + if !opt.yes { + return Err(Error::Message("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string())); + } + + let new_key = self + .api_request(ImportKeyRequest { + name: Some(opt.name), + access_key_id: opt.key_id, + secret_access_key: opt.secret_key, + }) + .await?; + + print_key_info(&new_key.0); + + Ok(()) + } +} + +fn print_key_info(key: &GetKeyInfoResponse) { + println!("Key name: {}", key.name); + println!("Key ID: {}", key.access_key_id); + println!( + "Secret key: {}", + key.secret_access_key.as_deref().unwrap_or("(redacted)") + ); + println!("Can create buckets: {}", key.permissions.create_bucket); + + println!("\nKey-specific bucket aliases:"); + let mut table = vec![]; + for bucket in key.buckets.iter() { + for la in bucket.local_aliases.iter() { + table.push(format!( + "\t{}\t{}\t{}", + la, + bucket.global_aliases.join(","), + bucket.id + )); + } + } + format_table(table); + + println!("\nAuthorized buckets:"); + let mut table = vec![]; + for bucket in key.buckets.iter() { + let rflag = if bucket.permissions.read { "R" } else { " " }; + let wflag = if bucket.permissions.write { "W" } else { " " }; + let oflag = if bucket.permissions.owner { "O" } else { " " }; + table.push(format!( + "\t{}{}{}\t{}\t{}\t{:.16}", + rflag, + wflag, + oflag, + bucket.global_aliases.join(","), + bucket.local_aliases.join(","), + bucket.id + )); + } + format_table(table); +} diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs index 24ff6f72..e6d2d8c6 100644 --- a/src/garage/cli_v2/mod.rs +++ b/src/garage/cli_v2/mod.rs @@ -2,6 +2,7 @@ pub mod util; pub mod bucket; pub mod cluster; +pub mod key; pub mod layout; use std::collections::{HashMap, HashSet}; @@ -37,15 +38,9 @@ impl Cli { } Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await, Command::Bucket(bo) => self.cmd_bucket(bo).await, + Command::Key(ko) => self.cmd_key(ko).await, // TODO - Command::Key(ko) => cli_v1::cmd_admin( - &self.admin_rpc_endpoint, - self.rpc_host, - AdminRpc::KeyOperation(ko), - ) - .await - .ok_or_message("xoxo"), Command::Repair(ro) => cli_v1::cmd_admin( &self.admin_rpc_endpoint, self.rpc_host, -- cgit v1.2.3 From ebc0e9319e8e0f8d77eb538d8d0356189597acaa Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 16:17:35 +0100 Subject: cli_v2: error messages --- src/garage/cli_v2/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 
'src/garage/cli_v2') diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs index e6d2d8c6..b51ed67f 100644 --- a/src/garage/cli_v2/mod.rs +++ b/src/garage/cli_v2/mod.rs @@ -47,11 +47,11 @@ impl Cli { AdminRpc::LaunchRepair(ro), ) .await - .ok_or_message("xoxo"), + .ok_or_message("cli_v1"), Command::Stats(so) => { cli_v1::cmd_admin(&self.admin_rpc_endpoint, self.rpc_host, AdminRpc::Stats(so)) .await - .ok_or_message("xoxo") + .ok_or_message("cli_v1") } Command::Worker(wo) => cli_v1::cmd_admin( &self.admin_rpc_endpoint, @@ -59,21 +59,21 @@ impl Cli { AdminRpc::Worker(wo), ) .await - .ok_or_message("xoxo"), + .ok_or_message("cli_v1"), Command::Block(bo) => cli_v1::cmd_admin( &self.admin_rpc_endpoint, self.rpc_host, AdminRpc::BlockOperation(bo), ) .await - .ok_or_message("xoxo"), + .ok_or_message("cli_v1"), Command::Meta(mo) => cli_v1::cmd_admin( &self.admin_rpc_endpoint, self.rpc_host, AdminRpc::MetaOperation(mo), ) .await - .ok_or_message("xoxo"), + .ok_or_message("cli_v1"), _ => unreachable!(), } @@ -91,7 +91,7 @@ impl Cli { .admin_rpc_endpoint .call(&self.rpc_host, AdminRpc::ApiRequest(req), PRIO_NORMAL) .await? - .ok_or_message("xoxo")? + .ok_or_message("rpc")? { AdminRpc::ApiOkResponse(resp) => ::Response::try_from(resp) .map_err(|_| Error::Message(format!("{} returned unexpected response", req_name))), -- cgit v1.2.3 From 3caea5fc06a36b9e2f446c263b29948de431f30f Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 16:24:55 +0100 Subject: cli_v2: merge util.rs into layout.rs --- src/garage/cli_v2/cluster.rs | 2 +- src/garage/cli_v2/layout.rs | 118 +++++++++++++++++++++++++++++++++++++++++-- src/garage/cli_v2/mod.rs | 2 - src/garage/cli_v2/util.rs | 115 ----------------------------------------- 4 files changed, 116 insertions(+), 121 deletions(-) delete mode 100644 src/garage/cli_v2/util.rs (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs index adaf9a25..e6ba2428 100644 --- a/src/garage/cli_v2/cluster.rs +++ b/src/garage/cli_v2/cluster.rs @@ -5,7 +5,7 @@ use garage_util::error::*; use garage_api::admin::api::*; use crate::cli::structs::*; -use crate::cli_v2::util::*; +use crate::cli_v2::layout::*; use crate::cli_v2::*; impl Cli { diff --git a/src/garage/cli_v2/layout.rs b/src/garage/cli_v2/layout.rs index 8088f019..d44771c7 100644 --- a/src/garage/cli_v2/layout.rs +++ b/src/garage/cli_v2/layout.rs @@ -1,5 +1,5 @@ -//use bytesize::ByteSize; -//use format_table::format_table; +use bytesize::ByteSize; +use format_table::format_table; use garage_util::error::*; @@ -7,7 +7,6 @@ use garage_api::admin::api::*; use crate::cli::layout as cli_v1; use crate::cli::structs::*; -use crate::cli_v2::util::*; use crate::cli_v2::*; impl Cli { @@ -170,3 +169,116 @@ To know the correct value of the new layout version, invoke `garage layout show` Ok(()) } } + +// -------------------------- +// ---- helper functions ---- +// -------------------------- + +pub fn capacity_string(v: Option) -> String { + match v { + Some(c) => ByteSize::b(c).to_string_as(false), + None => "gateway".to_string(), + } +} + +pub fn get_staged_or_current_role( + id: &str, + layout: &GetClusterLayoutResponse, +) -> Option { + for node in layout.staged_role_changes.iter() { + if node.id == id { + return match &node.action { + NodeRoleChangeEnum::Remove { .. 
} => None, + NodeRoleChangeEnum::Update { + zone, + capacity, + tags, + } => Some(NodeRoleResp { + id: id.to_string(), + zone: zone.to_string(), + capacity: *capacity, + tags: tags.clone(), + }), + }; + } + } + + for node in layout.roles.iter() { + if node.id == id { + return Some(node.clone()); + } + } + + None +} + +pub fn find_matching_node<'a>( + cand: impl std::iter::Iterator, + pattern: &'a str, +) -> Result { + let mut candidates = vec![]; + for c in cand { + if c.starts_with(pattern) && !candidates.contains(&c) { + candidates.push(c); + } + } + if candidates.len() != 1 { + Err(Error::Message(format!( + "{} nodes match '{}'", + candidates.len(), + pattern, + ))) + } else { + Ok(candidates[0].to_string()) + } +} + +pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool { + let has_role_changes = !layout.staged_role_changes.is_empty(); + + // TODO!! Layout parameters + let has_layout_changes = false; + + if has_role_changes || has_layout_changes { + println!(); + println!("==== STAGED ROLE CHANGES ===="); + if has_role_changes { + let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; + for change in layout.staged_role_changes.iter() { + match &change.action { + NodeRoleChangeEnum::Update { + tags, + zone, + capacity, + } => { + let tags = tags.join(","); + table.push(format!( + "{:.16}\t{}\t{}\t{}", + change.id, + tags, + zone, + capacity_string(*capacity), + )); + } + NodeRoleChangeEnum::Remove { .. } => { + table.push(format!("{:.16}\tREMOVED", change.id)); + } + } + } + format_table(table); + println!(); + } + //TODO + /* + if has_layout_changes { + println!( + "Zone redundancy: {}", + staging.parameters.get().zone_redundancy + ); + } + */ + true + } else { + false + } +} diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs index b51ed67f..51c4d144 100644 --- a/src/garage/cli_v2/mod.rs +++ b/src/garage/cli_v2/mod.rs @@ -1,5 +1,3 @@ -pub mod util; - pub mod bucket; pub mod cluster; pub mod key; diff --git a/src/garage/cli_v2/util.rs b/src/garage/cli_v2/util.rs deleted file mode 100644 index 78399b0d..00000000 --- a/src/garage/cli_v2/util.rs +++ /dev/null @@ -1,115 +0,0 @@ -use bytesize::ByteSize; -use format_table::format_table; - -use garage_util::error::Error; - -use garage_api::admin::api::*; - -pub fn capacity_string(v: Option) -> String { - match v { - Some(c) => ByteSize::b(c).to_string_as(false), - None => "gateway".to_string(), - } -} - -pub fn get_staged_or_current_role( - id: &str, - layout: &GetClusterLayoutResponse, -) -> Option { - for node in layout.staged_role_changes.iter() { - if node.id == id { - return match &node.action { - NodeRoleChangeEnum::Remove { .. 
} => None, - NodeRoleChangeEnum::Update { - zone, - capacity, - tags, - } => Some(NodeRoleResp { - id: id.to_string(), - zone: zone.to_string(), - capacity: *capacity, - tags: tags.clone(), - }), - }; - } - } - - for node in layout.roles.iter() { - if node.id == id { - return Some(node.clone()); - } - } - - None -} - -pub fn find_matching_node<'a>( - cand: impl std::iter::Iterator, - pattern: &'a str, -) -> Result { - let mut candidates = vec![]; - for c in cand { - if c.starts_with(pattern) && !candidates.contains(&c) { - candidates.push(c); - } - } - if candidates.len() != 1 { - Err(Error::Message(format!( - "{} nodes match '{}'", - candidates.len(), - pattern, - ))) - } else { - Ok(candidates[0].to_string()) - } -} - -pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool { - let has_role_changes = !layout.staged_role_changes.is_empty(); - - // TODO!! Layout parameters - let has_layout_changes = false; - - if has_role_changes || has_layout_changes { - println!(); - println!("==== STAGED ROLE CHANGES ===="); - if has_role_changes { - let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; - for change in layout.staged_role_changes.iter() { - match &change.action { - NodeRoleChangeEnum::Update { - tags, - zone, - capacity, - } => { - let tags = tags.join(","); - table.push(format!( - "{:.16}\t{}\t{}\t{}", - change.id, - tags, - zone, - capacity_string(*capacity), - )); - } - NodeRoleChangeEnum::Remove { .. } => { - table.push(format!("{:.16}\tREMOVED", change.id)); - } - } - } - format_table(table); - println!(); - } - //TODO - /* - if has_layout_changes { - println!( - "Zone redundancy: {}", - staging.parameters.get().zone_redundancy - ); - } - */ - true - } else { - false - } -} -- cgit v1.2.3 From 5a89350b382f9a24d4e81b056f88dc16a5daa080 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Thu, 30 Jan 2025 16:40:07 +0100 Subject: cli_v2: fix garage status --- src/garage/cli_v2/cluster.rs | 96 +++++++++++++++++++------------------------- src/garage/cli_v2/mod.rs | 1 - 2 files changed, 41 insertions(+), 56 deletions(-) (limited to 'src/garage/cli_v2') diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs index e6ba2428..34a28674 100644 --- a/src/garage/cli_v2/cluster.rs +++ b/src/garage/cli_v2/cluster.rs @@ -12,11 +12,12 @@ impl Cli { pub async fn cmd_status(&self) -> Result<(), Error> { let status = self.api_request(GetClusterStatusRequest).await?; let layout = self.api_request(GetClusterLayoutRequest).await?; - // TODO: layout history println!("==== HEALTHY NODES ===="); + let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()]; + for adv in status.nodes.iter().filter(|adv| adv.is_up) { let host = adv.hostname.as_deref().unwrap_or("?"); let addr = match adv.addr { @@ -43,78 +44,43 @@ impl Cli { capacity = capacity_string(cfg.capacity), data_avail = data_avail, )); - } else if adv.draining { - healthy_nodes.push(format!( - "{id:.16}\t{host}\t{addr}\t\t\tdraining metadata...", - id = adv.id, - host = host, - addr = addr, - )); } else { - let new_role = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) { - Some(_) => "pending...", + let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) { + Some(NodeRoleChange { + action: NodeRoleChangeEnum::Update { .. }, + .. 
+ }) => "pending...", + _ if adv.draining => "draining metadata..", _ => "NO ROLE ASSIGNED", }; healthy_nodes.push(format!( - "{id:.16}\t{h}\t{addr}\t\t\t{new_role}", + "{id:.16}\t{h}\t{addr}\t\t\t{status}", id = adv.id, h = host, addr = addr, - new_role = new_role, + status = status, )); } } format_table(healthy_nodes); - // Determine which nodes are unhealthy and print that to stdout - // TODO: do we need this, or can it be done in the GetClusterStatus handler? - let status_map = status - .nodes - .iter() - .map(|adv| (&adv.id, adv)) - .collect::>(); - let tf = timeago::Formatter::new(); let mut drain_msg = false; let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()]; - let mut listed = HashSet::new(); - //for ver in layout.versions.iter().rev() { - for ver in [&layout].iter() { - for cfg in ver.roles.iter() { - let node = &cfg.id; - if listed.contains(node.as_str()) { - continue; - } - listed.insert(node.as_str()); - - let adv = status_map.get(node); - if adv.map(|x| x.is_up).unwrap_or(false) { - continue; - } - - // Node is in a layout version, is not a gateway node, and is not up: - // it is in a failed state, add proper line to the output - let (host, last_seen) = match adv { - Some(adv) => ( - adv.hostname.as_deref().unwrap_or("?"), - adv.last_seen_secs_ago - .map(|s| tf.convert(Duration::from_secs(s))) - .unwrap_or_else(|| "never seen".into()), - ), - None => ("??", "never seen".into()), - }; - /* - let capacity = if ver.version == layout.current().version { - cfg.capacity_string() - } else { - drain_msg = true; - "draining metadata...".to_string() - }; - */ + for adv in status.nodes.iter().filter(|x| !x.is_up) { + let node = &adv.id; + + let host = adv.hostname.as_deref().unwrap_or("?"); + let last_seen = adv + .last_seen_secs_ago + .map(|s| tf.convert(Duration::from_secs(s))) + .unwrap_or_else(|| "never seen".into()); + + if let Some(cfg) = &adv.role { let capacity = capacity_string(cfg.capacity); failed_nodes.push(format!( - "{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}", + "{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}", id = node, host = host, tags = cfg.tags.join(","), @@ -122,6 +88,26 @@ impl Cli { capacity = capacity, last_seen = last_seen, )); + } else { + let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) { + Some(NodeRoleChange { + action: NodeRoleChangeEnum::Update { .. }, + .. + }) => "pending...", + _ if adv.draining => { + drain_msg = true; + "draining metadata.." + } + _ => unreachable!(), + }; + + failed_nodes.push(format!( + "{id:.16}\t{host}\t\t\t{status}\t{last_seen}", + id = node, + host = host, + status = status, + last_seen = last_seen, + )); } } diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs index 51c4d144..692b7c1c 100644 --- a/src/garage/cli_v2/mod.rs +++ b/src/garage/cli_v2/mod.rs @@ -3,7 +3,6 @@ pub mod cluster; pub mod key; pub mod layout; -use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::sync::Arc; use std::time::Duration; -- cgit v1.2.3
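
Editor's note: taken together, these commits route every migrated CLI command through one generic helper. A typed request (e.g. GetClusterStatusRequest) is wrapped into the AdminApiRequest enum, shipped to the daemon as AdminRpc::ApiRequest over the admin RPC endpoint, and the enum response is converted back into the typed response with TryFrom. Below is a minimal, self-contained sketch of that dispatch pattern. All names in it (MiniApiRequest, MiniApiResponse, StatusRequest, Endpoint, rpc_call, api_request) are simplified stand-ins invented for illustration; the real Garage types are the ones visible in the diffs above (AdminApiRequest, AdminRpc, EndpointHandler, Cli::api_request), and the real call is asynchronous and error-handled differently.

// Sketch of the request/response dispatch pattern used by cli_v2.
// Stand-in types only; not the actual Garage definitions.

use std::convert::TryFrom;

// Each endpoint has a typed request and a typed response.
struct StatusRequest;
#[derive(Debug)]
struct StatusResponse {
    healthy_nodes: usize,
}

// All requests/responses are also gathered into two enums so they can
// travel inside a single RPC message type.
enum MiniApiRequest {
    Status(StatusRequest),
}
enum MiniApiResponse {
    Status(StatusResponse),
}

impl From<StatusRequest> for MiniApiRequest {
    fn from(r: StatusRequest) -> Self {
        MiniApiRequest::Status(r)
    }
}

impl TryFrom<MiniApiResponse> for StatusResponse {
    type Error = ();
    fn try_from(r: MiniApiResponse) -> Result<Self, ()> {
        match r {
            MiniApiResponse::Status(s) => Ok(s),
        }
    }
}

// Trait tying a request type to its response type, in the spirit of the
// EndpointHandler bound used in the patches above.
trait Endpoint {
    type Response;
}
impl Endpoint for StatusRequest {
    type Response = StatusResponse;
}

// Stand-in for the RPC round trip to the daemon.
fn rpc_call(req: MiniApiRequest) -> MiniApiResponse {
    match req {
        MiniApiRequest::Status(_) => MiniApiResponse::Status(StatusResponse { healthy_nodes: 3 }),
    }
}

// Generic helper, analogous to Cli::api_request: wrap the typed request in
// the enum, send it, then convert the enum response back to the typed one.
fn api_request<T>(req: T) -> Result<T::Response, String>
where
    T: Endpoint,
    MiniApiRequest: From<T>,
    T::Response: TryFrom<MiniApiResponse>,
{
    let resp = rpc_call(MiniApiRequest::from(req));
    T::Response::try_from(resp).map_err(|_| "unexpected response variant".to_string())
}

fn main() {
    let status = api_request(StatusRequest).expect("status request failed");
    println!("healthy nodes: {}", status.healthy_nodes);
}

The point of keeping the conversion generic is that each newly migrated command (status, layout, bucket, key, ...) only has to provide the From/TryFrom pair for its own request and response types; the RPC plumbing and error reporting stay in one place.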