Diffstat (limited to 'src/garage/cli')
-rw-r--r--  src/garage/cli/cmd.rs    | 220
-rw-r--r--  src/garage/cli/layout.rs | 210
-rw-r--r--  src/garage/cli/mod.rs    |   1
-rw-r--r--  src/garage/cli/util.rs   | 243
4 files changed, 1 insertion(+), 673 deletions(-)
diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index 44d3d96c..a6540c65 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -1,10 +1,5 @@ -use std::collections::{HashMap, HashSet}; -use std::time::Duration; - -use format_table::format_table; use garage_util::error::*; -use garage_rpc::layout::*; use garage_rpc::system::*; use garage_rpc::*; @@ -13,204 +8,6 @@ use garage_model::helper::error::Error as HelperError; use crate::admin::*; use crate::cli::*; -pub async fn cli_command_dispatch( - cmd: Command, - system_rpc_endpoint: &Endpoint<SystemRpc, ()>, - admin_rpc_endpoint: &Endpoint<AdminRpc, ()>, - rpc_host: NodeID, -) -> Result<(), HelperError> { - match cmd { - Command::Status => Ok(cmd_status(system_rpc_endpoint, rpc_host).await?), - Command::Node(NodeOperation::Connect(connect_opt)) => { - Ok(cmd_connect(system_rpc_endpoint, rpc_host, connect_opt).await?) - } - Command::Layout(layout_opt) => { - Ok(cli_layout_command_dispatch(layout_opt, system_rpc_endpoint, rpc_host).await?) - } - Command::Bucket(bo) => { - cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BucketOperation(bo)).await - } - Command::Key(ko) => { - cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::KeyOperation(ko)).await - } - Command::Repair(ro) => { - cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::LaunchRepair(ro)).await - } - Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await, - Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await, - Command::Block(bo) => { - cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await - } - Command::Meta(mo) => { - cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::MetaOperation(mo)).await - } - _ => unreachable!(), - } -} - -pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> Result<(), Error> { - let status = fetch_status(rpc_cli, rpc_host).await?; - let layout = fetch_layout(rpc_cli, rpc_host).await?; - - println!("==== HEALTHY NODES ===="); - let mut healthy_nodes = - vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()]; - for adv in status.iter().filter(|adv| adv.is_up) { - let host = adv.status.hostname.as_deref().unwrap_or("?"); - let addr = match adv.addr { - Some(addr) => addr.to_string(), - None => "N/A".to_string(), - }; - if let Some(NodeRoleV(Some(cfg))) = layout.current().roles.get(&adv.id) { - let data_avail = match &adv.status.data_disk_avail { - _ if cfg.capacity.is_none() => "N/A".into(), - Some((avail, total)) => { - let pct = (*avail as f64) / (*total as f64) * 100.; - let avail = bytesize::ByteSize::b(*avail); - format!("{} ({:.1}%)", avail, pct) - } - None => "?".into(), - }; - healthy_nodes.push(format!( - "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}", - id = adv.id, - host = host, - addr = addr, - tags = cfg.tags.join(","), - zone = cfg.zone, - capacity = cfg.capacity_string(), - data_avail = data_avail, - )); - } else { - let prev_role = layout - .versions - .iter() - .rev() - .find_map(|x| match x.roles.get(&adv.id) { - Some(NodeRoleV(Some(cfg))) => Some(cfg), - _ => None, - }); - if let Some(cfg) = prev_role { - healthy_nodes.push(format!( - "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\tdraining metadata...", - id = adv.id, - host = host, - addr = addr, - tags = cfg.tags.join(","), - zone = cfg.zone, - )); - } else { - let new_role = match layout.staging.get().roles.get(&adv.id) { - Some(NodeRoleV(Some(_))) => "pending...", - _ => "NO ROLE ASSIGNED", - }; - 
healthy_nodes.push(format!( - "{id:?}\t{h}\t{addr}\t\t\t{new_role}", - id = adv.id, - h = host, - addr = addr, - new_role = new_role, - )); - } - } - } - format_table(healthy_nodes); - - // Determine which nodes are unhealthy and print that to stdout - let status_map = status - .iter() - .map(|adv| (adv.id, adv)) - .collect::<HashMap<_, _>>(); - - let tf = timeago::Formatter::new(); - let mut drain_msg = false; - let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()]; - let mut listed = HashSet::new(); - for ver in layout.versions.iter().rev() { - for (node, _, role) in ver.roles.items().iter() { - let cfg = match role { - NodeRoleV(Some(role)) if role.capacity.is_some() => role, - _ => continue, - }; - - if listed.contains(node) { - continue; - } - listed.insert(*node); - - let adv = status_map.get(node); - if adv.map(|x| x.is_up).unwrap_or(false) { - continue; - } - - // Node is in a layout version, is not a gateway node, and is not up: - // it is in a failed state, add proper line to the output - let (host, last_seen) = match adv { - Some(adv) => ( - adv.status.hostname.as_deref().unwrap_or("?"), - adv.last_seen_secs_ago - .map(|s| tf.convert(Duration::from_secs(s))) - .unwrap_or_else(|| "never seen".into()), - ), - None => ("??", "never seen".into()), - }; - let capacity = if ver.version == layout.current().version { - cfg.capacity_string() - } else { - drain_msg = true; - "draining metadata...".to_string() - }; - failed_nodes.push(format!( - "{id:?}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}", - id = node, - host = host, - tags = cfg.tags.join(","), - zone = cfg.zone, - capacity = capacity, - last_seen = last_seen, - )); - } - } - - if failed_nodes.len() > 1 { - println!("\n==== FAILED NODES ===="); - format_table(failed_nodes); - if drain_msg { - println!(); - println!("Your cluster is expecting to drain data from nodes that are currently unavailable."); - println!("If these nodes are definitely dead, please review the layout history with"); - println!( - "`garage layout history` and use `garage layout skip-dead-nodes` to force progress." - ); - } - } - - if print_staging_role_changes(&layout) { - println!(); - println!("Please use `garage layout show` to check the proposed new layout and apply it."); - println!(); - } - - Ok(()) -} - -pub async fn cmd_connect( - rpc_cli: &Endpoint<SystemRpc, ()>, - rpc_host: NodeID, - args: ConnectNodeOpt, -) -> Result<(), Error> { - match rpc_cli - .call(&rpc_host, SystemRpc::Connect(args.node), PRIO_NORMAL) - .await?? 
- { - SystemRpc::Ok => { - println!("Success."); - Ok(()) - } - m => Err(Error::unexpected_rpc_message(m)), - } -} - pub async fn cmd_admin( rpc_cli: &Endpoint<AdminRpc, ()>, rpc_host: NodeID, @@ -220,23 +17,6 @@ pub async fn cmd_admin( AdminRpc::Ok(msg) => { println!("{}", msg); } - AdminRpc::BucketList(bl) => { - print_bucket_list(bl); - } - AdminRpc::BucketInfo { - bucket, - relevant_keys, - counters, - mpu_counters, - } => { - print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters); - } - AdminRpc::KeyList(kl) => { - print_key_list(kl); - } - AdminRpc::KeyInfo(key, rb) => { - print_key_info(&key, &rb); - } AdminRpc::WorkerList(wi, wlo) => { print_worker_list(wi, wlo); } diff --git a/src/garage/cli/layout.rs b/src/garage/cli/layout.rs index f053eef4..bb81d144 100644 --- a/src/garage/cli/layout.rs +++ b/src/garage/cli/layout.rs @@ -1,7 +1,6 @@ use bytesize::ByteSize; use format_table::format_table; -use garage_util::crdt::Crdt; use garage_util::error::*; use garage_rpc::layout::*; @@ -10,174 +9,6 @@ use garage_rpc::*; use crate::cli::*; -pub async fn cli_layout_command_dispatch( - cmd: LayoutOperation, - system_rpc_endpoint: &Endpoint<SystemRpc, ()>, - rpc_host: NodeID, -) -> Result<(), Error> { - match cmd { - LayoutOperation::Assign(assign_opt) => { - cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await - } - LayoutOperation::Remove(remove_opt) => { - cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await - } - LayoutOperation::Show => cmd_show_layout(system_rpc_endpoint, rpc_host).await, - LayoutOperation::Apply(apply_opt) => { - cmd_apply_layout(system_rpc_endpoint, rpc_host, apply_opt).await - } - LayoutOperation::Revert(revert_opt) => { - cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await - } - LayoutOperation::Config(config_opt) => { - cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await - } - LayoutOperation::History => cmd_layout_history(system_rpc_endpoint, rpc_host).await, - LayoutOperation::SkipDeadNodes(assume_sync_opt) => { - cmd_layout_skip_dead_nodes(system_rpc_endpoint, rpc_host, assume_sync_opt).await - } - } -} - -pub async fn cmd_assign_role( - rpc_cli: &Endpoint<SystemRpc, ()>, - rpc_host: NodeID, - args: AssignRoleOpt, -) -> Result<(), Error> { - let status = match rpc_cli - .call(&rpc_host, SystemRpc::GetKnownNodes, PRIO_NORMAL) - .await?? 
- { - SystemRpc::ReturnKnownNodes(nodes) => nodes, - resp => return Err(Error::Message(format!("Invalid RPC response: {:?}", resp))), - }; - - let mut layout = fetch_layout(rpc_cli, rpc_host).await?; - let all_nodes = layout.get_all_nodes(); - - let added_nodes = args - .node_ids - .iter() - .map(|node_id| { - find_matching_node( - status - .iter() - .map(|adv| adv.id) - .chain(all_nodes.iter().cloned()), - node_id, - ) - }) - .collect::<Result<Vec<_>, _>>()?; - - let mut roles = layout.current().roles.clone(); - roles.merge(&layout.staging.get().roles); - - for replaced in args.replace.iter() { - let replaced_node = find_matching_node(all_nodes.iter().cloned(), replaced)?; - match roles.get(&replaced_node) { - Some(NodeRoleV(Some(_))) => { - layout - .staging - .get_mut() - .roles - .merge(&roles.update_mutator(replaced_node, NodeRoleV(None))); - } - _ => { - return Err(Error::Message(format!( - "Cannot replace node {:?} as it is not currently in planned layout", - replaced_node - ))); - } - } - } - - if args.capacity.is_some() && args.gateway { - return Err(Error::Message( - "-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into())); - } - if args.capacity == Some(ByteSize::b(0)) { - return Err(Error::Message("Invalid capacity value: 0".into())); - } - - for added_node in added_nodes { - let new_entry = match roles.get(&added_node) { - Some(NodeRoleV(Some(old))) => { - let capacity = match args.capacity { - Some(c) => Some(c.as_u64()), - None if args.gateway => None, - None => old.capacity, - }; - let tags = if args.tags.is_empty() { - old.tags.clone() - } else { - args.tags.clone() - }; - NodeRole { - zone: args.zone.clone().unwrap_or_else(|| old.zone.to_string()), - capacity, - tags, - } - } - _ => { - let capacity = match args.capacity { - Some(c) => Some(c.as_u64()), - None if args.gateway => None, - None => return Err(Error::Message( - "Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())), - }; - NodeRole { - zone: args - .zone - .clone() - .ok_or("Please specify a zone with the -z flag")?, - capacity, - tags: args.tags.clone(), - } - } - }; - - layout - .staging - .get_mut() - .roles - .merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry)))); - } - - send_layout(rpc_cli, rpc_host, layout).await?; - - println!("Role changes are staged but not yet committed."); - println!("Use `garage layout show` to view staged role changes,"); - println!("and `garage layout apply` to enact staged changes."); - Ok(()) -} - -pub async fn cmd_remove_role( - rpc_cli: &Endpoint<SystemRpc, ()>, - rpc_host: NodeID, - args: RemoveRoleOpt, -) -> Result<(), Error> { - let mut layout = fetch_layout(rpc_cli, rpc_host).await?; - - let mut roles = layout.current().roles.clone(); - roles.merge(&layout.staging.get().roles); - - let deleted_node = - find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?; - - layout - .staging - .get_mut() - .roles - .merge(&roles.update_mutator(deleted_node, NodeRoleV(None))); - - send_layout(rpc_cli, rpc_host, layout).await?; - - println!("Role removal is staged but not yet committed."); - println!("Use `garage layout show` to view staged role changes,"); - println!("and `garage layout apply` to enact staged changes."); - Ok(()) -} - pub async fn cmd_show_layout( rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID, @@ -226,47 +57,6 @@ pub async fn cmd_show_layout( Ok(()) } -pub async fn cmd_apply_layout( - rpc_cli: 
&Endpoint<SystemRpc, ()>, - rpc_host: NodeID, - apply_opt: ApplyLayoutOpt, -) -> Result<(), Error> { - let layout = fetch_layout(rpc_cli, rpc_host).await?; - - let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?; - for line in msg.iter() { - println!("{}", line); - } - - send_layout(rpc_cli, rpc_host, layout).await?; - - println!("New cluster layout with updated role assignment has been applied in cluster."); - println!("Data will now be moved around between nodes accordingly."); - - Ok(()) -} - -pub async fn cmd_revert_layout( - rpc_cli: &Endpoint<SystemRpc, ()>, - rpc_host: NodeID, - revert_opt: RevertLayoutOpt, -) -> Result<(), Error> { - if !revert_opt.yes { - return Err(Error::Message( - "Please add the --yes flag to run the layout revert operation".into(), - )); - } - - let layout = fetch_layout(rpc_cli, rpc_host).await?; - - let layout = layout.revert_staged_changes()?; - - send_layout(rpc_cli, rpc_host, layout).await?; - - println!("All proposed role changes in cluster layout have been canceled."); - Ok(()) -} - pub async fn cmd_config_layout( rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID, diff --git a/src/garage/cli/mod.rs b/src/garage/cli/mod.rs index e131f62c..30f566e2 100644 --- a/src/garage/cli/mod.rs +++ b/src/garage/cli/mod.rs @@ -8,6 +8,5 @@ pub(crate) mod convert_db; pub(crate) use cmd::*; pub(crate) use init::*; -pub(crate) use layout::*; pub(crate) use structs::*; pub(crate) use util::*; diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 21c14f42..a3a1480e 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -3,257 +3,16 @@ use std::time::Duration; use format_table::format_table; use garage_util::background::*; -use garage_util::crdt::*; use garage_util::data::*; -use garage_util::error::*; use garage_util::time::*; use garage_block::manager::BlockResyncErrorInfo; -use garage_model::bucket_table::*; -use garage_model::key_table::*; -use garage_model::s3::mpu_table::{self, MultipartUpload}; -use garage_model::s3::object_table; +use garage_model::s3::mpu_table::MultipartUpload; use garage_model::s3::version_table::*; use crate::cli::structs::WorkerListOpt; -pub fn print_bucket_list(bl: Vec<Bucket>) { - println!("List of buckets:"); - - let mut table = vec![]; - for bucket in bl { - let aliases = bucket - .aliases() - .iter() - .filter(|(_, _, active)| *active) - .map(|(name, _, _)| name.to_string()) - .collect::<Vec<_>>(); - let local_aliases_n = match &bucket - .local_aliases() - .iter() - .filter(|(_, _, active)| *active) - .collect::<Vec<_>>()[..] 
- { - [] => "".into(), - [((k, n), _, _)] => format!("{}:{}", k, n), - s => format!("[{} local aliases]", s.len()), - }; - - table.push(format!( - "\t{}\t{}\t{}", - aliases.join(","), - local_aliases_n, - hex::encode(bucket.id), - )); - } - format_table(table); -} - -pub fn print_key_list(kl: Vec<(String, String)>) { - println!("List of keys:"); - let mut table = vec![]; - for key in kl { - table.push(format!("\t{}\t{}", key.0, key.1)); - } - format_table(table); -} - -pub fn print_key_info(key: &Key, relevant_buckets: &HashMap<Uuid, Bucket>) { - let bucket_global_aliases = |b: &Uuid| { - if let Some(bucket) = relevant_buckets.get(b) { - if let Some(p) = bucket.state.as_option() { - return p - .aliases - .items() - .iter() - .filter(|(_, _, active)| *active) - .map(|(a, _, _)| a.clone()) - .collect::<Vec<_>>() - .join(", "); - } - } - - "".to_string() - }; - - match &key.state { - Deletable::Present(p) => { - println!("Key name: {}", p.name.get()); - println!("Key ID: {}", key.key_id); - println!("Secret key: {}", p.secret_key); - println!("Can create buckets: {}", p.allow_create_bucket.get()); - println!("\nKey-specific bucket aliases:"); - let mut table = vec![]; - for (alias_name, _, alias) in p.local_aliases.items().iter() { - if let Some(bucket_id) = alias { - table.push(format!( - "\t{}\t{}\t{}", - alias_name, - bucket_global_aliases(bucket_id), - hex::encode(bucket_id) - )); - } - } - format_table(table); - - println!("\nAuthorized buckets:"); - let mut table = vec![]; - for (bucket_id, perm) in p.authorized_buckets.items().iter() { - if !perm.is_any() { - continue; - } - let rflag = if perm.allow_read { "R" } else { " " }; - let wflag = if perm.allow_write { "W" } else { " " }; - let oflag = if perm.allow_owner { "O" } else { " " }; - let local_aliases = p - .local_aliases - .items() - .iter() - .filter(|(_, _, a)| *a == Some(*bucket_id)) - .map(|(a, _, _)| a.clone()) - .collect::<Vec<_>>() - .join(", "); - table.push(format!( - "\t{}{}{}\t{}\t{}\t{:?}", - rflag, - wflag, - oflag, - bucket_global_aliases(bucket_id), - local_aliases, - bucket_id - )); - } - format_table(table); - } - Deletable::Deleted => { - println!("Key {} is deleted.", key.key_id); - } - } -} - -pub fn print_bucket_info( - bucket: &Bucket, - relevant_keys: &HashMap<String, Key>, - counters: &HashMap<String, i64>, - mpu_counters: &HashMap<String, i64>, -) { - let key_name = |k| { - relevant_keys - .get(k) - .map(|k| k.params().unwrap().name.get().as_str()) - .unwrap_or("<deleted>") - }; - - println!("Bucket: {}", hex::encode(bucket.id)); - match &bucket.state { - Deletable::Deleted => println!("Bucket is deleted."), - Deletable::Present(p) => { - let size = - bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64); - println!( - "\nSize: {} ({})", - size.to_string_as(true), - size.to_string_as(false) - ); - println!( - "Objects: {}", - *counters.get(object_table::OBJECTS).unwrap_or(&0) - ); - println!( - "Unfinished uploads (multipart and non-multipart): {}", - *counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0) - ); - println!( - "Unfinished multipart uploads: {}", - *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0) - ); - let mpu_size = - bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64); - println!( - "Size of unfinished multipart uploads: {} ({})", - mpu_size.to_string_as(true), - mpu_size.to_string_as(false), - ); - - println!("\nWebsite access: {}", p.website_config.get().is_some()); - - let quotas = p.quotas.get(); - if 
quotas.max_size.is_some() || quotas.max_objects.is_some() { - println!("\nQuotas:"); - if let Some(ms) = quotas.max_size { - let ms = bytesize::ByteSize::b(ms); - println!( - " maximum size: {} ({})", - ms.to_string_as(true), - ms.to_string_as(false) - ); - } - if let Some(mo) = quotas.max_objects { - println!(" maximum number of objects: {}", mo); - } - } - - println!("\nGlobal aliases:"); - for (alias, _, active) in p.aliases.items().iter() { - if *active { - println!(" {}", alias); - } - } - - println!("\nKey-specific aliases:"); - let mut table = vec![]; - for ((key_id, alias), _, active) in p.local_aliases.items().iter() { - if *active { - table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias)); - } - } - format_table(table); - - println!("\nAuthorized keys:"); - let mut table = vec![]; - for (k, perm) in p.authorized_keys.items().iter() { - if !perm.is_any() { - continue; - } - let rflag = if perm.allow_read { "R" } else { " " }; - let wflag = if perm.allow_write { "W" } else { " " }; - let oflag = if perm.allow_owner { "O" } else { " " }; - table.push(format!( - "\t{}{}{}\t{}\t{}", - rflag, - wflag, - oflag, - k, - key_name(k) - )); - } - format_table(table); - } - }; -} - -pub fn find_matching_node( - cand: impl std::iter::Iterator<Item = Uuid>, - pattern: &str, -) -> Result<Uuid, Error> { - let mut candidates = vec![]; - for c in cand { - if hex::encode(c).starts_with(pattern) && !candidates.contains(&c) { - candidates.push(c); - } - } - if candidates.len() != 1 { - Err(Error::Message(format!( - "{} nodes match '{}'", - candidates.len(), - pattern, - ))) - } else { - Ok(candidates[0]) - } -} - pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) { let mut wi = wi.into_iter().collect::<Vec<_>>(); wi.sort_by_key(|(tid, info)| { |
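
Among the helpers deleted from util.rs is `find_matching_node`, which resolved a node from a unique hexadecimal ID prefix. For reference, a minimal standalone sketch of that prefix-matching logic, substituting plain strings for the `Uuid` and `Error` types from `garage_util` so it compiles on its own:

// Sketch of the prefix-matching behavior of the removed `find_matching_node`
// helper. The real code iterates over `Uuid` values and returns
// `garage_util::error::Error`; hex strings and a String error stand in here.
fn find_matching_node<'a>(
    candidates: impl Iterator<Item = &'a str>,
    pattern: &str,
) -> Result<String, String> {
    // Collect every distinct candidate whose hex ID starts with the pattern.
    let mut matches: Vec<String> = vec![];
    for c in candidates {
        if c.starts_with(pattern) && !matches.iter().any(|m| m.as_str() == c) {
            matches.push(c.to_string());
        }
    }
    // The prefix must identify exactly one node; anything else is an error.
    if matches.len() != 1 {
        Err(format!("{} nodes match '{}'", matches.len(), pattern))
    } else {
        Ok(matches.remove(0))
    }
}

fn main() {
    // Hypothetical node IDs, for illustration only.
    let nodes = vec![
        "6a8e08af2aab1083".to_string(),
        "6b3f1d92c0de4455".to_string(),
    ];
    // "6a" uniquely identifies the first node...
    println!("{:?}", find_matching_node(nodes.iter().map(|s| s.as_str()), "6a"));
    // ...while "6" matches both and therefore fails.
    println!("{:?}", find_matching_node(nodes.iter().map(|s| s.as_str()), "6"));
}

Requiring exactly one match keeps abbreviated node IDs safe to use on the command line: an ambiguous or unknown prefix is rejected instead of silently picking a node.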