author    | Alex Auvolat <lx@deuxfleurs.fr> | 2025-02-14 13:03:11 +0100
committer | Alex Auvolat <lx@deuxfleurs.fr> | 2025-02-14 13:11:33 +0100
commit    | 315169501173c1cf5f78397375fe05ea2ebb77f0 (patch)
tree      | 777b81f299208afb795b84e9626863b8bbaddd81 /src/garage/cli_v2
parent    | f034e834fa70f579bfd85745aea533b4328cbce4 (diff)
parent    | 61f3de649646d098812e6ddb814e20ce7e66ad94 (diff)
Merge branch 'next-v2' into 1686a/s3-redirects
Diffstat (limited to 'src/garage/cli_v2')
-rw-r--r-- | src/garage/cli_v2/block.rs   | 145
-rw-r--r-- | src/garage/cli_v2/bucket.rs  | 549
-rw-r--r-- | src/garage/cli_v2/cluster.rs | 158
-rw-r--r-- | src/garage/cli_v2/key.rs     | 227
-rw-r--r-- | src/garage/cli_v2/layout.rs  | 284
-rw-r--r-- | src/garage/cli_v2/mod.rs     | 108
-rw-r--r-- | src/garage/cli_v2/node.rs    | 113
-rw-r--r-- | src/garage/cli_v2/worker.rs  | 213
8 files changed, 1797 insertions, 0 deletions
diff --git a/src/garage/cli_v2/block.rs b/src/garage/cli_v2/block.rs
new file mode 100644
index 00000000..bfc0db4a
--- /dev/null
+++ b/src/garage/cli_v2/block.rs
@@ -0,0 +1,145 @@
+//use bytesize::ByteSize;
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn cmd_block(&self, cmd: BlockOperation) -> Result<(), Error> {
+        match cmd {
+            BlockOperation::ListErrors => self.cmd_list_block_errors().await,
+            BlockOperation::Info { hash } => self.cmd_get_block_info(hash).await,
+            BlockOperation::RetryNow { all, blocks } => self.cmd_block_retry_now(all, blocks).await,
+            BlockOperation::Purge { yes, blocks } => self.cmd_block_purge(yes, blocks).await,
+        }
+    }
+
+    pub async fn cmd_list_block_errors(&self) -> Result<(), Error> {
+        let errors = self.local_api_request(LocalListBlockErrorsRequest).await?.0;
+
+        let tf = timeago::Formatter::new();
+        let mut tf2 = timeago::Formatter::new();
+        tf2.ago("");
+
+        let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
+        for e in errors {
+            let next_try = if e.next_try_in_secs > 0 {
+                tf2.convert(Duration::from_secs(e.next_try_in_secs))
+            } else {
+                "asap".to_string()
+            };
+            table.push(format!(
+                "{}\t{}\t{}\t{}\tin {}",
+                e.block_hash,
+                e.refcount,
+                e.error_count,
+                tf.convert(Duration::from_secs(e.last_try_secs_ago)),
+                next_try
+            ));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_get_block_info(&self, hash: String) -> Result<(), Error> {
+        let info = self
+            .local_api_request(LocalGetBlockInfoRequest { block_hash: hash })
+            .await?;
+
+        println!("Block hash: {}", info.block_hash);
+        println!("Refcount: {}", info.refcount);
+        println!();
+
+        let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()];
+        let mut nondeleted_count = 0;
+        for ver in info.versions.iter() {
+            match &ver.backlink {
+                Some(BlockVersionBacklink::Object { bucket_id, key }) => {
+                    table.push(format!(
+                        "{:.16}\t{:.16}\t{}\t\t{:?}",
+                        ver.version_id, bucket_id, key, ver.deleted
+                    ));
+                }
+                Some(BlockVersionBacklink::Upload {
+                    upload_id,
+                    upload_deleted: _,
+                    upload_garbage_collected: _,
+                    bucket_id,
+                    key,
+                }) => {
+                    table.push(format!(
+                        "{:.16}\t{:.16}\t{}\t{:.16}\t{:.16}",
+                        ver.version_id,
+                        bucket_id.as_deref().unwrap_or(""),
+                        key.as_deref().unwrap_or(""),
+                        upload_id,
+                        ver.deleted
+                    ));
+                }
+                None => {
+                    table.push(format!("{:.16}\t\t\tyes", ver.version_id));
+                }
+            }
+            if !ver.deleted {
+                nondeleted_count += 1;
+            }
+        }
+        format_table(table);
+
+        if info.refcount != nondeleted_count {
+            println!();
+            println!(
+                "Warning: refcount does not match number of non-deleted versions, you should try `garage repair block-rc`."
+            );
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_block_retry_now(&self, all: bool, blocks: Vec<String>) -> Result<(), Error> {
+        let req = match (all, blocks.len()) {
+            (true, 0) => LocalRetryBlockResyncRequest::All { all: true },
+            (false, n) if n > 0 => LocalRetryBlockResyncRequest::Blocks {
+                block_hashes: blocks,
+            },
+            _ => {
+                return Err(Error::Message(
+                    "Please specify block hashes or --all (not both)".into(),
+                ))
+            }
+        };
+
+        let res = self.local_api_request(req).await?;
+
+        println!(
+            "{} blocks returned in queue for a retry now (check logs to see results)",
+            res.count
+        );
+
+        Ok(())
+    }
+
+    pub async fn cmd_block_purge(&self, yes: bool, blocks: Vec<String>) -> Result<(), Error> {
+        if !yes {
+            return Err(Error::Message(
+                "Pass the --yes flag to confirm block purge operation.".into(),
+            ));
+        }
+
+        let res = self
+            .local_api_request(LocalPurgeBlocksRequest(blocks))
+            .await?;
+
+        println!(
+            "Purged {} blocks: deleted {} versions, {} objects, {} multipart uploads",
+            res.blocks_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted,
+        );
+
+        Ok(())
+    }
+}
diff --git a/src/garage/cli_v2/bucket.rs b/src/garage/cli_v2/bucket.rs
new file mode 100644
index 00000000..c25c2c3e
--- /dev/null
+++ b/src/garage/cli_v2/bucket.rs
@@ -0,0 +1,549 @@
+//use bytesize::ByteSize;
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> {
+        match cmd {
+            BucketOperation::List => self.cmd_list_buckets().await,
+            BucketOperation::Info(query) => self.cmd_bucket_info(query).await,
+            BucketOperation::Create(query) => self.cmd_create_bucket(query).await,
+            BucketOperation::Delete(query) => self.cmd_delete_bucket(query).await,
+            BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await,
+            BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await,
+            BucketOperation::Allow(query) => self.cmd_bucket_allow(query).await,
+            BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await,
+            BucketOperation::Website(query) => self.cmd_bucket_website(query).await,
+            BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await,
+            BucketOperation::CleanupIncompleteUploads(query) => {
+                self.cmd_cleanup_incomplete_uploads(query).await
+            }
+        }
+    }
+
+    pub async fn cmd_list_buckets(&self) -> Result<(), Error> {
+        let buckets = self.api_request(ListBucketsRequest).await?;
+
+        println!("List of buckets:");
+
+        let mut table = vec![];
+        for bucket in buckets.0.iter() {
+            let local_aliases_n = match &bucket.local_aliases[..] {
+                [] => "".into(),
+                [alias] => format!("{}:{}", alias.access_key_id, alias.alias),
+                s => format!("[{} local aliases]", s.len()),
+            };
+
+            table.push(format!(
+                "\t{}\t{}\t{}",
+                bucket.global_aliases.join(","),
+                local_aliases_n,
+                bucket.id,
+            ));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.name),
+            })
+            .await?;
+
+        println!("Bucket: {}", bucket.id);
+
+        let size = bytesize::ByteSize::b(bucket.bytes as u64);
+        println!(
+            "\nSize: {} ({})",
+            size.to_string_as(true),
+            size.to_string_as(false)
+        );
+        println!("Objects: {}", bucket.objects);
+        println!(
+            "Unfinished uploads (multipart and non-multipart): {}",
+            bucket.unfinished_uploads,
+        );
+        println!(
+            "Unfinished multipart uploads: {}",
+            bucket.unfinished_multipart_uploads
+        );
+        let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_uploads as u64);
+        println!(
+            "Size of unfinished multipart uploads: {} ({})",
+            mpu_size.to_string_as(true),
+            mpu_size.to_string_as(false),
+        );
+
+        println!("\nWebsite access: {}", bucket.website_access);
+
+        if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
+            println!("\nQuotas:");
+            if let Some(ms) = bucket.quotas.max_size {
+                let ms = bytesize::ByteSize::b(ms);
+                println!(
+                    " maximum size: {} ({})",
+                    ms.to_string_as(true),
+                    ms.to_string_as(false)
+                );
+            }
+            if let Some(mo) = bucket.quotas.max_objects {
+                println!(" maximum number of objects: {}", mo);
+            }
+        }
+
+        println!("\nGlobal aliases:");
+        for alias in bucket.global_aliases {
+            println!(" {}", alias);
+        }
+
+        println!("\nKey-specific aliases:");
+        let mut table = vec![];
+        for key in bucket.keys.iter() {
+            for alias in key.bucket_local_aliases.iter() {
+                table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias));
+            }
+        }
+        format_table(table);
+
+        println!("\nAuthorized keys:");
+        let mut table = vec![];
+        for key in bucket.keys.iter() {
+            if !(key.permissions.read || key.permissions.write || key.permissions.owner) {
+                continue;
+            }
+            let rflag = if key.permissions.read { "R" } else { " " };
+            let wflag = if key.permissions.write { "W" } else { " " };
+            let oflag = if key.permissions.owner { "O" } else { " " };
+            table.push(format!(
+                "\t{}{}{}\t{}\t{}",
+                rflag, wflag, oflag, key.access_key_id, key.name
+            ));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> {
+        self.api_request(CreateBucketRequest {
+            global_alias: Some(opt.name.clone()),
+            local_alias: None,
+        })
+        .await?;
+
+        println!("Bucket {} was created.", opt.name);
+
+        Ok(())
+    }
+
+    pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.name.clone()),
+            })
+            .await?;
+
+        // CLI-only checks: the bucket must not have other aliases
+        if bucket
+            .global_aliases
+            .iter()
+            .find(|a| **a != opt.name)
+            .is_some()
+        {
+            return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
+        }
+
+        if bucket
+            .keys
+            .iter()
+            .any(|k| !k.bucket_local_aliases.is_empty())
+        {
+            return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
+        }
+
+        if !opt.yes {
+            println!("About to delete bucket {}.", bucket.id);
+            return Err(Error::Message(
+                "Add --yes flag to really perform this operation".to_string(),
+            ));
+        }
+
+        self.api_request(DeleteBucketRequest {
+            id: bucket.id.clone(),
+        })
+        .await?;
+
+        println!("Bucket {} has been deleted.", bucket.id);
+
+        Ok(())
+    }
+
+    pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.existing_bucket.clone()),
+            })
+            .await?;
+
+        if let Some(key_pat) = &opt.local {
+            let key = self
+                .api_request(GetKeyInfoRequest {
+                    search: Some(key_pat.clone()),
+                    id: None,
+                    show_secret_key: false,
+                })
+                .await?;
+
+            self.api_request(AddBucketAliasRequest {
+                bucket_id: bucket.id.clone(),
+                alias: BucketAliasEnum::Local {
+                    local_alias: opt.new_name.clone(),
+                    access_key_id: key.access_key_id.clone(),
+                },
+            })
+            .await?;
+
+            println!(
+                "Alias {} now points to bucket {:.16} in namespace of key {}",
+                opt.new_name, bucket.id, key.access_key_id
+            )
+        } else {
+            self.api_request(AddBucketAliasRequest {
+                bucket_id: bucket.id.clone(),
+                alias: BucketAliasEnum::Global {
+                    global_alias: opt.new_name.clone(),
+                },
+            })
+            .await?;
+
+            println!(
+                "Alias {} now points to bucket {:.16}",
+                opt.new_name, bucket.id
+            )
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> {
+        if let Some(key_pat) = &opt.local {
+            let key = self
+                .api_request(GetKeyInfoRequest {
+                    search: Some(key_pat.clone()),
+                    id: None,
+                    show_secret_key: false,
+                })
+                .await?;
+
+            let bucket = key
+                .buckets
+                .iter()
+                .find(|x| x.local_aliases.contains(&opt.name))
+                .ok_or_message(format!(
+                    "No bucket called {} in namespace of key {}",
+                    opt.name, key.access_key_id
+                ))?;
+
+            self.api_request(RemoveBucketAliasRequest {
+                bucket_id: bucket.id.clone(),
+                alias: BucketAliasEnum::Local {
+                    access_key_id: key.access_key_id.clone(),
+                    local_alias: opt.name.clone(),
+                },
+            })
+            .await?;
+
+            println!(
+                "Alias {} no longer points to bucket {:.16} in namespace of key {}",
+                &opt.name, bucket.id, key.access_key_id
+            )
+        } else {
+            let bucket = self
+                .api_request(GetBucketInfoRequest {
+                    id: None,
+                    global_alias: Some(opt.name.clone()),
+                    search: None,
+                })
+                .await?;
+
+            self.api_request(RemoveBucketAliasRequest {
+                bucket_id: bucket.id.clone(),
+                alias: BucketAliasEnum::Global {
+                    global_alias: opt.name.clone(),
+                },
+            })
+            .await?;
+
+            println!(
+                "Alias {} no longer points to bucket {:.16}",
+                opt.name, bucket.id
+            )
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.bucket.clone()),
+            })
+            .await?;
+
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern.clone()),
+                show_secret_key: false,
+            })
+            .await?;
+
+        self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
+            bucket_id: bucket.id.clone(),
+            access_key_id: key.access_key_id.clone(),
+            permissions: ApiBucketKeyPerm {
+                read: opt.read,
+                write: opt.write,
+                owner: opt.owner,
+            },
+        }))
+        .await?;
+
+        let new_bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: Some(bucket.id),
+                global_alias: None,
+                search: None,
+            })
+            .await?;
+
+        if let Some(new_key) = new_bucket
+            .keys
+            .iter()
+            .find(|k| k.access_key_id == key.access_key_id)
+        {
+            println!(
+                "New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
+                key.access_key_id,
+                new_bucket.id,
+                new_key.permissions.read,
+                new_key.permissions.write,
+                new_key.permissions.owner
+            );
+        } else {
+            println!(
+                "Access key {} has no permissions on bucket {:.16}",
+                key.access_key_id, new_bucket.id
+            );
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.bucket.clone()),
+            })
+            .await?;
+
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern.clone()),
+                show_secret_key: false,
+            })
+            .await?;
+
+        self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
+            bucket_id: bucket.id.clone(),
+            access_key_id: key.access_key_id.clone(),
+            permissions: ApiBucketKeyPerm {
+                read: opt.read,
+                write: opt.write,
+                owner: opt.owner,
+            },
+        }))
+        .await?;
+
+        let new_bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: Some(bucket.id),
+                global_alias: None,
+                search: None,
+            })
+            .await?;
+
+        if let Some(new_key) = new_bucket
+            .keys
+            .iter()
+            .find(|k| k.access_key_id == key.access_key_id)
+        {
+            println!(
+                "New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
+                key.access_key_id,
+                new_bucket.id,
+                new_key.permissions.read,
+                new_key.permissions.write,
+                new_key.permissions.owner
+            );
+        } else {
+            println!(
+                "Access key {} no longer has permissions on bucket {:.16}",
+                key.access_key_id, new_bucket.id
+            );
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.bucket.clone()),
+            })
+            .await?;
+
+        if !(opt.allow ^ opt.deny) {
+            return Err(Error::Message(
+                "You must specify exactly one flag, either --allow or --deny".to_string(),
+            ));
+        }
+
+        let wa = if opt.allow {
+            UpdateBucketWebsiteAccess {
+                enabled: true,
+                index_document: Some(opt.index_document.clone()),
+                error_document: opt
+                    .error_document
+                    .or(bucket.website_config.and_then(|x| x.error_document.clone())),
+            }
+        } else {
+            UpdateBucketWebsiteAccess {
+                enabled: false,
+                index_document: None,
+                error_document: None,
+            }
+        };
+
+        self.api_request(UpdateBucketRequest {
+            id: bucket.id,
+            body: UpdateBucketRequestBody {
+                website_access: Some(wa),
+                quotas: None,
+            },
+        })
+        .await?;
+
+        if opt.allow {
+            println!("Website access allowed for {}", &opt.bucket);
+        } else {
+            println!("Website access denied for {}", &opt.bucket);
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> {
+        let bucket = self
+            .api_request(GetBucketInfoRequest {
+                id: None,
+                global_alias: None,
+                search: Some(opt.bucket.clone()),
+            })
+            .await?;
+
+        if opt.max_size.is_none() && opt.max_objects.is_none() {
+            return Err(Error::Message(
+                "You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
+            ));
+        }
+
+        let new_quotas = ApiBucketQuotas {
+            max_size: match opt.max_size.as_deref() {
+                Some("none") => None,
+                Some(v) => Some(
+                    v.parse::<bytesize::ByteSize>()
+                        .ok_or_message(format!("Invalid size specified: {}", v))?
+                        .as_u64(),
+                ),
+                None => bucket.quotas.max_size,
+            },
+            max_objects: match opt.max_objects.as_deref() {
+                Some("none") => None,
+                Some(v) => Some(
+                    v.parse::<u64>()
+                        .ok_or_message(format!("Invalid number: {}", v))?,
+                ),
+                None => bucket.quotas.max_objects,
+            },
+        };
+
+        self.api_request(UpdateBucketRequest {
+            id: bucket.id.clone(),
+            body: UpdateBucketRequestBody {
+                website_access: None,
+                quotas: Some(new_quotas),
+            },
+        })
+        .await?;
+
+        println!("Quotas updated for bucket {:.16}", bucket.id);
+
+        Ok(())
+    }
+
+    pub async fn cmd_cleanup_incomplete_uploads(
+        &self,
+        opt: CleanupIncompleteUploadsOpt,
+    ) -> Result<(), Error> {
+        let older_than = parse_duration::parse::parse(&opt.older_than)
+            .ok_or_message("Invalid duration passed for --older-than parameter")?;
+
+        for b in opt.buckets.iter() {
+            let bucket = self
+                .api_request(GetBucketInfoRequest {
+                    id: None,
+                    global_alias: None,
+                    search: Some(b.clone()),
+                })
+                .await?;
+
+            let res = self
+                .api_request(CleanupIncompleteUploadsRequest {
+                    bucket_id: bucket.id.clone(),
+                    older_than_secs: older_than.as_secs(),
+                })
+                .await?;
+
+            if res.uploads_deleted > 0 {
+                println!("{:.16}: {} uploads deleted", bucket.id, res.uploads_deleted);
+            } else {
+                println!("{:.16}: no uploads deleted", bucket.id);
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs
new file mode 100644
index 00000000..6eb65d12
--- /dev/null
+++ b/src/garage/cli_v2/cluster.rs
@@ -0,0 +1,158 @@
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::layout::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn cmd_status(&self) -> Result<(), Error> {
+        let status = self.api_request(GetClusterStatusRequest).await?;
+        let layout = self.api_request(GetClusterLayoutRequest).await?;
+
+        println!("==== HEALTHY NODES ====");
+
+        let mut healthy_nodes =
+            vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
+
+        for adv in status.nodes.iter().filter(|adv| adv.is_up) {
+            let host = adv.hostname.as_deref().unwrap_or("?");
+            let addr = match adv.addr {
+                Some(addr) => addr.to_string(),
+                None => "N/A".to_string(),
+            };
+            if let Some(cfg) = &adv.role {
+                let data_avail = match &adv.data_partition {
+                    _ if cfg.capacity.is_none() => "N/A".into(),
+                    Some(FreeSpaceResp { available, total }) => {
+                        let pct = (*available as f64) / (*total as f64) * 100.;
+                        let avail_str = bytesize::ByteSize::b(*available);
+                        format!("{} ({:.1}%)", avail_str, pct)
+                    }
+                    None => "?".into(),
+                };
+                healthy_nodes.push(format!(
+                    "{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
+                    id = adv.id,
+                    host = host,
+                    addr = addr,
+                    tags = cfg.tags.join(","),
+                    zone = cfg.zone,
+                    capacity = capacity_string(cfg.capacity),
+                    data_avail = data_avail,
+                ));
+            } else {
+                let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
+                    Some(NodeRoleChange {
+                        action: NodeRoleChangeEnum::Update { .. },
+                        ..
+                    }) => "pending...",
+                    _ if adv.draining => "draining metadata..",
+                    _ => "NO ROLE ASSIGNED",
+                };
+                healthy_nodes.push(format!(
+                    "{id:.16}\t{h}\t{addr}\t\t\t{status}",
+                    id = adv.id,
+                    h = host,
+                    addr = addr,
+                    status = status,
+                ));
+            }
+        }
+        format_table(healthy_nodes);
+
+        let tf = timeago::Formatter::new();
+        let mut drain_msg = false;
+        let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
+        for adv in status.nodes.iter().filter(|x| !x.is_up) {
+            let node = &adv.id;
+
+            let host = adv.hostname.as_deref().unwrap_or("?");
+            let last_seen = adv
+                .last_seen_secs_ago
+                .map(|s| tf.convert(Duration::from_secs(s)))
+                .unwrap_or_else(|| "never seen".into());
+
+            if let Some(cfg) = &adv.role {
+                let capacity = capacity_string(cfg.capacity);
+
+                failed_nodes.push(format!(
+                    "{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
+                    id = node,
+                    host = host,
+                    tags = cfg.tags.join(","),
+                    zone = cfg.zone,
+                    capacity = capacity,
+                    last_seen = last_seen,
+                ));
+            } else {
+                let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
+                    Some(NodeRoleChange {
+                        action: NodeRoleChangeEnum::Update { .. },
+                        ..
+                    }) => "pending...",
+                    _ if adv.draining => {
+                        drain_msg = true;
+                        "draining metadata.."
+                    }
+                    _ => unreachable!(),
+                };
+
+                failed_nodes.push(format!(
+                    "{id:.16}\t{host}\t\t\t{status}\t{last_seen}",
+                    id = node,
+                    host = host,
+                    status = status,
+                    last_seen = last_seen,
+                ));
+            }
+        }
+
+        if failed_nodes.len() > 1 {
+            println!("\n==== FAILED NODES ====");
+            format_table(failed_nodes);
+            if drain_msg {
+                println!();
+                println!("Your cluster is expecting to drain data from nodes that are currently unavailable.");
+                println!(
+                    "If these nodes are definitely dead, please review the layout history with"
+                );
+                println!(
+                    "`garage layout history` and use `garage layout skip-dead-nodes` to force progress."
+                );
+            }
+        }
+
+        if print_staging_role_changes(&layout) {
+            println!();
+            println!(
+                "Please use `garage layout show` to check the proposed new layout and apply it."
+            );
+            println!();
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_connect(&self, opt: ConnectNodeOpt) -> Result<(), Error> {
+        let res = self
+            .api_request(ConnectClusterNodesRequest(vec![opt.node]))
+            .await?;
+        if res.0.len() != 1 {
+            return Err(Error::Message(format!("unexpected response: {:?}", res)));
+        }
+        let res = res.0.into_iter().next().unwrap();
+        if res.success {
+            println!("Success.");
+            Ok(())
+        } else {
+            Err(Error::Message(format!(
+                "Failure: {}",
+                res.error.unwrap_or_default()
+            )))
+        }
+    }
+}
diff --git a/src/garage/cli_v2/key.rs b/src/garage/cli_v2/key.rs
new file mode 100644
index 00000000..b956906d
--- /dev/null
+++ b/src/garage/cli_v2/key.rs
@@ -0,0 +1,227 @@
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn cmd_key(&self, cmd: KeyOperation) -> Result<(), Error> {
+        match cmd {
+            KeyOperation::List => self.cmd_list_keys().await,
+            KeyOperation::Info(query) => self.cmd_key_info(query).await,
+            KeyOperation::Create(query) => self.cmd_create_key(query).await,
+            KeyOperation::Rename(query) => self.cmd_rename_key(query).await,
+            KeyOperation::Delete(query) => self.cmd_delete_key(query).await,
+            KeyOperation::Allow(query) => self.cmd_allow_key(query).await,
+            KeyOperation::Deny(query) => self.cmd_deny_key(query).await,
+            KeyOperation::Import(query) => self.cmd_import_key(query).await,
+        }
+    }
+
+    pub async fn cmd_list_keys(&self) -> Result<(), Error> {
+        let keys = self.api_request(ListKeysRequest).await?;
+
+        println!("List of keys:");
+        let mut table = vec![];
+        for key in keys.0.iter() {
+            table.push(format!("\t{}\t{}", key.id, key.name));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_key_info(&self, opt: KeyInfoOpt) -> Result<(), Error> {
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern),
+                show_secret_key: opt.show_secret,
+            })
+            .await?;
+
+        print_key_info(&key);
+
+        Ok(())
+    }
+
+    pub async fn cmd_create_key(&self, opt: KeyNewOpt) -> Result<(), Error> {
+        let key = self
+            .api_request(CreateKeyRequest {
+                name: Some(opt.name),
+            })
+            .await?;
+
+        print_key_info(&key.0);
+
+        Ok(())
+    }
+
+    pub async fn cmd_rename_key(&self, opt: KeyRenameOpt) -> Result<(), Error> {
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern),
+                show_secret_key: false,
+            })
+            .await?;
+
+        let new_key = self
+            .api_request(UpdateKeyRequest {
+                id: key.access_key_id,
+                body: UpdateKeyRequestBody {
+                    name: Some(opt.new_name),
+                    allow: None,
+                    deny: None,
+                },
+            })
+            .await?;
+
+        print_key_info(&new_key.0);
+
+        Ok(())
+    }
+
+    pub async fn cmd_delete_key(&self, opt: KeyDeleteOpt) -> Result<(), Error> {
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern),
+                show_secret_key: false,
+            })
+            .await?;
+
+        if !opt.yes {
+            println!("About to delete key {}...", key.access_key_id);
+            return Err(Error::Message(
+                "Add --yes flag to really perform this operation".to_string(),
+            ));
+        }
+
+        self.api_request(DeleteKeyRequest {
+            id: key.access_key_id.clone(),
+        })
+        .await?;
+
+        println!("Access key {} has been deleted.", key.access_key_id);
+
+        Ok(())
+    }
+
+    pub async fn cmd_allow_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern),
+                show_secret_key: false,
+            })
+            .await?;
+
+        let new_key = self
+            .api_request(UpdateKeyRequest {
+                id: key.access_key_id,
+                body: UpdateKeyRequestBody {
+                    name: None,
+                    allow: Some(KeyPerm {
+                        create_bucket: opt.create_bucket,
+                    }),
+                    deny: None,
+                },
+            })
+            .await?;
+
+        print_key_info(&new_key.0);
+
+        Ok(())
+    }
+
+    pub async fn cmd_deny_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
+        let key = self
+            .api_request(GetKeyInfoRequest {
+                id: None,
+                search: Some(opt.key_pattern),
+                show_secret_key: false,
+            })
+            .await?;
+
+        let new_key = self
+            .api_request(UpdateKeyRequest {
+                id: key.access_key_id,
+                body: UpdateKeyRequestBody {
+                    name: None,
+                    allow: None,
+                    deny: Some(KeyPerm {
+                        create_bucket: opt.create_bucket,
+                    }),
+                },
+            })
+            .await?;
+
+        print_key_info(&new_key.0);
+
+        Ok(())
+    }
+
+    pub async fn cmd_import_key(&self, opt: KeyImportOpt) -> Result<(), Error> {
+        if !opt.yes {
+            return Err(Error::Message("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
+        }
+
+        let new_key = self
+            .api_request(ImportKeyRequest {
+                name: Some(opt.name),
+                access_key_id: opt.key_id,
+                secret_access_key: opt.secret_key,
+            })
+            .await?;
+
+        print_key_info(&new_key.0);
+
+        Ok(())
+    }
+}
+
+fn print_key_info(key: &GetKeyInfoResponse) {
+    println!("Key name: {}", key.name);
+    println!("Key ID: {}", key.access_key_id);
+    println!(
+        "Secret key: {}",
+        key.secret_access_key.as_deref().unwrap_or("(redacted)")
+    );
+    println!("Can create buckets: {}", key.permissions.create_bucket);
+
+    println!("\nKey-specific bucket aliases:");
+    let mut table = vec![];
+    for bucket in key.buckets.iter() {
+        for la in bucket.local_aliases.iter() {
+            table.push(format!(
+                "\t{}\t{}\t{}",
+                la,
+                bucket.global_aliases.join(","),
+                bucket.id
+            ));
+        }
+    }
+    format_table(table);
+
+    println!("\nAuthorized buckets:");
+    let mut table = vec![];
+    for bucket in key.buckets.iter() {
+        let rflag = if bucket.permissions.read { "R" } else { " " };
+        let wflag = if bucket.permissions.write { "W" } else { " " };
+        let oflag = if bucket.permissions.owner { "O" } else { " " };
+        table.push(format!(
+            "\t{}{}{}\t{}\t{}\t{:.16}",
+            rflag,
+            wflag,
+            oflag,
+            bucket.global_aliases.join(","),
+            bucket.local_aliases.join(","),
+            bucket.id
+        ));
+    }
+    format_table(table);
+}
diff --git a/src/garage/cli_v2/layout.rs b/src/garage/cli_v2/layout.rs
new file mode 100644
index 00000000..2f14b332
--- /dev/null
+++ b/src/garage/cli_v2/layout.rs
@@ -0,0 +1,284 @@
+use bytesize::ByteSize;
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::layout as cli_v1;
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> {
+        match cmd {
+            LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await,
+            LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await,
+            LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await,
+            LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await,
+
+            // TODO
+            LayoutOperation::Show => {
+                cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await
+            }
+            LayoutOperation::Config(config_opt) => {
+                cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt)
+                    .await
+            }
+            LayoutOperation::History => {
+                cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await
+            }
+            LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
+                cli_v1::cmd_layout_skip_dead_nodes(
+                    &self.system_rpc_endpoint,
+                    self.rpc_host,
+                    assume_sync_opt,
+                )
+                .await
+            }
+        }
+    }
+
+    pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> {
+        let status = self.api_request(GetClusterStatusRequest).await?;
+        let layout = self.api_request(GetClusterLayoutRequest).await?;
+
+        let all_node_ids_iter = status
+            .nodes
+            .iter()
+            .map(|x| x.id.as_str())
+            .chain(layout.roles.iter().map(|x| x.id.as_str()));
+
+        let mut actions = vec![];
+
+        for node in opt.replace.iter() {
+            let id = find_matching_node(all_node_ids_iter.clone(), &node)?;
+
+            actions.push(NodeRoleChange {
+                id,
+                action: NodeRoleChangeEnum::Remove { remove: true },
+            });
+        }
+
+        for node in opt.node_ids.iter() {
+            let id = find_matching_node(all_node_ids_iter.clone(), &node)?;
+
+            let current = get_staged_or_current_role(&id, &layout);
+
+            let zone = opt
+                .zone
+                .clone()
+                .or_else(|| current.as_ref().map(|c| c.zone.clone()))
+                .ok_or_message("Please specify a zone with the -z flag")?;
+
+            let capacity = if opt.gateway {
+                if opt.capacity.is_some() {
+                    return Err(Error::Message("Please specify only -c or -g".into()));
+                }
+                None
+            } else if let Some(cap) = opt.capacity {
+                Some(cap.as_u64())
+            } else {
+                current.as_ref().ok_or_message("Please specify a capacity with the -c flag, or set node explicitly as gateway with -g")?.capacity
+            };
+
+            let tags = if !opt.tags.is_empty() {
+                opt.tags.clone()
+            } else if let Some(cur) = current.as_ref() {
+                cur.tags.clone()
+            } else {
+                vec![]
+            };
+
+            actions.push(NodeRoleChange {
+                id,
+                action: NodeRoleChangeEnum::Update {
+                    zone,
+                    capacity,
+                    tags,
+                },
+            });
+        }
+
+        self.api_request(UpdateClusterLayoutRequest(actions))
+            .await?;
+
+        println!("Role changes are staged but not yet committed.");
+        println!("Use `garage layout show` to view staged role changes,");
+        println!("and `garage layout apply` to enact staged changes.");
+        Ok(())
+    }
+
+    pub async fn cmd_remove_role(&self, opt: RemoveRoleOpt) -> Result<(), Error> {
+        let status = self.api_request(GetClusterStatusRequest).await?;
+        let layout = self.api_request(GetClusterLayoutRequest).await?;
+
+        let all_node_ids_iter = status
+            .nodes
+            .iter()
+            .map(|x| x.id.as_str())
+            .chain(layout.roles.iter().map(|x| x.id.as_str()));
+
+        let id = find_matching_node(all_node_ids_iter.clone(), &opt.node_id)?;
+
+        let actions = vec![NodeRoleChange {
+            id,
+            action: NodeRoleChangeEnum::Remove { remove: true },
+        }];
+
+        self.api_request(UpdateClusterLayoutRequest(actions))
+            .await?;
+
+        println!("Role removal is staged but not yet committed.");
+        println!("Use `garage layout show` to view staged role changes,");
+        println!("and `garage layout apply` to enact staged changes.");
+        Ok(())
+    }
+
+    pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> {
+        let missing_version_error = r#"
+Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
+To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes.
+        "#;
+
+        let req = ApplyClusterLayoutRequest {
+            version: apply_opt.version.ok_or_message(missing_version_error)?,
+        };
+        let res = self.api_request(req).await?;
+
+        for line in res.message.iter() {
+            println!("{}", line);
+        }
+
+        println!("New cluster layout with updated role assignment has been applied in cluster.");
+        println!("Data will now be moved around between nodes accordingly.");
+
+        Ok(())
+    }
+
+    pub async fn cmd_revert_layout(&self, revert_opt: RevertLayoutOpt) -> Result<(), Error> {
+        if !revert_opt.yes {
+            return Err(Error::Message(
+                "Please add the --yes flag to run the layout revert operation".into(),
+            ));
+        }
+
+        self.api_request(RevertClusterLayoutRequest).await?;
+
+        println!("All proposed role changes in cluster layout have been canceled.");
+        Ok(())
+    }
+}
+
+// --------------------------
+// ---- helper functions ----
+// --------------------------
+
+pub fn capacity_string(v: Option<u64>) -> String {
+    match v {
+        Some(c) => ByteSize::b(c).to_string_as(false),
+        None => "gateway".to_string(),
+    }
+}
+
+pub fn get_staged_or_current_role(
+    id: &str,
+    layout: &GetClusterLayoutResponse,
+) -> Option<NodeRoleResp> {
+    for node in layout.staged_role_changes.iter() {
+        if node.id == id {
+            return match &node.action {
+                NodeRoleChangeEnum::Remove { .. } => None,
+                NodeRoleChangeEnum::Update {
+                    zone,
+                    capacity,
+                    tags,
+                } => Some(NodeRoleResp {
+                    id: id.to_string(),
+                    zone: zone.to_string(),
+                    capacity: *capacity,
+                    tags: tags.clone(),
+                }),
+            };
+        }
+    }
+
+    for node in layout.roles.iter() {
+        if node.id == id {
+            return Some(node.clone());
+        }
+    }
+
+    None
+}
+
+pub fn find_matching_node<'a>(
+    cand: impl std::iter::Iterator<Item = &'a str>,
+    pattern: &'a str,
+) -> Result<String, Error> {
+    let mut candidates = vec![];
+    for c in cand {
+        if c.starts_with(pattern) && !candidates.contains(&c) {
+            candidates.push(c);
+        }
+    }
+    if candidates.len() != 1 {
+        Err(Error::Message(format!(
+            "{} nodes match '{}'",
+            candidates.len(),
+            pattern,
+        )))
+    } else {
+        Ok(candidates[0].to_string())
+    }
+}
+
+pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool {
+    let has_role_changes = !layout.staged_role_changes.is_empty();
+
+    // TODO!! Layout parameters
+    let has_layout_changes = false;
+
+    if has_role_changes || has_layout_changes {
+        println!();
+        println!("==== STAGED ROLE CHANGES ====");
+        if has_role_changes {
+            let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()];
+            for change in layout.staged_role_changes.iter() {
+                match &change.action {
+                    NodeRoleChangeEnum::Update {
+                        tags,
+                        zone,
+                        capacity,
+                    } => {
+                        let tags = tags.join(",");
+                        table.push(format!(
+                            "{:.16}\t{}\t{}\t{}",
+                            change.id,
+                            tags,
+                            zone,
+                            capacity_string(*capacity),
+                        ));
+                    }
+                    NodeRoleChangeEnum::Remove { .. } => {
+                        table.push(format!("{:.16}\tREMOVED", change.id));
+                    }
+                }
+            }
+            format_table(table);
+            println!();
+        }
+        //TODO
+        /*
+        if has_layout_changes {
+            println!(
+                "Zone redundancy: {}",
+                staging.parameters.get().zone_redundancy
+            );
+        }
+        */
+        true
+    } else {
+        false
+    }
+}
diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs
new file mode 100644
index 00000000..28c7c824
--- /dev/null
+++ b/src/garage/cli_v2/mod.rs
@@ -0,0 +1,108 @@
+pub mod bucket;
+pub mod cluster;
+pub mod key;
+pub mod layout;
+
+pub mod block;
+pub mod node;
+pub mod worker;
+
+use std::convert::TryFrom;
+use std::sync::Arc;
+use std::time::Duration;
+
+use garage_util::error::*;
+
+use garage_rpc::system::*;
+use garage_rpc::*;
+
+use garage_api_admin::api::*;
+use garage_api_admin::api_server::{AdminRpc as ProxyRpc, AdminRpcResponse as ProxyRpcResponse};
+use garage_api_admin::RequestHandler;
+
+use crate::cli::structs::*;
+
+pub struct Cli {
+    pub system_rpc_endpoint: Arc<Endpoint<SystemRpc, ()>>,
+    pub proxy_rpc_endpoint: Arc<Endpoint<ProxyRpc, ()>>,
+    pub rpc_host: NodeID,
+}
+
+impl Cli {
+    pub async fn handle(&self, cmd: Command) -> Result<(), Error> {
+        match cmd {
+            Command::Status => self.cmd_status().await,
+            Command::Node(NodeOperation::Connect(connect_opt)) => {
+                self.cmd_connect(connect_opt).await
+            }
+            Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await,
+            Command::Bucket(bo) => self.cmd_bucket(bo).await,
+            Command::Key(ko) => self.cmd_key(ko).await,
+            Command::Worker(wo) => self.cmd_worker(wo).await,
+            Command::Block(bo) => self.cmd_block(bo).await,
+            Command::Meta(mo) => self.cmd_meta(mo).await,
+            Command::Stats(so) => self.cmd_stats(so).await,
+            Command::Repair(ro) => self.cmd_repair(ro).await,
+
+            _ => unreachable!(),
+        }
+    }
+
+    pub async fn api_request<T>(&self, req: T) -> Result<<T as RequestHandler>::Response, Error>
+    where
+        T: RequestHandler,
+        AdminApiRequest: From<T>,
+        <T as RequestHandler>::Response: TryFrom<TaggedAdminApiResponse>,
+    {
+        let req = AdminApiRequest::from(req);
+        let req_name = req.name();
+        match self
+            .proxy_rpc_endpoint
+            .call(&self.rpc_host, ProxyRpc::Proxy(req), PRIO_NORMAL)
+            .await??
+        {
+            ProxyRpcResponse::ProxyApiOkResponse(resp) => {
+                <T as RequestHandler>::Response::try_from(resp).map_err(|_| {
+                    Error::Message(format!("{} returned unexpected response", req_name))
+                })
+            }
+            ProxyRpcResponse::ApiErrorResponse {
+                http_code,
+                error_code,
+                message,
+            } => Err(Error::Message(format!(
+                "{} returned {} ({}): {}",
+                req_name, error_code, http_code, message
+            ))),
+            m => Err(Error::unexpected_rpc_message(m)),
+        }
+    }
+
+    pub async fn local_api_request<T>(
+        &self,
+        req: T,
+    ) -> Result<<T as RequestHandler>::Response, Error>
+    where
+        T: RequestHandler,
+        MultiRequest<T>: RequestHandler<Response = MultiResponse<<T as RequestHandler>::Response>>,
+        AdminApiRequest: From<MultiRequest<T>>,
+        <MultiRequest<T> as RequestHandler>::Response: TryFrom<TaggedAdminApiResponse>,
+    {
+        let req = MultiRequest {
+            node: hex::encode(self.rpc_host),
+            body: req,
+        };
+        let resp = self.api_request(req).await?;
+
+        if let Some((_, e)) = resp.error.into_iter().next() {
+            return Err(Error::Message(e));
+        }
+        if resp.success.len() != 1 {
+            return Err(Error::Message(format!(
+                "{} responses returned, expected 1",
+                resp.success.len()
+            )));
+        }
+        Ok(resp.success.into_iter().next().unwrap().1)
+    }
+}
diff --git a/src/garage/cli_v2/node.rs b/src/garage/cli_v2/node.rs
new file mode 100644
index 00000000..c5d0cdea
--- /dev/null
+++ b/src/garage/cli_v2/node.rs
@@ -0,0 +1,113 @@
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn cmd_meta(&self, cmd: MetaOperation) -> Result<(), Error> {
+        let MetaOperation::Snapshot { all } = cmd;
+
+        let res = self
+            .api_request(CreateMetadataSnapshotRequest {
+                node: if all {
+                    "*".to_string()
+                } else {
+                    hex::encode(self.rpc_host)
+                },
+                body: LocalCreateMetadataSnapshotRequest,
+            })
+            .await?;
+
+        let mut table = vec![];
+        for (node, err) in res.error.iter() {
+            table.push(format!("{:.16}\tError: {}", node, err));
+        }
+        for (node, _) in res.success.iter() {
+            table.push(format!("{:.16}\tSnapshot created", node));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_stats(&self, cmd: StatsOpt) -> Result<(), Error> {
+        let res = self
+            .api_request(GetNodeStatisticsRequest {
+                node: if cmd.all_nodes {
+                    "*".to_string()
+                } else {
+                    hex::encode(self.rpc_host)
+                },
+                body: LocalGetNodeStatisticsRequest,
+            })
+            .await?;
+
+        for (node, res) in res.success.iter() {
+            println!("======================");
+            println!("Stats for node {:.16}:\n", node);
+            println!("{}\n", res.freeform);
+        }
+
+        for (node, err) in res.error.iter() {
+            println!("======================");
+            println!("Node {:.16}: error: {}\n", node, err);
+        }
+
+        let res = self.api_request(GetClusterStatisticsRequest).await?;
+        println!("======================");
+        println!("Cluster statistics:\n");
+        println!("{}\n", res.freeform);
+
+        Ok(())
+    }
+
+    pub async fn cmd_repair(&self, cmd: RepairOpt) -> Result<(), Error> {
+        if !cmd.yes {
+            return Err(Error::Message(
+                "Please add --yes to start the repair operation".into(),
+            ));
+        }
+
+        let repair_type = match cmd.what {
+            RepairWhat::Tables => RepairType::Tables,
+            RepairWhat::Blocks => RepairType::Blocks,
+            RepairWhat::Versions => RepairType::Versions,
+            RepairWhat::MultipartUploads => RepairType::MultipartUploads,
+            RepairWhat::BlockRefs => RepairType::BlockRefs,
+            RepairWhat::BlockRc => RepairType::BlockRc,
+            RepairWhat::Rebalance => RepairType::Rebalance,
+            RepairWhat::Scrub { cmd } => RepairType::Scrub(match cmd {
+                ScrubCmd::Start => ScrubCommand::Start,
+                ScrubCmd::Cancel => ScrubCommand::Cancel,
+                ScrubCmd::Pause => ScrubCommand::Pause,
+                ScrubCmd::Resume => ScrubCommand::Resume,
+            }),
+        };
+
+        let res = self
+            .api_request(LaunchRepairOperationRequest {
+                node: if cmd.all_nodes {
+                    "*".to_string()
+                } else {
+                    hex::encode(self.rpc_host)
+                },
+                body: LocalLaunchRepairOperationRequest { repair_type },
+            })
+            .await?;
+
+        let mut table = vec![];
+        for (node, err) in res.error.iter() {
+            table.push(format!("{:.16}\tError: {}", node, err));
+        }
+        for (node, _) in res.success.iter() {
+            table.push(format!("{:.16}\tRepair launched", node));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+}
diff --git a/src/garage/cli_v2/worker.rs b/src/garage/cli_v2/worker.rs
new file mode 100644
index 00000000..9c248a39
--- /dev/null
+++ b/src/garage/cli_v2/worker.rs
@@ -0,0 +1,213 @@
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api_admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+    pub async fn cmd_worker(&self, cmd: WorkerOperation) -> Result<(), Error> {
+        match cmd {
+            WorkerOperation::List { opt } => self.cmd_list_workers(opt).await,
+            WorkerOperation::Info { tid } => self.cmd_worker_info(tid).await,
+            WorkerOperation::Get {
+                all_nodes,
+                variable,
+            } => self.cmd_get_var(all_nodes, variable).await,
+            WorkerOperation::Set {
+                all_nodes,
+                variable,
+                value,
+            } => self.cmd_set_var(all_nodes, variable, value).await,
+        }
+    }
+
+    pub async fn cmd_list_workers(&self, opt: WorkerListOpt) -> Result<(), Error> {
+        let mut list = self
+            .local_api_request(LocalListWorkersRequest {
+                busy_only: opt.busy,
+                error_only: opt.errors,
+            })
+            .await?
+            .0;
+
+        list.sort_by_key(|info| {
+            (
+                match info.state {
+                    WorkerStateResp::Busy | WorkerStateResp::Throttled { .. } => 0,
+                    WorkerStateResp::Idle => 1,
+                    WorkerStateResp::Done => 2,
+                },
+                info.id,
+            )
+        });
+
+        let mut table =
+            vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()];
+        let tf = timeago::Formatter::new();
+        for info in list.iter() {
+            let err_ago = info
+                .last_error
+                .as_ref()
+                .map(|x| tf.convert(Duration::from_secs(x.secs_ago)))
+                .unwrap_or_default();
+            let (total_err, consec_err) = if info.errors > 0 {
+                (info.errors.to_string(), info.consecutive_errors.to_string())
+            } else {
+                ("-".into(), "-".into())
+            };
+
+            table.push(format!(
+                "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
+                info.id,
+                format_worker_state(&info.state),
+                info.name,
+                info.tranquility
+                    .as_ref()
+                    .map(ToString::to_string)
+                    .unwrap_or_else(|| "-".into()),
+                info.progress.as_deref().unwrap_or("-"),
+                info.queue_length
+                    .as_ref()
+                    .map(ToString::to_string)
+                    .unwrap_or_else(|| "-".into()),
+                total_err,
+                consec_err,
+                err_ago,
+            ));
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_worker_info(&self, tid: usize) -> Result<(), Error> {
+        let info = self
+            .local_api_request(LocalGetWorkerInfoRequest { id: tid as u64 })
+            .await?
+            .0;
+
+        let mut table = vec![];
+        table.push(format!("Task id:\t{}", info.id));
+        table.push(format!("Worker name:\t{}", info.name));
+        match &info.state {
+            WorkerStateResp::Throttled { duration_secs } => {
+                table.push(format!(
+                    "Worker state:\tBusy (throttled, paused for {:.3}s)",
+                    duration_secs
+                ));
+            }
+            s => {
+                table.push(format!("Worker state:\t{}", format_worker_state(s)));
+            }
+        };
+        if let Some(tql) = info.tranquility {
+            table.push(format!("Tranquility:\t{}", tql));
+        }
+
+        table.push("".into());
+        table.push(format!("Total errors:\t{}", info.errors));
+        table.push(format!("Consecutive errs:\t{}", info.consecutive_errors));
+        if let Some(err) = info.last_error {
+            table.push(format!("Last error:\t{}", err.message));
+            let tf = timeago::Formatter::new();
+            table.push(format!(
+                "Last error time:\t{}",
+                tf.convert(Duration::from_secs(err.secs_ago))
+            ));
+        }
+
+        table.push("".into());
+        if let Some(p) = info.progress {
+            table.push(format!("Progress:\t{}", p));
+        }
+        if let Some(ql) = info.queue_length {
+            table.push(format!("Queue length:\t{}", ql));
+        }
+        if let Some(pe) = info.persistent_errors {
+            table.push(format!("Persistent errors:\t{}", pe));
+        }
+
+        for (i, s) in info.freeform.iter().enumerate() {
+            if i == 0 {
+                if table.last() != Some(&"".into()) {
+                    table.push("".into());
+                }
+                table.push(format!("Message:\t{}", s));
+            } else {
+                table.push(format!("\t{}", s));
+            }
+        }
+        format_table(table);
+
+        Ok(())
+    }
+
+    pub async fn cmd_get_var(&self, all: bool, var: Option<String>) -> Result<(), Error> {
+        let res = self
+            .api_request(GetWorkerVariableRequest {
+                node: if all {
+                    "*".to_string()
+                } else {
+                    hex::encode(self.rpc_host)
+                },
+                body: LocalGetWorkerVariableRequest { variable: var },
+            })
+            .await?;
+
+        let mut table = vec![];
+        for (node, vars) in res.success.iter() {
+            for (key, val) in vars.0.iter() {
+                table.push(format!("{:.16}\t{}\t{}", node, key, val));
+            }
+        }
+        format_table(table);
+
+        for (node, err) in res.error.iter() {
+            eprintln!("{:.16}: error: {}", node, err);
+        }
+
+        Ok(())
+    }
+
+    pub async fn cmd_set_var(
+        &self,
+        all: bool,
+        variable: String,
+        value: String,
+    ) -> Result<(), Error> {
+        let res = self
+            .api_request(SetWorkerVariableRequest {
+                node: if all {
+                    "*".to_string()
+                } else {
+                    hex::encode(self.rpc_host)
+                },
+                body: LocalSetWorkerVariableRequest { variable, value },
+            })
+            .await?;
+
+        let mut table = vec![];
+        for (node, kv) in res.success.iter() {
+            table.push(format!("{:.16}\t{}\t{}", node, kv.variable, kv.value));
+        }
+        format_table(table);
+
+        for (node, err) in res.error.iter() {
+            eprintln!("{:.16}: error: {}", node, err);
+        }
+
+        Ok(())
+    }
+}
+
+fn format_worker_state(s: &WorkerStateResp) -> &'static str {
+    match s {
+        WorkerStateResp::Busy => "Busy",
+        WorkerStateResp::Throttled { .. } => "Busy*",
+        WorkerStateResp::Idle => "Idle",
+        WorkerStateResp::Done => "Done",
+    }
+}