Diffstat (limited to 'src/garage/cli_v2')
-rw-r--r--   src/garage/cli_v2/bucket.rs   | 523
-rw-r--r--   src/garage/cli_v2/cluster.rs  | 158
-rw-r--r--   src/garage/cli_v2/key.rs      | 227
-rw-r--r--   src/garage/cli_v2/layout.rs   | 284
-rw-r--r--   src/garage/cli_v2/mod.rs      | 106
5 files changed, 1298 insertions, 0 deletions
diff --git a/src/garage/cli_v2/bucket.rs b/src/garage/cli_v2/bucket.rs
new file mode 100644
index 00000000..837ce783
--- /dev/null
+++ b/src/garage/cli_v2/bucket.rs
@@ -0,0 +1,523 @@
+//use bytesize::ByteSize;
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api::admin::api::*;
+
+use crate::cli as cli_v1;
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+	pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> {
+		match cmd {
+			BucketOperation::List => self.cmd_list_buckets().await,
+			BucketOperation::Info(query) => self.cmd_bucket_info(query).await,
+			BucketOperation::Create(query) => self.cmd_create_bucket(query).await,
+			BucketOperation::Delete(query) => self.cmd_delete_bucket(query).await,
+			BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await,
+			BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await,
+			BucketOperation::Allow(query) => self.cmd_bucket_allow(query).await,
+			BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await,
+			BucketOperation::Website(query) => self.cmd_bucket_website(query).await,
+			BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await,
+
+			// TODO
+			x => cli_v1::cmd_admin(
+				&self.admin_rpc_endpoint,
+				self.rpc_host,
+				AdminRpc::BucketOperation(x),
+			)
+			.await
+			.ok_or_message("old error"),
+		}
+	}
+
+	pub async fn cmd_list_buckets(&self) -> Result<(), Error> {
+		let buckets = self.api_request(ListBucketsRequest).await?;
+
+		println!("List of buckets:");
+
+		let mut table = vec![];
+		for bucket in buckets.0.iter() {
+			let local_aliases_n = match &bucket.local_aliases[..] {
+				[] => "".into(),
+				[alias] => format!("{}:{}", alias.access_key_id, alias.alias),
+				s => format!("[{} local aliases]", s.len()),
+			};
+
+			table.push(format!(
+				"\t{}\t{}\t{}",
+				bucket.global_aliases.join(","),
+				local_aliases_n,
+				bucket.id,
+			));
+		}
+		format_table(table);
+
+		Ok(())
+	}
+
+	pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.name),
+			})
+			.await?;
+
+		println!("Bucket: {}", bucket.id);
+
+		let size = bytesize::ByteSize::b(bucket.bytes as u64);
+		println!(
+			"\nSize: {} ({})",
+			size.to_string_as(true),
+			size.to_string_as(false)
+		);
+		println!("Objects: {}", bucket.objects);
+		println!(
+			"Unfinished uploads (multipart and non-multipart): {}",
+			bucket.unfinished_uploads,
+		);
+		println!(
+			"Unfinished multipart uploads: {}",
+			bucket.unfinished_multipart_uploads
+		);
+		let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_upload_bytes as u64);
+		println!(
+			"Size of unfinished multipart uploads: {} ({})",
+			mpu_size.to_string_as(true),
+			mpu_size.to_string_as(false),
+		);
+
+		println!("\nWebsite access: {}", bucket.website_access);
+
+		if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
+			println!("\nQuotas:");
+			if let Some(ms) = bucket.quotas.max_size {
+				let ms = bytesize::ByteSize::b(ms);
+				println!(
+					" maximum size: {} ({})",
+					ms.to_string_as(true),
+					ms.to_string_as(false)
+				);
+			}
+			if let Some(mo) = bucket.quotas.max_objects {
+				println!(" maximum number of objects: {}", mo);
+			}
+		}
+
+		println!("\nGlobal aliases:");
+		for alias in bucket.global_aliases {
+			println!(" {}", alias);
+		}
+
+		println!("\nKey-specific aliases:");
+		let mut table = vec![];
+		for key in bucket.keys.iter() {
+			for alias in key.bucket_local_aliases.iter() {
+				table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias));
+			}
+		}
+		format_table(table);
+
+		println!("\nAuthorized keys:");
+		let mut table = vec![];
+		for key in bucket.keys.iter() {
+			if !(key.permissions.read || key.permissions.write || key.permissions.owner) {
+				continue;
+			}
+			let rflag = if key.permissions.read { "R" } else { " " };
+			let wflag = if key.permissions.write { "W" } else { " " };
+			let oflag = if key.permissions.owner { "O" } else { " " };
+			table.push(format!(
+				"\t{}{}{}\t{}\t{}",
+				rflag, wflag, oflag, key.access_key_id, key.name
+			));
+		}
+		format_table(table);
+
+		Ok(())
+	}
+
+	pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> {
+		self.api_request(CreateBucketRequest {
+			global_alias: Some(opt.name.clone()),
+			local_alias: None,
+		})
+		.await?;
+
+		println!("Bucket {} was created.", opt.name);
+
+		Ok(())
+	}
+
+	pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.name.clone()),
+			})
+			.await?;
+
+		// CLI-only checks: the bucket must not have other aliases
+		if bucket
+			.global_aliases
+			.iter()
+			.find(|a| **a != opt.name)
+			.is_some()
+		{
+			return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
+		}
+
+		if bucket
+			.keys
+			.iter()
+			.any(|k| !k.bucket_local_aliases.is_empty())
+		{
+			return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
+		}
+
+		if !opt.yes {
+			println!("About to delete bucket {}.", bucket.id);
+			return Err(Error::Message(
+				"Add --yes flag to really perform this operation".to_string(),
+			));
+		}
+
+		self.api_request(DeleteBucketRequest {
+			id: bucket.id.clone(),
+		})
+		.await?;
+
+		println!("Bucket {} has been deleted.", bucket.id);
+
+		Ok(())
+	}
+
+	pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.existing_bucket.clone()),
+			})
+			.await?;
+
+		if let Some(key_pat) = &opt.local {
+			let key = self
+				.api_request(GetKeyInfoRequest {
+					search: Some(key_pat.clone()),
+					id: None,
+					show_secret_key: false,
+				})
+				.await?;
+
+			self.api_request(AddBucketAliasRequest {
+				bucket_id: bucket.id.clone(),
+				alias: BucketAliasEnum::Local {
+					local_alias: opt.new_name.clone(),
+					access_key_id: key.access_key_id.clone(),
+				},
+			})
+			.await?;
+
+			println!(
+				"Alias {} now points to bucket {:.16} in namespace of key {}",
+				opt.new_name, bucket.id, key.access_key_id
+			)
+		} else {
+			self.api_request(AddBucketAliasRequest {
+				bucket_id: bucket.id.clone(),
+				alias: BucketAliasEnum::Global {
+					global_alias: opt.new_name.clone(),
+				},
+			})
+			.await?;
+
+			println!(
+				"Alias {} now points to bucket {:.16}",
+				opt.new_name, bucket.id
+			)
+		}
+
+		Ok(())
+	}
+
+	pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> {
+		if let Some(key_pat) = &opt.local {
+			let key = self
+				.api_request(GetKeyInfoRequest {
+					search: Some(key_pat.clone()),
+					id: None,
+					show_secret_key: false,
+				})
+				.await?;
+
+			let bucket = key
+				.buckets
+				.iter()
+				.find(|x| x.local_aliases.contains(&opt.name))
+				.ok_or_message(format!(
+					"No bucket called {} in namespace of key {}",
+					opt.name, key.access_key_id
+				))?;
+
+			self.api_request(RemoveBucketAliasRequest {
+				bucket_id: bucket.id.clone(),
+				alias: BucketAliasEnum::Local {
+					access_key_id: key.access_key_id.clone(),
+					local_alias: opt.name.clone(),
+				},
+			})
+			.await?;
+
+			println!(
+				"Alias {} no longer points to bucket {:.16} in namespace of key {}",
+				&opt.name, bucket.id, key.access_key_id
+			)
+		} else {
+			let bucket = self
+				.api_request(GetBucketInfoRequest {
+					id: None,
+					global_alias: Some(opt.name.clone()),
+					search: None,
+				})
+				.await?;
+
+			self.api_request(RemoveBucketAliasRequest {
+				bucket_id: bucket.id.clone(),
+				alias: BucketAliasEnum::Global {
+					global_alias: opt.name.clone(),
+				},
+			})
+			.await?;
+
+			println!(
+				"Alias {} no longer points to bucket {:.16}",
+				opt.name, bucket.id
+			)
+		}
+
+		Ok(())
+	}
+
+	pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.bucket.clone()),
+			})
+			.await?;
+
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern.clone()),
+				show_secret_key: false,
+			})
+			.await?;
+
+		self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
+			bucket_id: bucket.id.clone(),
+			access_key_id: key.access_key_id.clone(),
+			permissions: ApiBucketKeyPerm {
+				read: opt.read,
+				write: opt.write,
+				owner: opt.owner,
+			},
+		}))
+		.await?;
+
+		let new_bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: Some(bucket.id),
+				global_alias: None,
+				search: None,
+			})
+			.await?;
+
+		if let Some(new_key) = new_bucket
+			.keys
+			.iter()
+			.find(|k| k.access_key_id == key.access_key_id)
+		{
+			println!(
+				"New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
+				key.access_key_id,
+				new_bucket.id,
+				new_key.permissions.read,
+				new_key.permissions.write,
+				new_key.permissions.owner
+			);
+		} else {
+			println!(
+				"Access key {} has no permissions on bucket {:.16}",
+				key.access_key_id, new_bucket.id
+			);
+		}
+
+		Ok(())
+	}
+
+	pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.bucket.clone()),
+			})
+			.await?;
+
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern.clone()),
+				show_secret_key: false,
+			})
+			.await?;
+
+		self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
+			bucket_id: bucket.id.clone(),
+			access_key_id: key.access_key_id.clone(),
+			permissions: ApiBucketKeyPerm {
+				read: opt.read,
+				write: opt.write,
+				owner: opt.owner,
+			},
+		}))
+		.await?;
+
+		let new_bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: Some(bucket.id),
+				global_alias: None,
+				search: None,
+			})
+			.await?;
+
+		if let Some(new_key) = new_bucket
+			.keys
+			.iter()
+			.find(|k| k.access_key_id == key.access_key_id)
+		{
+			println!(
+				"New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
+				key.access_key_id,
+				new_bucket.id,
+				new_key.permissions.read,
+				new_key.permissions.write,
+				new_key.permissions.owner
+			);
+		} else {
+			println!(
+				"Access key {} no longer has permissions on bucket {:.16}",
+				key.access_key_id, new_bucket.id
+			);
+		}
+
+		Ok(())
+	}
+
+	pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.bucket.clone()),
+			})
+			.await?;
+
+		if !(opt.allow ^ opt.deny) {
+			return Err(Error::Message(
+				"You must specify exactly one flag, either --allow or --deny".to_string(),
+			));
+		}
+
+		let wa = if opt.allow {
+			UpdateBucketWebsiteAccess {
+				enabled: true,
+				index_document: Some(opt.index_document.clone()),
+				error_document: opt
+					.error_document
+					.or(bucket.website_config.and_then(|x| x.error_document.clone())),
+			}
+		} else {
+			UpdateBucketWebsiteAccess {
+				enabled: false,
+				index_document: None,
+				error_document: None,
+			}
+		};
+
+		self.api_request(UpdateBucketRequest {
+			id: bucket.id,
+			body: UpdateBucketRequestBody {
+				website_access: Some(wa),
+				quotas: None,
+			},
+		})
+		.await?;
+
+		if opt.allow {
+			println!("Website access allowed for {}", &opt.bucket);
+		} else {
+			println!("Website access denied for {}", &opt.bucket);
+		}
+
+		Ok(())
+	}
+
+	pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> {
+		let bucket = self
+			.api_request(GetBucketInfoRequest {
+				id: None,
+				global_alias: None,
+				search: Some(opt.bucket.clone()),
+			})
+			.await?;
+
+		if opt.max_size.is_none() && opt.max_objects.is_none() {
+			return Err(Error::Message(
+				"You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
+			));
+		}
+
+		let new_quotas = ApiBucketQuotas {
+			max_size: match opt.max_size.as_deref() {
+				Some("none") => None,
+				Some(v) => Some(
+					v.parse::<bytesize::ByteSize>()
+						.ok_or_message(format!("Invalid size specified: {}", v))?
+						.as_u64(),
+				),
+				None => bucket.quotas.max_size,
+			},
+			max_objects: match opt.max_objects.as_deref() {
+				Some("none") => None,
+				Some(v) => Some(
+					v.parse::<u64>()
+						.ok_or_message(format!("Invalid number: {}", v))?,
+				),
+				None => bucket.quotas.max_objects,
+			},
+		};
+
+		self.api_request(UpdateBucketRequest {
+			id: bucket.id.clone(),
+			body: UpdateBucketRequestBody {
+				website_access: None,
+				quotas: Some(new_quotas),
+			},
+		})
+		.await?;
+
+		println!("Quotas updated for bucket {:.16}", bucket.id);
+
+		Ok(())
+	}
+}
diff --git a/src/garage/cli_v2/cluster.rs b/src/garage/cli_v2/cluster.rs
new file mode 100644
index 00000000..34a28674
--- /dev/null
+++ b/src/garage/cli_v2/cluster.rs
@@ -0,0 +1,158 @@
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api::admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::layout::*;
+use crate::cli_v2::*;
+
+impl Cli {
+	pub async fn cmd_status(&self) -> Result<(), Error> {
+		let status = self.api_request(GetClusterStatusRequest).await?;
+		let layout = self.api_request(GetClusterLayoutRequest).await?;
+
+		println!("==== HEALTHY NODES ====");
+
+		let mut healthy_nodes =
+			vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail".to_string()];
+
+		for adv in status.nodes.iter().filter(|adv| adv.is_up) {
+			let host = adv.hostname.as_deref().unwrap_or("?");
+			let addr = match adv.addr {
+				Some(addr) => addr.to_string(),
+				None => "N/A".to_string(),
+			};
+			if let Some(cfg) = &adv.role {
+				let data_avail = match &adv.data_partition {
+					_ if cfg.capacity.is_none() => "N/A".into(),
+					Some(FreeSpaceResp { available, total }) => {
+						let pct = (*available as f64) / (*total as f64) * 100.;
+						let avail_str = bytesize::ByteSize::b(*available);
+						format!("{} ({:.1}%)", avail_str, pct)
+					}
+					None => "?".into(),
+				};
+				healthy_nodes.push(format!(
+					"{id:.16}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
+					id = adv.id,
+					host = host,
+					addr = addr,
+					tags = cfg.tags.join(","),
+					zone = cfg.zone,
+					capacity = capacity_string(cfg.capacity),
+					data_avail = data_avail,
+				));
+			} else {
+				let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
+					Some(NodeRoleChange {
+						action: NodeRoleChangeEnum::Update { .. },
+						..
+					}) => "pending...",
+					_ if adv.draining => "draining metadata..",
+					_ => "NO ROLE ASSIGNED",
+				};
+				healthy_nodes.push(format!(
+					"{id:.16}\t{h}\t{addr}\t\t\t{status}",
+					id = adv.id,
+					h = host,
+					addr = addr,
+					status = status,
+				));
+			}
+		}
+		format_table(healthy_nodes);
+
+		let tf = timeago::Formatter::new();
+		let mut drain_msg = false;
+		let mut failed_nodes = vec!["ID\tHostname\tTags\tZone\tCapacity\tLast seen".to_string()];
+		for adv in status.nodes.iter().filter(|x| !x.is_up) {
+			let node = &adv.id;
+
+			let host = adv.hostname.as_deref().unwrap_or("?");
+			let last_seen = adv
+				.last_seen_secs_ago
+				.map(|s| tf.convert(Duration::from_secs(s)))
+				.unwrap_or_else(|| "never seen".into());
+
+			if let Some(cfg) = &adv.role {
+				let capacity = capacity_string(cfg.capacity);
+
+				failed_nodes.push(format!(
+					"{id:.16}\t{host}\t[{tags}]\t{zone}\t{capacity}\t{last_seen}",
+					id = node,
+					host = host,
+					tags = cfg.tags.join(","),
+					zone = cfg.zone,
+					capacity = capacity,
+					last_seen = last_seen,
+				));
+			} else {
+				let status = match layout.staged_role_changes.iter().find(|x| x.id == adv.id) {
+					Some(NodeRoleChange {
+						action: NodeRoleChangeEnum::Update { .. },
+						..
+					}) => "pending...",
+					_ if adv.draining => {
+						drain_msg = true;
+						"draining metadata.."
+					}
+					_ => unreachable!(),
+				};
+
+				failed_nodes.push(format!(
+					"{id:.16}\t{host}\t\t\t{status}\t{last_seen}",
+					id = node,
+					host = host,
+					status = status,
+					last_seen = last_seen,
+				));
+			}
+		}
+
+		if failed_nodes.len() > 1 {
+			println!("\n==== FAILED NODES ====");
+			format_table(failed_nodes);
+			if drain_msg {
+				println!();
+				println!("Your cluster is expecting to drain data from nodes that are currently unavailable.");
+				println!(
+					"If these nodes are definitely dead, please review the layout history with"
+				);
+				println!(
+					"`garage layout history` and use `garage layout skip-dead-nodes` to force progress."
+				);
+			}
+		}
+
+		if print_staging_role_changes(&layout) {
+			println!();
+			println!(
+				"Please use `garage layout show` to check the proposed new layout and apply it."
+			);
+			println!();
+		}
+
+		Ok(())
+	}
+
+	pub async fn cmd_connect(&self, opt: ConnectNodeOpt) -> Result<(), Error> {
+		let res = self
+			.api_request(ConnectClusterNodesRequest(vec![opt.node]))
+			.await?;
+		if res.0.len() != 1 {
+			return Err(Error::Message(format!("unexpected response: {:?}", res)));
+		}
+		let res = res.0.into_iter().next().unwrap();
+		if res.success {
+			println!("Success.");
+			Ok(())
+		} else {
+			Err(Error::Message(format!(
+				"Failure: {}",
+				res.error.unwrap_or_default()
+			)))
+		}
+	}
+}
diff --git a/src/garage/cli_v2/key.rs b/src/garage/cli_v2/key.rs
new file mode 100644
index 00000000..ff403a9a
--- /dev/null
+++ b/src/garage/cli_v2/key.rs
@@ -0,0 +1,227 @@
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api::admin::api::*;
+
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+	pub async fn cmd_key(&self, cmd: KeyOperation) -> Result<(), Error> {
+		match cmd {
+			KeyOperation::List => self.cmd_list_keys().await,
+			KeyOperation::Info(query) => self.cmd_key_info(query).await,
+			KeyOperation::Create(query) => self.cmd_create_key(query).await,
+			KeyOperation::Rename(query) => self.cmd_rename_key(query).await,
+			KeyOperation::Delete(query) => self.cmd_delete_key(query).await,
+			KeyOperation::Allow(query) => self.cmd_allow_key(query).await,
+			KeyOperation::Deny(query) => self.cmd_deny_key(query).await,
+			KeyOperation::Import(query) => self.cmd_import_key(query).await,
+		}
+	}
+
+	pub async fn cmd_list_keys(&self) -> Result<(), Error> {
+		let keys = self.api_request(ListKeysRequest).await?;
+
+		println!("List of keys:");
+		let mut table = vec![];
+		for key in keys.0.iter() {
+			table.push(format!("\t{}\t{}", key.id, key.name));
+		}
+		format_table(table);
+
+		Ok(())
+	}
+
+	pub async fn cmd_key_info(&self, opt: KeyInfoOpt) -> Result<(), Error> {
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern),
+				show_secret_key: opt.show_secret,
+			})
+			.await?;
+
+		print_key_info(&key);
+
+		Ok(())
+	}
+
+	pub async fn cmd_create_key(&self, opt: KeyNewOpt) -> Result<(), Error> {
+		let key = self
+			.api_request(CreateKeyRequest {
+				name: Some(opt.name),
+			})
+			.await?;
+
+		print_key_info(&key.0);
+
+		Ok(())
+	}
+
+	pub async fn cmd_rename_key(&self, opt: KeyRenameOpt) -> Result<(), Error> {
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern),
+				show_secret_key: false,
+			})
+			.await?;
+
+		let new_key = self
+			.api_request(UpdateKeyRequest {
+				id: key.access_key_id,
+				body: UpdateKeyRequestBody {
+					name: Some(opt.new_name),
+					allow: None,
+					deny: None,
+				},
+			})
+			.await?;
+
+		print_key_info(&new_key.0);
+
+		Ok(())
+	}
+
+	pub async fn cmd_delete_key(&self, opt: KeyDeleteOpt) -> Result<(), Error> {
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern),
+				show_secret_key: false,
+			})
+			.await?;
+
+		if !opt.yes {
+			println!("About to delete key {}...", key.access_key_id);
+			return Err(Error::Message(
+				"Add --yes flag to really perform this operation".to_string(),
+			));
+		}
+
+		self.api_request(DeleteKeyRequest {
+			id: key.access_key_id.clone(),
+		})
+		.await?;
+
+		println!("Access key {} has been deleted.", key.access_key_id);
+
+		Ok(())
+	}
+
+	pub async fn cmd_allow_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern),
+				show_secret_key: false,
+			})
+			.await?;
+
+		let new_key = self
+			.api_request(UpdateKeyRequest {
+				id: key.access_key_id,
+				body: UpdateKeyRequestBody {
+					name: None,
+					allow: Some(KeyPerm {
+						create_bucket: opt.create_bucket,
+					}),
+					deny: None,
+				},
+			})
+			.await?;
+
+		print_key_info(&new_key.0);
+
+		Ok(())
+	}
+
+	pub async fn cmd_deny_key(&self, opt: KeyPermOpt) -> Result<(), Error> {
+		let key = self
+			.api_request(GetKeyInfoRequest {
+				id: None,
+				search: Some(opt.key_pattern),
+				show_secret_key: false,
+			})
+			.await?;
+
+		let new_key = self
+			.api_request(UpdateKeyRequest {
+				id: key.access_key_id,
+				body: UpdateKeyRequestBody {
+					name: None,
+					allow: None,
+					deny: Some(KeyPerm {
+						create_bucket: opt.create_bucket,
+					}),
+				},
+			})
+			.await?;
+
+		print_key_info(&new_key.0);
+
+		Ok(())
+	}
+
+	pub async fn cmd_import_key(&self, opt: KeyImportOpt) -> Result<(), Error> {
+		if !opt.yes {
+			return Err(Error::Message("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string()));
+		}
+
+		let new_key = self
+			.api_request(ImportKeyRequest {
+				name: Some(opt.name),
+				access_key_id: opt.key_id,
+				secret_access_key: opt.secret_key,
+			})
+			.await?;
+
+		print_key_info(&new_key.0);
+
+		Ok(())
+	}
+}
+
+fn print_key_info(key: &GetKeyInfoResponse) {
+	println!("Key name: {}", key.name);
+	println!("Key ID: {}", key.access_key_id);
+	println!(
+		"Secret key: {}",
+		key.secret_access_key.as_deref().unwrap_or("(redacted)")
+	);
+	println!("Can create buckets: {}", key.permissions.create_bucket);
+
+	println!("\nKey-specific bucket aliases:");
+	let mut table = vec![];
+	for bucket in key.buckets.iter() {
+		for la in bucket.local_aliases.iter() {
+			table.push(format!(
+				"\t{}\t{}\t{}",
+				la,
+				bucket.global_aliases.join(","),
+				bucket.id
+			));
+		}
+	}
+	format_table(table);
+
+	println!("\nAuthorized buckets:");
+	let mut table = vec![];
+	for bucket in key.buckets.iter() {
+		let rflag = if bucket.permissions.read { "R" } else { " " };
+		let wflag = if bucket.permissions.write { "W" } else { " " };
+		let oflag = if bucket.permissions.owner { "O" } else { " " };
+		table.push(format!(
+			"\t{}{}{}\t{}\t{}\t{:.16}",
+			rflag,
+			wflag,
+			oflag,
+			bucket.global_aliases.join(","),
+			bucket.local_aliases.join(","),
+			bucket.id
+		));
+	}
+	format_table(table);
+}
diff --git a/src/garage/cli_v2/layout.rs b/src/garage/cli_v2/layout.rs
new file mode 100644
index 00000000..d44771c7
--- /dev/null
+++ b/src/garage/cli_v2/layout.rs
@@ -0,0 +1,284 @@
+use bytesize::ByteSize;
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api::admin::api::*;
+
+use crate::cli::layout as cli_v1;
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+	pub async fn layout_command_dispatch(&self, cmd: LayoutOperation) -> Result<(), Error> {
+		match cmd {
+			LayoutOperation::Assign(assign_opt) => self.cmd_assign_role(assign_opt).await,
+			LayoutOperation::Remove(remove_opt) => self.cmd_remove_role(remove_opt).await,
+			LayoutOperation::Apply(apply_opt) => self.cmd_apply_layout(apply_opt).await,
+			LayoutOperation::Revert(revert_opt) => self.cmd_revert_layout(revert_opt).await,
+
+			// TODO
+			LayoutOperation::Show => {
+				cli_v1::cmd_show_layout(&self.system_rpc_endpoint, self.rpc_host).await
+			}
+			LayoutOperation::Config(config_opt) => {
+				cli_v1::cmd_config_layout(&self.system_rpc_endpoint, self.rpc_host, config_opt)
+					.await
+			}
+			LayoutOperation::History => {
+				cli_v1::cmd_layout_history(&self.system_rpc_endpoint, self.rpc_host).await
+			}
+			LayoutOperation::SkipDeadNodes(assume_sync_opt) => {
+				cli_v1::cmd_layout_skip_dead_nodes(
+					&self.system_rpc_endpoint,
+					self.rpc_host,
+					assume_sync_opt,
+				)
+				.await
+			}
+		}
+	}
+
+	pub async fn cmd_assign_role(&self, opt: AssignRoleOpt) -> Result<(), Error> {
+		let status = self.api_request(GetClusterStatusRequest).await?;
+		let layout = self.api_request(GetClusterLayoutRequest).await?;
+
+		let all_node_ids_iter = status
+			.nodes
+			.iter()
+			.map(|x| x.id.as_str())
+			.chain(layout.roles.iter().map(|x| x.id.as_str()));
+
+		let mut actions = vec![];
+
+		for node in opt.replace.iter() {
+			let id = find_matching_node(all_node_ids_iter.clone(), &node)?;
+
+			actions.push(NodeRoleChange {
+				id,
+				action: NodeRoleChangeEnum::Remove { remove: true },
+			});
+		}
+
+		for node in opt.node_ids.iter() {
+			let id = find_matching_node(all_node_ids_iter.clone(), &node)?;
+
+			let current = get_staged_or_current_role(&id, &layout);
+
+			let zone = opt
+				.zone
+				.clone()
+				.or_else(|| current.as_ref().map(|c| c.zone.clone()))
+				.ok_or_message("Please specify a zone with the -z flag")?;
+
+			let capacity = if opt.gateway {
+				if opt.capacity.is_some() {
+					return Err(Error::Message("Please specify only -c or -g".into()));
+				}
+				None
+			} else if let Some(cap) = opt.capacity {
+				Some(cap.as_u64())
+			} else {
+				current.as_ref().ok_or_message("Please specify a capacity with the -c flag, or set node explicitly as gateway with -g")?.capacity
+			};
+
+			let tags = if !opt.tags.is_empty() {
+				opt.tags.clone()
+			} else if let Some(cur) = current.as_ref() {
+				cur.tags.clone()
+			} else {
+				vec![]
+			};
+
+			actions.push(NodeRoleChange {
+				id,
+				action: NodeRoleChangeEnum::Update {
+					zone,
+					capacity,
+					tags,
+				},
+			});
+		}
+
+		self.api_request(UpdateClusterLayoutRequest(actions))
+			.await?;
+
+		println!("Role changes are staged but not yet committed.");
+		println!("Use `garage layout show` to view staged role changes,");
+		println!("and `garage layout apply` to enact staged changes.");
+		Ok(())
+	}
+
+	pub async fn cmd_remove_role(&self, opt: RemoveRoleOpt) -> Result<(), Error> {
+		let status = self.api_request(GetClusterStatusRequest).await?;
+		let layout = self.api_request(GetClusterLayoutRequest).await?;
+
+		let all_node_ids_iter = status
+			.nodes
+			.iter()
+			.map(|x| x.id.as_str())
+			.chain(layout.roles.iter().map(|x| x.id.as_str()));
+
+		let id = find_matching_node(all_node_ids_iter.clone(), &opt.node_id)?;
+
+		let actions = vec![NodeRoleChange {
+			id,
+			action: NodeRoleChangeEnum::Remove { remove: true },
+		}];
+
+		self.api_request(UpdateClusterLayoutRequest(actions))
+			.await?;
+
+		println!("Role removal is staged but not yet committed.");
+		println!("Use `garage layout show` to view staged role changes,");
+		println!("and `garage layout apply` to enact staged changes.");
+		Ok(())
+	}
+
+	pub async fn cmd_apply_layout(&self, apply_opt: ApplyLayoutOpt) -> Result<(), Error> {
+		let missing_version_error = r#"
+Please pass the new layout version number to ensure that you are writing the correct version of the cluster layout.
+To know the correct value of the new layout version, invoke `garage layout show` and review the proposed changes.
+ "#; + + let req = ApplyClusterLayoutRequest { + version: apply_opt.version.ok_or_message(missing_version_error)?, + }; + let res = self.api_request(req).await?; + + for line in res.message.iter() { + println!("{}", line); + } + + println!("New cluster layout with updated role assignment has been applied in cluster."); + println!("Data will now be moved around between nodes accordingly."); + + Ok(()) + } + + pub async fn cmd_revert_layout(&self, revert_opt: RevertLayoutOpt) -> Result<(), Error> { + if !revert_opt.yes { + return Err(Error::Message( + "Please add the --yes flag to run the layout revert operation".into(), + )); + } + + self.api_request(RevertClusterLayoutRequest).await?; + + println!("All proposed role changes in cluster layout have been canceled."); + Ok(()) + } +} + +// -------------------------- +// ---- helper functions ---- +// -------------------------- + +pub fn capacity_string(v: Option<u64>) -> String { + match v { + Some(c) => ByteSize::b(c).to_string_as(false), + None => "gateway".to_string(), + } +} + +pub fn get_staged_or_current_role( + id: &str, + layout: &GetClusterLayoutResponse, +) -> Option<NodeRoleResp> { + for node in layout.staged_role_changes.iter() { + if node.id == id { + return match &node.action { + NodeRoleChangeEnum::Remove { .. } => None, + NodeRoleChangeEnum::Update { + zone, + capacity, + tags, + } => Some(NodeRoleResp { + id: id.to_string(), + zone: zone.to_string(), + capacity: *capacity, + tags: tags.clone(), + }), + }; + } + } + + for node in layout.roles.iter() { + if node.id == id { + return Some(node.clone()); + } + } + + None +} + +pub fn find_matching_node<'a>( + cand: impl std::iter::Iterator<Item = &'a str>, + pattern: &'a str, +) -> Result<String, Error> { + let mut candidates = vec![]; + for c in cand { + if c.starts_with(pattern) && !candidates.contains(&c) { + candidates.push(c); + } + } + if candidates.len() != 1 { + Err(Error::Message(format!( + "{} nodes match '{}'", + candidates.len(), + pattern, + ))) + } else { + Ok(candidates[0].to_string()) + } +} + +pub fn print_staging_role_changes(layout: &GetClusterLayoutResponse) -> bool { + let has_role_changes = !layout.staged_role_changes.is_empty(); + + // TODO!! Layout parameters + let has_layout_changes = false; + + if has_role_changes || has_layout_changes { + println!(); + println!("==== STAGED ROLE CHANGES ===="); + if has_role_changes { + let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; + for change in layout.staged_role_changes.iter() { + match &change.action { + NodeRoleChangeEnum::Update { + tags, + zone, + capacity, + } => { + let tags = tags.join(","); + table.push(format!( + "{:.16}\t{}\t{}\t{}", + change.id, + tags, + zone, + capacity_string(*capacity), + )); + } + NodeRoleChangeEnum::Remove { .. 
+						table.push(format!("{:.16}\tREMOVED", change.id));
+					}
+				}
+			}
+			format_table(table);
+			println!();
+		}
+		//TODO
+		/*
+		if has_layout_changes {
+			println!(
+				"Zone redundancy: {}",
+				staging.parameters.get().zone_redundancy
+			);
+		}
+		*/
+		true
+	} else {
+		false
+	}
+}
diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs
new file mode 100644
index 00000000..692b7c1c
--- /dev/null
+++ b/src/garage/cli_v2/mod.rs
@@ -0,0 +1,106 @@
+pub mod bucket;
+pub mod cluster;
+pub mod key;
+pub mod layout;
+
+use std::convert::TryFrom;
+use std::sync::Arc;
+use std::time::Duration;
+
+use garage_util::error::*;
+
+use garage_rpc::system::*;
+use garage_rpc::*;
+
+use garage_api::admin::api::*;
+use garage_api::admin::EndpointHandler as AdminApiEndpoint;
+
+use crate::admin::*;
+use crate::cli as cli_v1;
+use crate::cli::structs::*;
+use crate::cli::Command;
+
+pub struct Cli {
+	pub system_rpc_endpoint: Arc<Endpoint<SystemRpc, ()>>,
+	pub admin_rpc_endpoint: Arc<Endpoint<AdminRpc, ()>>,
+	pub rpc_host: NodeID,
+}
+
+impl Cli {
+	pub async fn handle(&self, cmd: Command) -> Result<(), Error> {
+		match cmd {
+			Command::Status => self.cmd_status().await,
+			Command::Node(NodeOperation::Connect(connect_opt)) => {
+				self.cmd_connect(connect_opt).await
+			}
+			Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await,
+			Command::Bucket(bo) => self.cmd_bucket(bo).await,
+			Command::Key(ko) => self.cmd_key(ko).await,
+
+			// TODO
+			Command::Repair(ro) => cli_v1::cmd_admin(
+				&self.admin_rpc_endpoint,
+				self.rpc_host,
+				AdminRpc::LaunchRepair(ro),
+			)
+			.await
+			.ok_or_message("cli_v1"),
+			Command::Stats(so) => {
+				cli_v1::cmd_admin(&self.admin_rpc_endpoint, self.rpc_host, AdminRpc::Stats(so))
+					.await
+					.ok_or_message("cli_v1")
+			}
+			Command::Worker(wo) => cli_v1::cmd_admin(
+				&self.admin_rpc_endpoint,
+				self.rpc_host,
+				AdminRpc::Worker(wo),
+			)
+			.await
+			.ok_or_message("cli_v1"),
+			Command::Block(bo) => cli_v1::cmd_admin(
+				&self.admin_rpc_endpoint,
+				self.rpc_host,
+				AdminRpc::BlockOperation(bo),
+			)
+			.await
+			.ok_or_message("cli_v1"),
+			Command::Meta(mo) => cli_v1::cmd_admin(
+				&self.admin_rpc_endpoint,
+				self.rpc_host,
+				AdminRpc::MetaOperation(mo),
+			)
+			.await
+			.ok_or_message("cli_v1"),
+
+			_ => unreachable!(),
+		}
+	}
+
+	pub async fn api_request<T>(&self, req: T) -> Result<<T as AdminApiEndpoint>::Response, Error>
+	where
+		T: AdminApiEndpoint,
+		AdminApiRequest: From<T>,
+		<T as AdminApiEndpoint>::Response: TryFrom<TaggedAdminApiResponse>,
+	{
+		let req = AdminApiRequest::from(req);
+		let req_name = req.name();
+		match self
+			.admin_rpc_endpoint
+			.call(&self.rpc_host, AdminRpc::ApiRequest(req), PRIO_NORMAL)
+			.await?
+			.ok_or_message("rpc")?
+		{
+			AdminRpc::ApiOkResponse(resp) => <T as AdminApiEndpoint>::Response::try_from(resp)
+				.map_err(|_| Error::Message(format!("{} returned unexpected response", req_name))),
+			AdminRpc::ApiErrorResponse {
+				http_code,
+				error_code,
+				message,
+			} => Err(Error::Message(format!(
+				"{} returned {} ({}): {}",
+				req_name, error_code, http_code, message
+			))),
+			m => Err(Error::unexpected_rpc_message(m)),
+		}
+	}
+}
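
Usage sketch (not part of this commit): every command in this new cli_v2 module follows the same pattern, driving a typed admin API request through the generic api_request helper defined in mod.rs above. The sketch below uses only types from this diff; the command name cmd_count_buckets is hypothetical, for illustration.

	impl Cli {
		// Hypothetical example command: `api_request` wraps the typed request
		// in AdminRpc::ApiRequest, sends it to self.rpc_host over RPC, and
		// decodes the TaggedAdminApiResponse back into the matching typed
		// response (here, ListBucketsResponse).
		pub async fn cmd_count_buckets(&self) -> Result<(), Error> {
			let buckets = self.api_request(ListBucketsRequest).await?;
			println!("{} buckets in cluster", buckets.0.len());
			Ok(())
		}
	}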