about summary refs log tree commit diff
path: root/src/garage
diff options
context:
space:
mode:
authorAlex Auvolat <lx@deuxfleurs.fr>2025-01-30 13:36:25 +0100
committerAlex Auvolat <lx@deuxfleurs.fr>2025-01-30 13:36:25 +0100
commitf37d5d2b08b008eba7b1ee8d84b08d5fddeabf78 (patch)
tree453085c608fc35687f5207bffc2fc5a007d918e2 /src/garage
parent819f4f00509a57097d0ee8291e1556829e982e14 (diff)
downloadgarage-f37d5d2b08b008eba7b1ee8d84b08d5fddeabf78.tar.gz
garage-f37d5d2b08b008eba7b1ee8d84b08d5fddeabf78.zip
admin api: convert most bucket operations
Diffstat (limited to 'src/garage')
-rw-r--r--src/garage/admin/bucket.rs449
-rw-r--r--src/garage/admin/mod.rs1
-rw-r--r--src/garage/cli/cmd.rs11
-rw-r--r--src/garage/cli/util.rs137
-rw-r--r--src/garage/cli_v2/bucket.rs523
-rw-r--r--src/garage/cli_v2/mod.rs9
6 files changed, 528 insertions, 602 deletions
diff --git a/src/garage/admin/bucket.rs b/src/garage/admin/bucket.rs
index 1bdc6086..26d54084 100644
--- a/src/garage/admin/bucket.rs
+++ b/src/garage/admin/bucket.rs
@@ -1,15 +1,6 @@
-use std::collections::HashMap;
use std::fmt::Write;
-use garage_util::crdt::*;
-use garage_util::time::*;
-
-use garage_table::*;
-
-use garage_model::bucket_alias_table::*;
-use garage_model::bucket_table::*;
use garage_model::helper::error::{Error, OkOrBadRequest};
-use garage_model::permission::*;
use crate::cli::*;
@@ -18,451 +9,13 @@ use super::*;
impl AdminRpcHandler {
pub(super) async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
match cmd {
- BucketOperation::List => self.handle_list_buckets().await,
- BucketOperation::Info(query) => self.handle_bucket_info(query).await,
- BucketOperation::Create(query) => self.handle_create_bucket(&query.name).await,
- BucketOperation::Delete(query) => self.handle_delete_bucket(query).await,
- BucketOperation::Alias(query) => self.handle_alias_bucket(query).await,
- BucketOperation::Unalias(query) => self.handle_unalias_bucket(query).await,
- BucketOperation::Allow(query) => self.handle_bucket_allow(query).await,
- BucketOperation::Deny(query) => self.handle_bucket_deny(query).await,
- BucketOperation::Website(query) => self.handle_bucket_website(query).await,
- BucketOperation::SetQuotas(query) => self.handle_bucket_set_quotas(query).await,
BucketOperation::CleanupIncompleteUploads(query) => {
self.handle_bucket_cleanup_incomplete_uploads(query).await
}
+ _ => unreachable!(),
}
}
- async fn handle_list_buckets(&self) -> Result<AdminRpc, Error> {
- let buckets = self
- .garage
- .bucket_table
- .get_range(
- &EmptyKey,
- None,
- Some(DeletedFilter::NotDeleted),
- 10000,
- EnumerationOrder::Forward,
- )
- .await?;
-
- Ok(AdminRpc::BucketList(buckets))
- }
-
- async fn handle_bucket_info(&self, query: &BucketOpt) -> Result<AdminRpc, Error> {
- let bucket_id = self
- .garage
- .bucket_helper()
- .admin_get_existing_matching_bucket(&query.name)
- .await?;
-
- let bucket = self
- .garage
- .bucket_helper()
- .get_existing_bucket(bucket_id)
- .await?;
-
- let counters = self
- .garage
- .object_counter_table
- .table
- .get(&bucket_id, &EmptyKey)
- .await?
- .map(|x| x.filtered_values(&self.garage.system.cluster_layout()))
- .unwrap_or_default();
-
- let mpu_counters = self
- .garage
- .mpu_counter_table
- .table
- .get(&bucket_id, &EmptyKey)
- .await?
- .map(|x| x.filtered_values(&self.garage.system.cluster_layout()))
- .unwrap_or_default();
-
- let mut relevant_keys = HashMap::new();
- for (k, _) in bucket
- .state
- .as_option()
- .unwrap()
- .authorized_keys
- .items()
- .iter()
- {
- if let Some(key) = self
- .garage
- .key_table
- .get(&EmptyKey, k)
- .await?
- .filter(|k| !k.is_deleted())
- {
- relevant_keys.insert(k.clone(), key);
- }
- }
- for ((k, _), _, _) in bucket
- .state
- .as_option()
- .unwrap()
- .local_aliases
- .items()
- .iter()
- {
- if relevant_keys.contains_key(k) {
- continue;
- }
- if let Some(key) = self.garage.key_table.get(&EmptyKey, k).await? {
- relevant_keys.insert(k.clone(), key);
- }
- }
-
- Ok(AdminRpc::BucketInfo {
- bucket,
- relevant_keys,
- counters,
- mpu_counters,
- })
- }
-
- #[allow(clippy::ptr_arg)]
- async fn handle_create_bucket(&self, name: &String) -> Result<AdminRpc, Error> {
- if !is_valid_bucket_name(name) {
- return Err(Error::BadRequest(format!(
- "{}: {}",
- name, INVALID_BUCKET_NAME_MESSAGE
- )));
- }
-
- let helper = self.garage.locked_helper().await;
-
- if let Some(alias) = self.garage.bucket_alias_table.get(&EmptyKey, name).await? {
- if alias.state.get().is_some() {
- return Err(Error::BadRequest(format!("Bucket {} already exists", name)));
- }
- }
-
- // ---- done checking, now commit ----
-
- let bucket = Bucket::new();
- self.garage.bucket_table.insert(&bucket).await?;
-
- helper.set_global_bucket_alias(bucket.id, name).await?;
-
- Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
- }
-
- async fn handle_delete_bucket(&self, query: &DeleteBucketOpt) -> Result<AdminRpc, Error> {
- let helper = self.garage.locked_helper().await;
-
- let bucket_id = helper
- .bucket()
- .admin_get_existing_matching_bucket(&query.name)
- .await?;
-
- // Get the alias, but keep in minde here the bucket name
- // given in parameter can also be directly the bucket's ID.
- // In that case bucket_alias will be None, and
- // we can still delete the bucket if it has zero aliases
- // (a condition which we try to prevent but that could still happen somehow).
- // We just won't try to delete an alias entry because there isn't one.
- let bucket_alias = self
- .garage
- .bucket_alias_table
- .get(&EmptyKey, &query.name)
- .await?;
-
- // Check bucket doesn't have other aliases
- let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
- let bucket_state = bucket.state.as_option().unwrap();
- if bucket_state
- .aliases
- .items()
- .iter()
- .filter(|(_, _, active)| *active)
- .any(|(name, _, _)| name != &query.name)
- {
- return Err(Error::BadRequest(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", query.name)));
- }
- if bucket_state
- .local_aliases
- .items()
- .iter()
- .any(|(_, _, active)| *active)
- {
- return Err(Error::BadRequest(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", query.name)));
- }
-
- // Check bucket is empty
- if !helper.bucket().is_bucket_empty(bucket_id).await? {
- return Err(Error::BadRequest(format!(
- "Bucket {} is not empty",
- query.name
- )));
- }
-
- if !query.yes {
- return Err(Error::BadRequest(
- "Add --yes flag to really perform this operation".to_string(),
- ));
- }
-
- // --- done checking, now commit ---
- // 1. delete authorization from keys that had access
- for (key_id, _) in bucket.authorized_keys() {
- helper
- .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
- .await?;
- }
-
- // 2. delete bucket alias
- if bucket_alias.is_some() {
- helper
- .purge_global_bucket_alias(bucket_id, &query.name)
- .await?;
- }
-
- // 3. delete bucket
- bucket.state = Deletable::delete();
- self.garage.bucket_table.insert(&bucket).await?;
-
- Ok(AdminRpc::Ok(format!("Bucket {} was deleted.", query.name)))
- }
-
- async fn handle_alias_bucket(&self, query: &AliasBucketOpt) -> Result<AdminRpc, Error> {
- let helper = self.garage.locked_helper().await;
-
- let bucket_id = helper
- .bucket()
- .admin_get_existing_matching_bucket(&query.existing_bucket)
- .await?;
-
- if let Some(key_pattern) = &query.local {
- let key = helper.key().get_existing_matching_key(key_pattern).await?;
-
- helper
- .set_local_bucket_alias(bucket_id, &key.key_id, &query.new_name)
- .await?;
- Ok(AdminRpc::Ok(format!(
- "Alias {} now points to bucket {:?} in namespace of key {}",
- query.new_name, bucket_id, key.key_id
- )))
- } else {
- helper
- .set_global_bucket_alias(bucket_id, &query.new_name)
- .await?;
- Ok(AdminRpc::Ok(format!(
- "Alias {} now points to bucket {:?}",
- query.new_name, bucket_id
- )))
- }
- }
-
- async fn handle_unalias_bucket(&self, query: &UnaliasBucketOpt) -> Result<AdminRpc, Error> {
- let helper = self.garage.locked_helper().await;
-
- if let Some(key_pattern) = &query.local {
- let key = helper.key().get_existing_matching_key(key_pattern).await?;
-
- let bucket_id = key
- .state
- .as_option()
- .unwrap()
- .local_aliases
- .get(&query.name)
- .cloned()
- .flatten()
- .ok_or_bad_request("Bucket not found")?;
-
- helper
- .unset_local_bucket_alias(bucket_id, &key.key_id, &query.name)
- .await?;
-
- Ok(AdminRpc::Ok(format!(
- "Alias {} no longer points to bucket {:?} in namespace of key {}",
- &query.name, bucket_id, key.key_id
- )))
- } else {
- let bucket_id = helper
- .bucket()
- .resolve_global_bucket_name(&query.name)
- .await?
- .ok_or_bad_request("Bucket not found")?;
-
- helper
- .unset_global_bucket_alias(bucket_id, &query.name)
- .await?;
-
- Ok(AdminRpc::Ok(format!(
- "Alias {} no longer points to bucket {:?}",
- &query.name, bucket_id
- )))
- }
- }
-
- async fn handle_bucket_allow(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
- let helper = self.garage.locked_helper().await;
-
- let bucket_id = helper
- .bucket()
- .admin_get_existing_matching_bucket(&query.bucket)
- .await?;
- let key = helper
- .key()
- .get_existing_matching_key(&query.key_pattern)
- .await?;
-
- let allow_read = query.read || key.allow_read(&bucket_id);
- let allow_write = query.write || key.allow_write(&bucket_id);
- let allow_owner = query.owner || key.allow_owner(&bucket_id);
-
- helper
- .set_bucket_key_permissions(
- bucket_id,
- &key.key_id,
- BucketKeyPerm {
- timestamp: now_msec(),
- allow_read,
- allow_write,
- allow_owner,
- },
- )
- .await?;
-
- Ok(AdminRpc::Ok(format!(
- "New permissions for {} on {}: read {}, write {}, owner {}.",
- &key.key_id, &query.bucket, allow_read, allow_write, allow_owner
- )))
- }
-
- async fn handle_bucket_deny(&self, query: &PermBucketOpt) -> Result<AdminRpc, Error> {
- let helper = self.garage.locked_helper().await;
-
- let bucket_id = helper
- .bucket()
- .admin_get_existing_matching_bucket(&query.bucket)
- .await?;
- let key = helper
- .key()
- .get_existing_matching_key(&query.key_pattern)
- .await?;
-
- let allow_read = !query.read && key.allow_read(&bucket_id);
- let allow_write = !query.write && key.allow_write(&bucket_id);
- let allow_owner = !query.owner && key.allow_owner(&bucket_id);
-
- helper
- .set_bucket_key_permissions(
- bucket_id,
- &key.key_id,
- BucketKeyPerm {
- timestamp: now_msec(),
- allow_read,
- allow_write,
- allow_owner,
- },
- )
- .await?;
-
- Ok(AdminRpc::Ok(format!(
- "New permissions for {} on {}: read {}, write {}, owner {}.",
- &key.key_id, &query.bucket, allow_read, allow_write, allow_owner
- )))
- }
-
- async fn handle_bucket_website(&self, query: &WebsiteOpt) -> Result<AdminRpc, Error> {
- let bucket_id = self
- .garage
- .bucket_helper()
- .admin_get_existing_matching_bucket(&query.bucket)
- .await?;
-
- let mut bucket = self
- .garage
- .bucket_helper()
- .get_existing_bucket(bucket_id)
- .await?;
- let bucket_state = bucket.state.as_option_mut().unwrap();
-
- if !(query.allow ^ query.deny) {
- return Err(Error::BadRequest(
- "You must specify exactly one flag, either --allow or --deny".to_string(),
- ));
- }
-
- let website = if query.allow {
- Some(WebsiteConfig {
- index_document: query.index_document.clone(),
- error_document: query.error_document.clone(),
- })
- } else {
- None
- };
-
- bucket_state.website_config.update(website);
- self.garage.bucket_table.insert(&bucket).await?;
-
- let msg = if query.allow {
- format!("Website access allowed for {}", &query.bucket)
- } else {
- format!("Website access denied for {}", &query.bucket)
- };
-
- Ok(AdminRpc::Ok(msg))
- }
-
- async fn handle_bucket_set_quotas(&self, query: &SetQuotasOpt) -> Result<AdminRpc, Error> {
- let bucket_id = self
- .garage
- .bucket_helper()
- .admin_get_existing_matching_bucket(&query.bucket)
- .await?;
-
- let mut bucket = self
- .garage
- .bucket_helper()
- .get_existing_bucket(bucket_id)
- .await?;
- let bucket_state = bucket.state.as_option_mut().unwrap();
-
- if query.max_size.is_none() && query.max_objects.is_none() {
- return Err(Error::BadRequest(
- "You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
- ));
- }
-
- let mut quotas = bucket_state.quotas.get().clone();
-
- match query.max_size.as_ref().map(String::as_ref) {
- Some("none") => quotas.max_size = None,
- Some(v) => {
- let bs = v
- .parse::<bytesize::ByteSize>()
- .ok_or_bad_request(format!("Invalid size specified: {}", v))?;
- quotas.max_size = Some(bs.as_u64());
- }
- _ => (),
- }
-
- match query.max_objects.as_ref().map(String::as_ref) {
- Some("none") => quotas.max_objects = None,
- Some(v) => {
- let mo = v
- .parse::<u64>()
- .ok_or_bad_request(format!("Invalid number specified: {}", v))?;
- quotas.max_objects = Some(mo);
- }
- _ => (),
- }
-
- bucket_state.quotas.update(quotas);
- self.garage.bucket_table.insert(&bucket).await?;
-
- Ok(AdminRpc::Ok(format!(
- "Quotas updated for {}",
- &query.bucket
- )))
- }
-
async fn handle_bucket_cleanup_incomplete_uploads(
&self,
query: &CleanupIncompleteUploadsOpt,
diff --git a/src/garage/admin/mod.rs b/src/garage/admin/mod.rs
index 4c460b8d..aa528965 100644
--- a/src/garage/admin/mod.rs
+++ b/src/garage/admin/mod.rs
@@ -524,6 +524,7 @@ impl AdminRpcHandler {
req: &AdminApiRequest,
) -> Result<AdminRpc, Error> {
let req = req.clone();
+ info!("Proxied admin API request: {}", req.name());
let res = req.handle(&self.garage).await;
match res {
Ok(res) => Ok(AdminRpc::ApiOkResponse(res.tagged())),
diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs
index 2b5f93d4..debe7dec 100644
--- a/src/garage/cli/cmd.rs
+++ b/src/garage/cli/cmd.rs
@@ -17,17 +17,6 @@ pub async fn cmd_admin(
AdminRpc::Ok(msg) => {
println!("{}", msg);
}
- AdminRpc::BucketList(bl) => {
- print_bucket_list(bl);
- }
- AdminRpc::BucketInfo {
- bucket,
- relevant_keys,
- counters,
- mpu_counters,
- } => {
- print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters);
- }
AdminRpc::KeyList(kl) => {
print_key_list(kl);
}
diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs
index c591cadd..acf7923e 100644
--- a/src/garage/cli/util.rs
+++ b/src/garage/cli/util.rs
@@ -5,51 +5,17 @@ use format_table::format_table;
use garage_util::background::*;
use garage_util::crdt::*;
use garage_util::data::*;
-use garage_util::error::*;
use garage_util::time::*;
use garage_block::manager::BlockResyncErrorInfo;
use garage_model::bucket_table::*;
use garage_model::key_table::*;
-use garage_model::s3::mpu_table::{self, MultipartUpload};
-use garage_model::s3::object_table;
+use garage_model::s3::mpu_table::MultipartUpload;
use garage_model::s3::version_table::*;
use crate::cli::structs::WorkerListOpt;
-pub fn print_bucket_list(bl: Vec<Bucket>) {
- println!("List of buckets:");
-
- let mut table = vec![];
- for bucket in bl {
- let aliases = bucket
- .aliases()
- .iter()
- .filter(|(_, _, active)| *active)
- .map(|(name, _, _)| name.to_string())
- .collect::<Vec<_>>();
- let local_aliases_n = match &bucket
- .local_aliases()
- .iter()
- .filter(|(_, _, active)| *active)
- .collect::<Vec<_>>()[..]
- {
- [] => "".into(),
- [((k, n), _, _)] => format!("{}:{}", k, n),
- s => format!("[{} local aliases]", s.len()),
- };
-
- table.push(format!(
- "\t{}\t{}\t{}",
- aliases.join(","),
- local_aliases_n,
- hex::encode(bucket.id),
- ));
- }
- format_table(table);
-}
-
pub fn print_key_list(kl: Vec<(String, String)>) {
println!("List of keys:");
let mut table = vec![];
@@ -132,107 +98,6 @@ pub fn print_key_info(key: &Key, relevant_buckets: &HashMap<Uuid, Bucket>) {
}
}
-pub fn print_bucket_info(
- bucket: &Bucket,
- relevant_keys: &HashMap<String, Key>,
- counters: &HashMap<String, i64>,
- mpu_counters: &HashMap<String, i64>,
-) {
- let key_name = |k| {
- relevant_keys
- .get(k)
- .map(|k| k.params().unwrap().name.get().as_str())
- .unwrap_or("<deleted>")
- };
-
- println!("Bucket: {}", hex::encode(bucket.id));
- match &bucket.state {
- Deletable::Deleted => println!("Bucket is deleted."),
- Deletable::Present(p) => {
- let size =
- bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64);
- println!(
- "\nSize: {} ({})",
- size.to_string_as(true),
- size.to_string_as(false)
- );
- println!(
- "Objects: {}",
- *counters.get(object_table::OBJECTS).unwrap_or(&0)
- );
- println!(
- "Unfinished uploads (multipart and non-multipart): {}",
- *counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0)
- );
- println!(
- "Unfinished multipart uploads: {}",
- *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0)
- );
- let mpu_size =
- bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64);
- println!(
- "Size of unfinished multipart uploads: {} ({})",
- mpu_size.to_string_as(true),
- mpu_size.to_string_as(false),
- );
-
- println!("\nWebsite access: {}", p.website_config.get().is_some());
-
- let quotas = p.quotas.get();
- if quotas.max_size.is_some() || quotas.max_objects.is_some() {
- println!("\nQuotas:");
- if let Some(ms) = quotas.max_size {
- let ms = bytesize::ByteSize::b(ms);
- println!(
- " maximum size: {} ({})",
- ms.to_string_as(true),
- ms.to_string_as(false)
- );
- }
- if let Some(mo) = quotas.max_objects {
- println!(" maximum number of objects: {}", mo);
- }
- }
-
- println!("\nGlobal aliases:");
- for (alias, _, active) in p.aliases.items().iter() {
- if *active {
- println!(" {}", alias);
- }
- }
-
- println!("\nKey-specific aliases:");
- let mut table = vec![];
- for ((key_id, alias), _, active) in p.local_aliases.items().iter() {
- if *active {
- table.push(format!("\t{} ({})\t{}", key_id, key_name(key_id), alias));
- }
- }
- format_table(table);
-
- println!("\nAuthorized keys:");
- let mut table = vec![];
- for (k, perm) in p.authorized_keys.items().iter() {
- if !perm.is_any() {
- continue;
- }
- let rflag = if perm.allow_read { "R" } else { " " };
- let wflag = if perm.allow_write { "W" } else { " " };
- let oflag = if perm.allow_owner { "O" } else { " " };
- table.push(format!(
- "\t{}{}{}\t{}\t{}",
- rflag,
- wflag,
- oflag,
- k,
- key_name(k)
- ));
- }
- format_table(table);
- }
- };
-}
-
pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
let mut wi = wi.into_iter().collect::<Vec<_>>();
wi.sort_by_key(|(tid, info)| {
diff --git a/src/garage/cli_v2/bucket.rs b/src/garage/cli_v2/bucket.rs
new file mode 100644
index 00000000..837ce783
--- /dev/null
+++ b/src/garage/cli_v2/bucket.rs
@@ -0,0 +1,523 @@
+//use bytesize::ByteSize;
+use format_table::format_table;
+
+use garage_util::error::*;
+
+use garage_api::admin::api::*;
+
+use crate::cli as cli_v1;
+use crate::cli::structs::*;
+use crate::cli_v2::*;
+
+impl Cli {
+ pub async fn cmd_bucket(&self, cmd: BucketOperation) -> Result<(), Error> {
+ match cmd {
+ BucketOperation::List => self.cmd_list_buckets().await,
+ BucketOperation::Info(query) => self.cmd_bucket_info(query).await,
+ BucketOperation::Create(query) => self.cmd_create_bucket(query).await,
+ BucketOperation::Delete(query) => self.cmd_delete_bucket(query).await,
+ BucketOperation::Alias(query) => self.cmd_alias_bucket(query).await,
+ BucketOperation::Unalias(query) => self.cmd_unalias_bucket(query).await,
+ BucketOperation::Allow(query) => self.cmd_bucket_allow(query).await,
+ BucketOperation::Deny(query) => self.cmd_bucket_deny(query).await,
+ BucketOperation::Website(query) => self.cmd_bucket_website(query).await,
+ BucketOperation::SetQuotas(query) => self.cmd_bucket_set_quotas(query).await,
+
+ // TODO
+ x => cli_v1::cmd_admin(
+ &self.admin_rpc_endpoint,
+ self.rpc_host,
+ AdminRpc::BucketOperation(x),
+ )
+ .await
+ .ok_or_message("old error"),
+ }
+ }
+
+ pub async fn cmd_list_buckets(&self) -> Result<(), Error> {
+ let buckets = self.api_request(ListBucketsRequest).await?;
+
+ println!("List of buckets:");
+
+ let mut table = vec![];
+ for bucket in buckets.0.iter() {
+ let local_aliases_n = match &bucket.local_aliases[..] {
+ [] => "".into(),
+ [alias] => format!("{}:{}", alias.access_key_id, alias.alias),
+ s => format!("[{} local aliases]", s.len()),
+ };
+
+ table.push(format!(
+ "\t{}\t{}\t{}",
+ bucket.global_aliases.join(","),
+ local_aliases_n,
+ bucket.id,
+ ));
+ }
+ format_table(table);
+
+ Ok(())
+ }
+
+ pub async fn cmd_bucket_info(&self, opt: BucketOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.name),
+ })
+ .await?;
+
+ println!("Bucket: {}", bucket.id);
+
+ let size = bytesize::ByteSize::b(bucket.bytes as u64);
+ println!(
+ "\nSize: {} ({})",
+ size.to_string_as(true),
+ size.to_string_as(false)
+ );
+ println!("Objects: {}", bucket.objects);
+ println!(
+ "Unfinished uploads (multipart and non-multipart): {}",
+ bucket.unfinished_uploads,
+ );
+ println!(
+ "Unfinished multipart uploads: {}",
+ bucket.unfinished_multipart_uploads
+ );
+ let mpu_size = bytesize::ByteSize::b(bucket.unfinished_multipart_uploads as u64);
+ println!(
+ "Size of unfinished multipart uploads: {} ({})",
+ mpu_size.to_string_as(true),
+ mpu_size.to_string_as(false),
+ );
+
+ println!("\nWebsite access: {}", bucket.website_access);
+
+ if bucket.quotas.max_size.is_some() || bucket.quotas.max_objects.is_some() {
+ println!("\nQuotas:");
+ if let Some(ms) = bucket.quotas.max_size {
+ let ms = bytesize::ByteSize::b(ms);
+ println!(
+ " maximum size: {} ({})",
+ ms.to_string_as(true),
+ ms.to_string_as(false)
+ );
+ }
+ if let Some(mo) = bucket.quotas.max_objects {
+ println!(" maximum number of objects: {}", mo);
+ }
+ }
+
+ println!("\nGlobal aliases:");
+ for alias in bucket.global_aliases {
+ println!(" {}", alias);
+ }
+
+ println!("\nKey-specific aliases:");
+ let mut table = vec![];
+ for key in bucket.keys.iter() {
+ for alias in key.bucket_local_aliases.iter() {
+ table.push(format!("\t{} ({})\t{}", key.access_key_id, key.name, alias));
+ }
+ }
+ format_table(table);
+
+ println!("\nAuthorized keys:");
+ let mut table = vec![];
+ for key in bucket.keys.iter() {
+ if !(key.permissions.read || key.permissions.write || key.permissions.owner) {
+ continue;
+ }
+ let rflag = if key.permissions.read { "R" } else { " " };
+ let wflag = if key.permissions.write { "W" } else { " " };
+ let oflag = if key.permissions.owner { "O" } else { " " };
+ table.push(format!(
+ "\t{}{}{}\t{}\t{}",
+ rflag, wflag, oflag, key.access_key_id, key.name
+ ));
+ }
+ format_table(table);
+
+ Ok(())
+ }
+
+ pub async fn cmd_create_bucket(&self, opt: BucketOpt) -> Result<(), Error> {
+ self.api_request(CreateBucketRequest {
+ global_alias: Some(opt.name.clone()),
+ local_alias: None,
+ })
+ .await?;
+
+ println!("Bucket {} was created.", opt.name);
+
+ Ok(())
+ }
+
+ pub async fn cmd_delete_bucket(&self, opt: DeleteBucketOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.name.clone()),
+ })
+ .await?;
+
+ // CLI-only checks: the bucket must not have other aliases
+ if bucket
+ .global_aliases
+ .iter()
+ .find(|a| **a != opt.name)
+ .is_some()
+ {
+ return Err(Error::Message(format!("Bucket {} still has other global aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
+ }
+
+ if bucket
+ .keys
+ .iter()
+ .any(|k| !k.bucket_local_aliases.is_empty())
+ {
+ return Err(Error::Message(format!("Bucket {} still has other local aliases. Use `bucket unalias` to delete them one by one.", opt.name)));
+ }
+
+ if !opt.yes {
+ println!("About to delete bucket {}.", bucket.id);
+ return Err(Error::Message(
+ "Add --yes flag to really perform this operation".to_string(),
+ ));
+ }
+
+ self.api_request(DeleteBucketRequest {
+ id: bucket.id.clone(),
+ })
+ .await?;
+
+ println!("Bucket {} has been deleted.", bucket.id);
+
+ Ok(())
+ }
+
+ pub async fn cmd_alias_bucket(&self, opt: AliasBucketOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.existing_bucket.clone()),
+ })
+ .await?;
+
+ if let Some(key_pat) = &opt.local {
+ let key = self
+ .api_request(GetKeyInfoRequest {
+ search: Some(key_pat.clone()),
+ id: None,
+ show_secret_key: false,
+ })
+ .await?;
+
+ self.api_request(AddBucketAliasRequest {
+ bucket_id: bucket.id.clone(),
+ alias: BucketAliasEnum::Local {
+ local_alias: opt.new_name.clone(),
+ access_key_id: key.access_key_id.clone(),
+ },
+ })
+ .await?;
+
+ println!(
+ "Alias {} now points to bucket {:.16} in namespace of key {}",
+ opt.new_name, bucket.id, key.access_key_id
+ )
+ } else {
+ self.api_request(AddBucketAliasRequest {
+ bucket_id: bucket.id.clone(),
+ alias: BucketAliasEnum::Global {
+ global_alias: opt.new_name.clone(),
+ },
+ })
+ .await?;
+
+ println!(
+ "Alias {} now points to bucket {:.16}",
+ opt.new_name, bucket.id
+ )
+ }
+
+ Ok(())
+ }
+
+ pub async fn cmd_unalias_bucket(&self, opt: UnaliasBucketOpt) -> Result<(), Error> {
+ if let Some(key_pat) = &opt.local {
+ let key = self
+ .api_request(GetKeyInfoRequest {
+ search: Some(key_pat.clone()),
+ id: None,
+ show_secret_key: false,
+ })
+ .await?;
+
+ let bucket = key
+ .buckets
+ .iter()
+ .find(|x| x.local_aliases.contains(&opt.name))
+ .ok_or_message(format!(
+ "No bucket called {} in namespace of key {}",
+ opt.name, key.access_key_id
+ ))?;
+
+ self.api_request(RemoveBucketAliasRequest {
+ bucket_id: bucket.id.clone(),
+ alias: BucketAliasEnum::Local {
+ access_key_id: key.access_key_id.clone(),
+ local_alias: opt.name.clone(),
+ },
+ })
+ .await?;
+
+ println!(
+ "Alias {} no longer points to bucket {:.16} in namespace of key {}",
+ &opt.name, bucket.id, key.access_key_id
+ )
+ } else {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: Some(opt.name.clone()),
+ search: None,
+ })
+ .await?;
+
+ self.api_request(RemoveBucketAliasRequest {
+ bucket_id: bucket.id.clone(),
+ alias: BucketAliasEnum::Global {
+ global_alias: opt.name.clone(),
+ },
+ })
+ .await?;
+
+ println!(
+ "Alias {} no longer points to bucket {:.16}",
+ opt.name, bucket.id
+ )
+ }
+
+ Ok(())
+ }
+
+ pub async fn cmd_bucket_allow(&self, opt: PermBucketOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.bucket.clone()),
+ })
+ .await?;
+
+ let key = self
+ .api_request(GetKeyInfoRequest {
+ id: None,
+ search: Some(opt.key_pattern.clone()),
+ show_secret_key: false,
+ })
+ .await?;
+
+ self.api_request(AllowBucketKeyRequest(BucketKeyPermChangeRequest {
+ bucket_id: bucket.id.clone(),
+ access_key_id: key.access_key_id.clone(),
+ permissions: ApiBucketKeyPerm {
+ read: opt.read,
+ write: opt.write,
+ owner: opt.owner,
+ },
+ }))
+ .await?;
+
+ let new_bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: Some(bucket.id),
+ global_alias: None,
+ search: None,
+ })
+ .await?;
+
+ if let Some(new_key) = new_bucket
+ .keys
+ .iter()
+ .find(|k| k.access_key_id == key.access_key_id)
+ {
+ println!(
+ "New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
+ key.access_key_id,
+ new_bucket.id,
+ new_key.permissions.read,
+ new_key.permissions.write,
+ new_key.permissions.owner
+ );
+ } else {
+ println!(
+ "Access key {} has no permissions on bucket {:.16}",
+ key.access_key_id, new_bucket.id
+ );
+ }
+
+ Ok(())
+ }
+
+ pub async fn cmd_bucket_deny(&self, opt: PermBucketOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.bucket.clone()),
+ })
+ .await?;
+
+ let key = self
+ .api_request(GetKeyInfoRequest {
+ id: None,
+ search: Some(opt.key_pattern.clone()),
+ show_secret_key: false,
+ })
+ .await?;
+
+ self.api_request(DenyBucketKeyRequest(BucketKeyPermChangeRequest {
+ bucket_id: bucket.id.clone(),
+ access_key_id: key.access_key_id.clone(),
+ permissions: ApiBucketKeyPerm {
+ read: opt.read,
+ write: opt.write,
+ owner: opt.owner,
+ },
+ }))
+ .await?;
+
+ let new_bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: Some(bucket.id),
+ global_alias: None,
+ search: None,
+ })
+ .await?;
+
+ if let Some(new_key) = new_bucket
+ .keys
+ .iter()
+ .find(|k| k.access_key_id == key.access_key_id)
+ {
+ println!(
+ "New permissions for key {} on bucket {:.16}:\n read {}\n write {}\n owner {}",
+ key.access_key_id,
+ new_bucket.id,
+ new_key.permissions.read,
+ new_key.permissions.write,
+ new_key.permissions.owner
+ );
+ } else {
+ println!(
+ "Access key {} no longer has permissions on bucket {:.16}",
+ key.access_key_id, new_bucket.id
+ );
+ }
+
+ Ok(())
+ }
+
+ pub async fn cmd_bucket_website(&self, opt: WebsiteOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.bucket.clone()),
+ })
+ .await?;
+
+ if !(opt.allow ^ opt.deny) {
+ return Err(Error::Message(
+ "You must specify exactly one flag, either --allow or --deny".to_string(),
+ ));
+ }
+
+ let wa = if opt.allow {
+ UpdateBucketWebsiteAccess {
+ enabled: true,
+ index_document: Some(opt.index_document.clone()),
+ error_document: opt
+ .error_document
+ .or(bucket.website_config.and_then(|x| x.error_document.clone())),
+ }
+ } else {
+ UpdateBucketWebsiteAccess {
+ enabled: false,
+ index_document: None,
+ error_document: None,
+ }
+ };
+
+ self.api_request(UpdateBucketRequest {
+ id: bucket.id,
+ body: UpdateBucketRequestBody {
+ website_access: Some(wa),
+ quotas: None,
+ },
+ })
+ .await?;
+
+ if opt.allow {
+ println!("Website access allowed for {}", &opt.bucket);
+ } else {
+ println!("Website access denied for {}", &opt.bucket);
+ }
+
+ Ok(())
+ }
+
+ pub async fn cmd_bucket_set_quotas(&self, opt: SetQuotasOpt) -> Result<(), Error> {
+ let bucket = self
+ .api_request(GetBucketInfoRequest {
+ id: None,
+ global_alias: None,
+ search: Some(opt.bucket.clone()),
+ })
+ .await?;
+
+ if opt.max_size.is_none() && opt.max_objects.is_none() {
+ return Err(Error::Message(
+ "You must specify either --max-size or --max-objects (or both) for this command to do something.".to_string(),
+ ));
+ }
+
+ let new_quotas = ApiBucketQuotas {
+ max_size: match opt.max_size.as_deref() {
+ Some("none") => None,
+ Some(v) => Some(
+ v.parse::<bytesize::ByteSize>()
+ .ok_or_message(format!("Invalid size specified: {}", v))?
+ .as_u64(),
+ ),
+ None => bucket.quotas.max_size,
+ },
+ max_objects: match opt.max_objects.as_deref() {
+ Some("none") => None,
+ Some(v) => Some(
+ v.parse::<u64>()
+ .ok_or_message(format!("Invalid number: {}", v))?,
+ ),
+ None => bucket.quotas.max_objects,
+ },
+ };
+
+ self.api_request(UpdateBucketRequest {
+ id: bucket.id.clone(),
+ body: UpdateBucketRequestBody {
+ website_access: None,
+ quotas: Some(new_quotas),
+ },
+ })
+ .await?;
+
+ println!("Quotas updated for bucket {:.16}", bucket.id);
+
+ Ok(())
+ }
+}
diff --git a/src/garage/cli_v2/mod.rs b/src/garage/cli_v2/mod.rs
index 2fe45e29..24ff6f72 100644
--- a/src/garage/cli_v2/mod.rs
+++ b/src/garage/cli_v2/mod.rs
@@ -1,5 +1,6 @@
pub mod util;
+pub mod bucket;
pub mod cluster;
pub mod layout;
@@ -35,15 +36,9 @@ impl Cli {
self.cmd_connect(connect_opt).await
}
Command::Layout(layout_opt) => self.layout_command_dispatch(layout_opt).await,
+ Command::Bucket(bo) => self.cmd_bucket(bo).await,
// TODO
- Command::Bucket(bo) => cli_v1::cmd_admin(
- &self.admin_rpc_endpoint,
- self.rpc_host,
- AdminRpc::BucketOperation(bo),
- )
- .await
- .ok_or_message("xoxo"),
Command::Key(ko) => cli_v1::cmd_admin(
&self.admin_rpc_endpoint,
self.rpc_host,