Diffstat (limited to 'src/api/admin')
-rw-r--r--   src/api/admin/api_server.rs                                         21
-rw-r--r--   src/api/admin/bucket.rs                                             24
-rw-r--r--   src/api/admin/cluster.rs                                           207
-rw-r--r--   src/api/admin/key.rs                                                43
-rw-r--r--   src/api/admin/mod.rs                                                 3
-rw-r--r--   src/api/admin/router_v0.rs (renamed from src/api/admin/router.rs)   15
-rw-r--r--   src/api/admin/router_v1.rs                                         235
7 files changed, 459 insertions, 89 deletions
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 6f1e44e5..0ce3ca0d 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -25,7 +25,8 @@ use crate::admin::bucket::*;
 use crate::admin::cluster::*;
 use crate::admin::error::*;
 use crate::admin::key::*;
-use crate::admin::router::{Authorization, Endpoint};
+use crate::admin::router_v0;
+use crate::admin::router_v1::{Authorization, Endpoint};
 use crate::helpers::host_to_bucket;
 
 pub struct AdminApiServer {
@@ -181,7 +182,7 @@ impl AdminApiServer {
 			),
 		};
 		let status_str = format!(
-			"{}\nConsult the full health check API endpoint at /v0/health for more details\n",
+			"{}\nConsult the full health check API endpoint at /v1/health for more details\n",
 			status_str
 		);
 
@@ -229,7 +230,12 @@ impl ApiHandler for AdminApiServer {
 	type Error = Error;
 
 	fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
-		Endpoint::from_request(req)
+		if req.uri().path().starts_with("/v0/") {
+			let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
+			Endpoint::from_v0(endpoint_v0)
+		} else {
+			Endpoint::from_request(req)
+		}
 	}
 
 	async fn handle(
@@ -276,8 +282,13 @@ impl ApiHandler for AdminApiServer {
 			Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
 			// Keys
 			Endpoint::ListKeys => handle_list_keys(&self.garage).await,
-			Endpoint::GetKeyInfo { id, search } => {
-				handle_get_key_info(&self.garage, id, search).await
+			Endpoint::GetKeyInfo {
+				id,
+				search,
+				show_secret_key,
+			} => {
+				let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
+				handle_get_key_info(&self.garage, id, search, show_secret_key).await
 			}
 			Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
 			Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs
index f0a4a9e7..17f46c30 100644
--- a/src/api/admin/bucket.rs
+++ b/src/api/admin/bucket.rs
@@ -14,6 +14,7 @@ use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::*;
 use garage_model::garage::Garage;
 use garage_model::permission::*;
+use garage_model::s3::mpu_table;
 use garage_model::s3::object_table::*;
 
 use crate::admin::error::*;
@@ -124,6 +125,14 @@ async fn bucket_info_results(
 		.map(|x| x.filtered_values(&garage.system.ring.borrow()))
 		.unwrap_or_default();
 
+	let mpu_counters = garage
+		.mpu_counter_table
+		.table
+		.get(&bucket_id, &EmptyKey)
+		.await?
+		.map(|x| x.filtered_values(&garage.system.ring.borrow()))
+		.unwrap_or_default();
+
 	let mut relevant_keys = HashMap::new();
 	for (k, _) in bucket
 		.state
@@ -208,12 +217,12 @@ async fn bucket_info_results(
 			}
 		})
 		.collect::<Vec<_>>(),
-		objects: counters.get(OBJECTS).cloned().unwrap_or_default(),
-		bytes: counters.get(BYTES).cloned().unwrap_or_default(),
-		unfinished_uploads: counters
-			.get(UNFINISHED_UPLOADS)
-			.cloned()
-			.unwrap_or_default(),
+		objects: *counters.get(OBJECTS).unwrap_or(&0),
+		bytes: *counters.get(BYTES).unwrap_or(&0),
+		unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
+		unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
+		unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
+		unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
 		quotas: ApiBucketQuotas {
 			max_size: quotas.max_size,
 			max_objects: quotas.max_objects,
@@ -235,6 +244,9 @@ struct GetBucketInfoResult {
 	objects: i64,
 	bytes: i64,
 	unfinished_uploads: i64,
+	unfinished_multipart_uploads: i64,
+	unfinished_multipart_upload_parts: i64,
+	unfinished_multipart_upload_bytes: i64,
 	quotas: ApiBucketQuotas,
 }
diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs
index 98bf2b5a..c8107b82 100644
--- a/src/api/admin/cluster.rs
+++ b/src/api/admin/cluster.rs
@@ -1,14 +1,13 @@
-use std::collections::HashMap;
 use std::net::SocketAddr;
 use std::sync::Arc;
 
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{Body, Request, Response};
 use serde::{Deserialize, Serialize};
 
 use garage_util::crdt::*;
 use garage_util::data::*;
 
-use garage_rpc::layout::*;
+use garage_rpc::layout;
 
 use garage_model::garage::Garage;
 
@@ -26,26 +25,37 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 			.system
 			.get_known_nodes()
 			.into_iter()
-			.map(|i| {
-				(
-					hex::encode(i.id),
-					KnownNodeResp {
-						addr: i.addr,
-						is_up: i.is_up,
-						last_seen_secs_ago: i.last_seen_secs_ago,
-						hostname: i.status.hostname,
-					},
-				)
+			.map(|i| KnownNodeResp {
+				id: hex::encode(i.id),
+				addr: i.addr,
+				is_up: i.is_up,
+				last_seen_secs_ago: i.last_seen_secs_ago,
+				hostname: i.status.hostname,
 			})
 			.collect(),
-		layout: get_cluster_layout(garage),
+		layout: format_cluster_layout(&garage.system.get_cluster_layout()),
 	};
 
 	Ok(json_ok_response(&res)?)
 }
 
 pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+	use garage_rpc::system::ClusterHealthStatus;
 	let health = garage.system.health();
+	let health = ClusterHealth {
+		status: match health.status {
+			ClusterHealthStatus::Healthy => "healthy",
+			ClusterHealthStatus::Degraded => "degraded",
+			ClusterHealthStatus::Unavailable => "unavailable",
+		},
+		known_nodes: health.known_nodes,
+		connected_nodes: health.connected_nodes,
+		storage_nodes: health.storage_nodes,
+		storage_nodes_ok: health.storage_nodes_ok,
+		partitions: health.partitions,
+		partitions_quorum: health.partitions_quorum,
+		partitions_all_ok: health.partitions_all_ok,
+	};
 	Ok(json_ok_response(&health)?)
 }
 
@@ -74,33 +84,68 @@ pub async fn handle_connect_cluster_nodes(
 }
 
 pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
-	let res = get_cluster_layout(garage);
+	let res = format_cluster_layout(&garage.system.get_cluster_layout());
 
 	Ok(json_ok_response(&res)?)
 }
 
-fn get_cluster_layout(garage: &Arc<Garage>) -> GetClusterLayoutResponse {
-	let layout = garage.system.get_cluster_layout();
+fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse {
+	let roles = layout
+		.roles
+		.items()
+		.iter()
+		.filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x)))
+		.map(|(k, v)| NodeRoleResp {
+			id: hex::encode(k),
+			zone: v.zone.clone(),
+			capacity: v.capacity,
+			tags: v.tags.clone(),
+		})
+		.collect::<Vec<_>>();
+
+	let staged_role_changes = layout
+		.staging_roles
+		.items()
+		.iter()
+		.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
+		.map(|(k, _, v)| match &v.0 {
+			None => NodeRoleChange {
+				id: hex::encode(k),
+				action: NodeRoleChangeEnum::Remove { remove: true },
+			},
+			Some(r) => NodeRoleChange {
+				id: hex::encode(k),
+				action: NodeRoleChangeEnum::Update {
+					zone: r.zone.clone(),
+					capacity: r.capacity,
+					tags: r.tags.clone(),
+				},
+			},
+		})
+		.collect::<Vec<_>>();
 
 	GetClusterLayoutResponse {
 		version: layout.version,
-		roles: layout
-			.roles
-			.items()
-			.iter()
-			.filter(|(_, _, v)| v.0.is_some())
-			.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
-			.collect(),
-		staged_role_changes: layout
-			.staging
-			.items()
-			.iter()
-			.filter(|(k, _, v)| layout.roles.get(k) != Some(v))
-			.map(|(k, _, v)| (hex::encode(k), v.0.clone()))
-			.collect(),
+		roles,
+		staged_role_changes,
 	}
 }
 
+// ----
+
+#[derive(Debug, Clone, Copy, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ClusterHealth {
+	status: &'static str,
+	known_nodes: usize,
+	connected_nodes: usize,
+	storage_nodes: usize,
+	storage_nodes_ok: usize,
+	partitions: usize,
+	partitions_quorum: usize,
+	partitions_all_ok: usize,
+}
+
 #[derive(Serialize)]
 #[serde(rename_all = "camelCase")]
 struct GetClusterStatusResponse {
@@ -109,11 +154,19 @@ struct GetClusterStatusResponse {
 	garage_features: Option<&'static [&'static str]>,
 	rust_version: &'static str,
 	db_engine: String,
-	known_nodes: HashMap<String, KnownNodeResp>,
+	known_nodes: Vec<KnownNodeResp>,
+	layout: GetClusterLayoutResponse,
+}
+
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+struct ApplyClusterLayoutResponse {
+	message: Vec<String>,
 	layout: GetClusterLayoutResponse,
 }
 
 #[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
 struct ConnectClusterNodesResponse {
 	success: bool,
 	error: Option<String>,
@@ -123,18 +176,31 @@ struct ConnectClusterNodesResponse {
 #[serde(rename_all = "camelCase")]
 struct GetClusterLayoutResponse {
 	version: u64,
-	roles: HashMap<String, Option<NodeRole>>,
-	staged_role_changes: HashMap<String, Option<NodeRole>>,
+	roles: Vec<NodeRoleResp>,
+	staged_role_changes: Vec<NodeRoleChange>,
 }
 
 #[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
+struct NodeRoleResp {
+	id: String,
+	zone: String,
+	capacity: Option<u64>,
+	tags: Vec<String>,
+}
+
+#[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
 struct KnownNodeResp {
+	id: String,
 	addr: SocketAddr,
 	is_up: bool,
 	last_seen_secs_ago: Option<u64>,
 	hostname: String,
 }
 
+// ---- update functions ----
+
 pub async fn handle_update_cluster_layout(
 	garage: &Arc<Garage>,
 	req: Request<Body>,
@@ -144,22 +210,35 @@ pub async fn handle_update_cluster_layout(
 
 	let mut layout = garage.system.get_cluster_layout();
 	let mut roles = layout.roles.clone();
-	roles.merge(&layout.staging);
+	roles.merge(&layout.staging_roles);
 
-	for (node, role) in updates {
-		let node = hex::decode(node).ok_or_bad_request("Invalid node identifier")?;
+	for change in updates {
+		let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
 		let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
 
+		let new_role = match change.action {
+			NodeRoleChangeEnum::Remove { remove: true } => None,
+			NodeRoleChangeEnum::Update {
+				zone,
+				capacity,
+				tags,
+			} => Some(layout::NodeRole {
+				zone,
+				capacity,
+				tags,
+			}),
+			_ => return Err(Error::bad_request("Invalid layout change")),
+		};
+
 		layout
-			.staging
-			.merge(&roles.update_mutator(node, NodeRoleV(role)));
+			.staging_roles
+			.merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
 	}
 
 	garage.system.update_cluster_layout(&layout).await?;
 
-	Ok(Response::builder()
-		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+	let res = format_cluster_layout(&layout);
+	Ok(json_ok_response(&res)?)
 }
 
 pub async fn handle_apply_cluster_layout(
@@ -169,12 +248,15 @@ pub async fn handle_apply_cluster_layout(
 	let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
 
 	let layout = garage.system.get_cluster_layout();
-	let layout = layout.apply_staged_changes(Some(param.version))?;
+	let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
+
 	garage.system.update_cluster_layout(&layout).await?;
 
-	Ok(Response::builder()
-		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+	let res = ApplyClusterLayoutResponse {
+		message: msg,
+		layout: format_cluster_layout(&layout),
+	};
+	Ok(json_ok_response(&res)?)
 }
 
 pub async fn handle_revert_cluster_layout(
@@ -187,14 +269,39 @@ pub async fn handle_revert_cluster_layout(
 	let layout = layout.revert_staged_changes(Some(param.version))?;
 	garage.system.update_cluster_layout(&layout).await?;
 
-	Ok(Response::builder()
-		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+	let res = format_cluster_layout(&layout);
+	Ok(json_ok_response(&res)?)
 }
 
-type UpdateClusterLayoutRequest = HashMap<String, Option<NodeRole>>;
+// ----
+
+type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
 
 #[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct ApplyRevertLayoutRequest {
 	version: u64,
 }
+
+// ----
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct NodeRoleChange {
+	id: String,
+	#[serde(flatten)]
+	action: NodeRoleChangeEnum,
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(untagged)]
+enum NodeRoleChangeEnum {
+	#[serde(rename_all = "camelCase")]
+	Remove { remove: bool },
+	#[serde(rename_all = "camelCase")]
+	Update {
+		zone: String,
+		capacity: Option<u64>,
+		tags: Vec<String>,
+	},
+}
diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs
index d74ca361..8d1c6890 100644
--- a/src/api/admin/key.rs
+++ b/src/api/admin/key.rs
@@ -10,7 +10,7 @@ use garage_model::garage::Garage;
 use garage_model::key_table::*;
 
 use crate::admin::error::*;
-use crate::helpers::{json_ok_response, parse_json_body};
+use crate::helpers::{is_default, json_ok_response, parse_json_body};
 
 pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
 	let res = garage
@@ -34,6 +34,7 @@ pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Er
 }
 
 #[derive(Serialize)]
+#[serde(rename_all = "camelCase")]
 struct ListKeyResultItem {
 	id: String,
 	name: String,
@@ -43,6 +44,7 @@ pub async fn handle_get_key_info(
 	garage: &Arc<Garage>,
 	id: Option<String>,
 	search: Option<String>,
+	show_secret_key: bool,
 ) -> Result<Response<Body>, Error> {
 	let key = if let Some(id) = id {
 		garage.key_helper().get_existing_key(&id).await?
@@ -55,7 +57,7 @@ pub async fn handle_get_key_info(
 		unreachable!();
 	};
 
-	key_info_results(garage, key).await
+	key_info_results(garage, key, show_secret_key).await
 }
 
 pub async fn handle_create_key(
@@ -64,15 +66,16 @@ pub async fn handle_create_key(
 ) -> Result<Response<Body>, Error> {
 	let req = parse_json_body::<CreateKeyRequest>(req).await?;
 
-	let key = Key::new(&req.name);
+	let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
 	garage.key_table.insert(&key).await?;
 
-	key_info_results(garage, key).await
+	key_info_results(garage, key, true).await
 }
 
 #[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct CreateKeyRequest {
-	name: String,
+	name: Option<String>,
 }
 
 pub async fn handle_import_key(
@@ -86,10 +89,15 @@ pub async fn handle_import_key(
 		return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
 	}
 
-	let imported_key = Key::import(&req.access_key_id, &req.secret_access_key, &req.name);
+	let imported_key = Key::import(
+		&req.access_key_id,
+		&req.secret_access_key,
+		req.name.as_deref().unwrap_or("Imported key"),
+	)
+	.ok_or_bad_request("Invalid key format")?;
 	garage.key_table.insert(&imported_key).await?;
 
-	key_info_results(garage, imported_key).await
+	key_info_results(garage, imported_key, false).await
 }
 
 #[derive(Deserialize)]
@@ -97,7 +105,7 @@ pub async fn handle_import_key(
 struct ImportKeyRequest {
 	access_key_id: String,
 	secret_access_key: String,
-	name: String,
+	name: Option<String>,
 }
 
 pub async fn handle_update_key(
@@ -127,10 +135,11 @@ pub async fn handle_update_key(
 
 	garage.key_table.insert(&key).await?;
 
-	key_info_results(garage, key).await
+	key_info_results(garage, key, false).await
 }
 
 #[derive(Deserialize)]
+#[serde(rename_all = "camelCase")]
 struct UpdateKeyRequest {
 	name: Option<String>,
 	allow: Option<KeyPerm>,
@@ -149,7 +158,11 @@ pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Respo
 		.body(Body::empty())?)
 }
 
-async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Body>, Error> {
+async fn key_info_results(
+	garage: &Arc<Garage>,
+	key: Key,
+	show_secret: bool,
+) -> Result<Response<Body>, Error> {
 	let mut relevant_buckets = HashMap::new();
 
 	let key_state = key.state.as_option().unwrap();
@@ -178,7 +191,11 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
 	let res = GetKeyInfoResult {
 		name: key_state.name.get().clone(),
 		access_key_id: key.key_id.clone(),
-		secret_access_key: key_state.secret_key.clone(),
+		secret_access_key: if show_secret {
+			Some(key_state.secret_key.clone())
+		} else {
+			None
+		},
 		permissions: KeyPerm {
 			create_bucket: *key_state.allow_create_bucket.get(),
 		},
@@ -224,7 +241,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
 struct GetKeyInfoResult {
 	name: String,
 	access_key_id: String,
-	secret_access_key: String,
+	#[serde(skip_serializing_if = "is_default")]
+	secret_access_key: Option<String>,
 	permissions: KeyPerm,
 	buckets: Vec<KeyInfoBucketResult>,
 }
@@ -246,6 +264,7 @@ struct KeyInfoBucketResult {
 }
 
 #[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
 pub(crate) struct ApiBucketKeyPerm {
 	#[serde(default)]
 	pub(crate) read: bool,
diff --git a/src/api/admin/mod.rs b/src/api/admin/mod.rs
index c4857c10..43a8c59c 100644
--- a/src/api/admin/mod.rs
+++ b/src/api/admin/mod.rs
@@ -1,6 +1,7 @@
 pub mod api_server;
 mod error;
-mod router;
+mod router_v0;
+mod router_v1;
 
 mod bucket;
 mod cluster;
diff --git a/src/api/admin/router.rs b/src/api/admin/router_v0.rs
index 0225f18b..68676445 100644
--- a/src/api/admin/router.rs
+++ b/src/api/admin/router_v0.rs
@@ -5,12 +5,6 @@ use hyper::{Method, Request};
 use crate::admin::error::*;
 use crate::router_macros::*;
 
-pub enum Authorization {
-	None,
-	MetricsToken,
-	AdminToken,
-}
-
 router_match! {@func
 
 /// List of all Admin API endpoints.
@@ -134,15 +128,6 @@ impl Endpoint {
 
 		Ok(res)
 	}
-	/// Get the kind of authorization which is required to perform the operation.
-	pub fn authorization_type(&self) -> Authorization {
-		match self {
-			Self::Health => Authorization::None,
-			Self::CheckDomain => Authorization::None,
-			Self::Metrics => Authorization::MetricsToken,
-			_ => Authorization::AdminToken,
-		}
-	}
 }
 
 generateQueryParameters! {
diff --git a/src/api/admin/router_v1.rs b/src/api/admin/router_v1.rs
new file mode 100644
index 00000000..cc5ff2ec
--- /dev/null
+++ b/src/api/admin/router_v1.rs
@@ -0,0 +1,235 @@
+use std::borrow::Cow;
+
+use hyper::{Method, Request};
+
+use crate::admin::error::*;
+use crate::admin::router_v0;
+use crate::router_macros::*;
+
+pub enum Authorization {
+	None,
+	MetricsToken,
+	AdminToken,
+}
+
+router_match! {@func
+
+/// List of all Admin API endpoints.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Endpoint {
+	Options,
+	CheckDomain,
+	Health,
+	Metrics,
+	GetClusterStatus,
+	GetClusterHealth,
+	ConnectClusterNodes,
+	// Layout
+	GetClusterLayout,
+	UpdateClusterLayout,
+	ApplyClusterLayout,
+	RevertClusterLayout,
+	// Keys
+	ListKeys,
+	CreateKey,
+	ImportKey,
+	GetKeyInfo {
+		id: Option<String>,
+		search: Option<String>,
+		show_secret_key: Option<String>,
+	},
+	DeleteKey {
+		id: String,
+	},
+	UpdateKey {
+		id: String,
+	},
+	// Buckets
+	ListBuckets,
+	CreateBucket,
+	GetBucketInfo {
+		id: Option<String>,
+		global_alias: Option<String>,
+	},
+	DeleteBucket {
+		id: String,
+	},
+	UpdateBucket {
+		id: String,
+	},
+	// Bucket-Key Permissions
+	BucketAllowKey,
+	BucketDenyKey,
+	// Bucket aliases
+	GlobalAliasBucket {
+		id: String,
+		alias: String,
+	},
+	GlobalUnaliasBucket {
+		id: String,
+		alias: String,
+	},
+	LocalAliasBucket {
+		id: String,
+		access_key_id: String,
+		alias: String,
+	},
+	LocalUnaliasBucket {
+		id: String,
+		access_key_id: String,
+		alias: String,
+	},
+}}
+
+impl Endpoint {
+	/// Determine which S3 endpoint a request is for using the request, and a bucket which was
+	/// possibly extracted from the Host header.
+	/// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets
+	pub fn from_request<T>(req: &Request<T>) -> Result<Self, Error> {
+		let uri = req.uri();
+		let path = uri.path();
+		let query = uri.query();
+
+		let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
+
+		let res = router_match!(@gen_path_parser (req.method(), path, query) [
+			OPTIONS _ => Options,
+			GET "/check" => CheckDomain,
+			GET "/health" => Health,
+			GET "/metrics" => Metrics,
+			GET "/v1/status" => GetClusterStatus,
+			GET "/v1/health" => GetClusterHealth,
+			POST "/v1/connect" => ConnectClusterNodes,
+			// Layout endpoints
+			GET "/v1/layout" => GetClusterLayout,
+			POST "/v1/layout" => UpdateClusterLayout,
+			POST "/v1/layout/apply" => ApplyClusterLayout,
+			POST "/v1/layout/revert" => RevertClusterLayout,
+			// API key endpoints
+			GET "/v1/key" if id => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key),
+			GET "/v1/key" if search => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key),
+			POST "/v1/key" if id => UpdateKey (query::id),
+			POST "/v1/key" => CreateKey,
+			POST "/v1/key/import" => ImportKey,
+			DELETE "/v1/key" if id => DeleteKey (query::id),
+			GET "/v1/key" => ListKeys,
+			// Bucket endpoints
+			GET "/v1/bucket" if id => GetBucketInfo (query_opt::id, query_opt::global_alias),
+			GET "/v1/bucket" if global_alias => GetBucketInfo (query_opt::id, query_opt::global_alias),
+			GET "/v1/bucket" => ListBuckets,
+			POST "/v1/bucket" => CreateBucket,
+			DELETE "/v1/bucket" if id => DeleteBucket (query::id),
+			PUT "/v1/bucket" if id => UpdateBucket (query::id),
+			// Bucket-key permissions
+			POST "/v1/bucket/allow" => BucketAllowKey,
+			POST "/v1/bucket/deny" => BucketDenyKey,
+			// Bucket aliases
+			PUT "/v1/bucket/alias/global" => GlobalAliasBucket (query::id, query::alias),
+			DELETE "/v1/bucket/alias/global" => GlobalUnaliasBucket (query::id, query::alias),
+			PUT "/v1/bucket/alias/local" => LocalAliasBucket (query::id, query::access_key_id, query::alias),
+			DELETE "/v1/bucket/alias/local" => LocalUnaliasBucket (query::id, query::access_key_id, query::alias),
+		]);
+
+		if let Some(message) = query.nonempty_message() {
+			debug!("Unused query parameter: {}", message)
+		}
+
+		Ok(res)
+	}
+	/// Some endpoints work exactly the same in their v1/ version as they did in their v0/ version.
+	/// For these endpoints, we can convert a v0/ call to its equivalent as if it was made using
+	/// its v1/ URL.
+	pub fn from_v0(v0_endpoint: router_v0::Endpoint) -> Result<Self, Error> {
+		match v0_endpoint {
+			// Cluster endpoints
+			router_v0::Endpoint::ConnectClusterNodes => Ok(Self::ConnectClusterNodes),
+			// - GetClusterStatus: response format changed
+			// - GetClusterHealth: response format changed
+
+			// Layout endpoints
+			router_v0::Endpoint::RevertClusterLayout => Ok(Self::RevertClusterLayout),
+			// - GetClusterLayout: response format changed
+			// - UpdateClusterLayout: query format changed
+			// - ApplyCusterLayout: response format changed
+
+			// Key endpoints
+			router_v0::Endpoint::ListKeys => Ok(Self::ListKeys),
+			router_v0::Endpoint::CreateKey => Ok(Self::CreateKey),
+			router_v0::Endpoint::GetKeyInfo { id, search } => Ok(Self::GetKeyInfo {
+				id,
+				search,
+				show_secret_key: Some("true".into()),
+			}),
+			router_v0::Endpoint::DeleteKey { id } => Ok(Self::DeleteKey { id }),
+			// - UpdateKey: response format changed (secret key no longer returned)
+
+			// Bucket endpoints
+			router_v0::Endpoint::GetBucketInfo { id, global_alias } => {
+				Ok(Self::GetBucketInfo { id, global_alias })
+			}
+			router_v0::Endpoint::ListBuckets => Ok(Self::ListBuckets),
+			router_v0::Endpoint::CreateBucket => Ok(Self::CreateBucket),
+			router_v0::Endpoint::DeleteBucket { id } => Ok(Self::DeleteBucket { id }),
+			router_v0::Endpoint::UpdateBucket { id } => Ok(Self::UpdateBucket { id }),
+
+			// Bucket-key permissions
+			router_v0::Endpoint::BucketAllowKey => Ok(Self::BucketAllowKey),
+			router_v0::Endpoint::BucketDenyKey => Ok(Self::BucketDenyKey),
+
+			// Bucket alias endpoints
+			router_v0::Endpoint::GlobalAliasBucket { id, alias } => {
+				Ok(Self::GlobalAliasBucket { id, alias })
+			}
+			router_v0::Endpoint::GlobalUnaliasBucket { id, alias } => {
+				Ok(Self::GlobalUnaliasBucket { id, alias })
+			}
+			router_v0::Endpoint::LocalAliasBucket {
+				id,
+				access_key_id,
+				alias,
+			} => Ok(Self::LocalAliasBucket {
+				id,
+				access_key_id,
+				alias,
+			}),
+			router_v0::Endpoint::LocalUnaliasBucket {
+				id,
+				access_key_id,
+				alias,
+			} => Ok(Self::LocalUnaliasBucket {
+				id,
+				access_key_id,
+				alias,
+			}),
+
+			// For endpoints that have different body content syntax, issue
+			// deprecation warning
+			_ => Err(Error::bad_request(format!(
+				"v0/ endpoint is no longer supported: {}",
+				v0_endpoint.name()
+			))),
+		}
+	}
+	/// Get the kind of authorization which is required to perform the operation.
+	pub fn authorization_type(&self) -> Authorization {
+		match self {
+			Self::Health => Authorization::None,
+			Self::CheckDomain => Authorization::None,
+			Self::Metrics => Authorization::MetricsToken,
+			_ => Authorization::AdminToken,
+		}
+	}
+}
+
+generateQueryParameters! {
+	keywords: [],
+	fields: [
+		"format" => format,
+		"id" => id,
+		"search" => search,
+		"globalAlias" => global_alias,
+		"alias" => alias,
+		"accessKeyId" => access_key_id,
+		"showSecretKey" => show_secret_key
+	]
+}
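
Illustration (not taken from this patch): with router_v1, POST /v1/layout accepts a list of role changes instead of the v0 map of node ID to role, following the NodeRoleChange / NodeRoleChangeEnum serde definitions added in cluster.rs above. A minimal sketch of such a request body, where the node identifiers, zone, capacity and tags are placeholder values:

    POST /v1/layout
    [
      { "id": "<node id, hex>", "zone": "dc1", "capacity": 100000000000, "tags": ["gateway"] },
      { "id": "<node id, hex>", "remove": true }
    ]

The handler now also replies with the updated layout in the GetClusterLayoutResponse format (camelCase "roles" and "stagedRoleChanges" arrays) rather than an empty 204 response, and /v1/layout/apply additionally returns the "message" lines produced by apply_staged_changes.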
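
Illustration (not taken from this patch): the v1 GetKeyInfo endpoint only returns the secret key when the new showSecretKey query parameter is set, which api_server.rs compares against the string "true". Assuming an existing key id, the two calls behave as:

    GET /v1/key?id=<access key id>                       -> secretAccessKey omitted from the response
    GET /v1/key?id=<access key id>&showSecretKey=true    -> secretAccessKey included

Requests that still arrive on the legacy /v0/ path keep the old behaviour, since Endpoint::from_v0() maps them to GetKeyInfo with show_secret_key preset to "true".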
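
Illustration (not taken from this patch): GET /v1/health now serializes the explicit ClusterHealth struct from cluster.rs, so the response is a flat camelCase JSON object whose status is one of "healthy", "degraded" or "unavailable"; the numbers below are placeholder values:

    {
      "status": "healthy",
      "knownNodes": 3,
      "connectedNodes": 3,
      "storageNodes": 3,
      "storageNodesOk": 3,
      "partitions": 256,
      "partitionsQuorum": 256,
      "partitionsAllOk": 256
    }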