aboutsummaryrefslogtreecommitdiff
path: root/src/api/admin
diff options
context:
space:
mode:
Diffstat (limited to 'src/api/admin')
-rw-r--r--src/api/admin/Cargo.toml1
-rw-r--r--src/api/admin/api.rs568
-rw-r--r--src/api/admin/api_server.rs237
-rw-r--r--src/api/admin/bucket.rs716
-rw-r--r--src/api/admin/cluster.rs528
-rw-r--r--src/api/admin/error.rs2
-rw-r--r--src/api/admin/key.rs279
-rw-r--r--src/api/admin/lib.rs24
-rw-r--r--src/api/admin/macros.rs94
-rw-r--r--src/api/admin/router_v1.rs15
-rw-r--r--src/api/admin/router_v2.rs251
-rw-r--r--src/api/admin/special.rs133
12 files changed, 1766 insertions, 1082 deletions
diff --git a/src/api/admin/Cargo.toml b/src/api/admin/Cargo.toml
index adddf306..94a321a6 100644
--- a/src/api/admin/Cargo.toml
+++ b/src/api/admin/Cargo.toml
@@ -24,6 +24,7 @@ argon2.workspace = true
async-trait.workspace = true
err-derive.workspace = true
hex.workspace = true
+paste.workspace = true
tracing.workspace = true
futures.workspace = true
diff --git a/src/api/admin/api.rs b/src/api/admin/api.rs
new file mode 100644
index 00000000..99832564
--- /dev/null
+++ b/src/api/admin/api.rs
@@ -0,0 +1,568 @@
+use std::convert::TryFrom;
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use paste::paste;
+use serde::{Deserialize, Serialize};
+
+use garage_model::garage::Garage;
+
+use garage_api_common::helpers::is_default;
+
+use crate::error::Error;
+use crate::macros::*;
+use crate::EndpointHandler;
+
+// This generates the following:
+//
+// - An enum AdminApiRequest that contains a variant for all endpoints
+//
+// - An enum AdminApiResponse that contains a variant for all non-special endpoints.
+// This enum is serialized in api_server.rs, without the enum tag,
+// which gives directly the JSON response corresponding to the API call.
+// This enum does not implement Deserialize as its meaning can be ambiguous.
+//
+// - An enum TaggedAdminApiResponse that contains the same variants, but
+// serializes as a tagged enum. This allows it to be transmitted through
+// Garage RPC and deserialized correctly upon receipt.
+// Conversion from untagged to tagged can be done using the `.tagged()` method.
+//
+// - AdminApiRequest::name() that returns the name of the endpoint
+//
+// - impl EndpointHandler for AdminApiHandler, that uses the impl EndpointHandler
+// of each request type below for non-special endpoints
+admin_endpoints![
+ // Special endpoints of the Admin API
+ @special Options,
+ @special CheckDomain,
+ @special Health,
+ @special Metrics,
+
+ // Cluster operations
+ GetClusterStatus,
+ GetClusterHealth,
+ ConnectClusterNodes,
+ GetClusterLayout,
+ UpdateClusterLayout,
+ ApplyClusterLayout,
+ RevertClusterLayout,
+
+ // Access key operations
+ ListKeys,
+ GetKeyInfo,
+ CreateKey,
+ ImportKey,
+ UpdateKey,
+ DeleteKey,
+
+ // Bucket operations
+ ListBuckets,
+ GetBucketInfo,
+ CreateBucket,
+ UpdateBucket,
+ DeleteBucket,
+
+ // Operations on permissions for keys on buckets
+ AllowBucketKey,
+ DenyBucketKey,
+
+ // Operations on bucket aliases
+ AddBucketAlias,
+ RemoveBucketAlias,
+];
+
+// **********************************************
+// Special endpoints
+//
+// These endpoints don't have associated *Response structs
+// because they directly produce an http::Response
+// **********************************************
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct OptionsRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CheckDomainRequest {
+ pub domain: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct HealthRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct MetricsRequest;
+
+// **********************************************
+// Cluster operations
+// **********************************************
+
+// ---- GetClusterStatus ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetClusterStatusRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetClusterStatusResponse {
+ pub node: String,
+ pub garage_version: String,
+ pub garage_features: Option<Vec<String>>,
+ pub rust_version: String,
+ pub db_engine: String,
+ pub layout_version: u64,
+ pub nodes: Vec<NodeResp>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeResp {
+ pub id: String,
+ pub role: Option<NodeRoleResp>,
+ pub addr: Option<SocketAddr>,
+ pub hostname: Option<String>,
+ pub is_up: bool,
+ pub last_seen_secs_ago: Option<u64>,
+ pub draining: bool,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub data_partition: Option<FreeSpaceResp>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub metadata_partition: Option<FreeSpaceResp>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeRoleResp {
+ pub id: String,
+ pub zone: String,
+ pub capacity: Option<u64>,
+ pub tags: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct FreeSpaceResp {
+ pub available: u64,
+ pub total: u64,
+}
+
+// ---- GetClusterHealth ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetClusterHealthRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetClusterHealthResponse {
+ pub status: String,
+ pub known_nodes: usize,
+ pub connected_nodes: usize,
+ pub storage_nodes: usize,
+ pub storage_nodes_ok: usize,
+ pub partitions: usize,
+ pub partitions_quorum: usize,
+ pub partitions_all_ok: usize,
+}
+
+// ---- ConnectClusterNodes ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConnectClusterNodesRequest(pub Vec<String>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConnectClusterNodesResponse(pub Vec<ConnectNodeResponse>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ConnectNodeResponse {
+ pub success: bool,
+ pub error: Option<String>,
+}
+
+// ---- GetClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetClusterLayoutRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetClusterLayoutResponse {
+ pub version: u64,
+ pub roles: Vec<NodeRoleResp>,
+ pub staged_role_changes: Vec<NodeRoleChange>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct NodeRoleChange {
+ pub id: String,
+ #[serde(flatten)]
+ pub action: NodeRoleChangeEnum,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum NodeRoleChangeEnum {
+ #[serde(rename_all = "camelCase")]
+ Remove { remove: bool },
+ #[serde(rename_all = "camelCase")]
+ Update {
+ zone: String,
+ capacity: Option<u64>,
+ tags: Vec<String>,
+ },
+}
+
+// ---- UpdateClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateClusterLayoutRequest(pub Vec<NodeRoleChange>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateClusterLayoutResponse(pub GetClusterLayoutResponse);
+
+// ---- ApplyClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ApplyClusterLayoutRequest {
+ pub version: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ApplyClusterLayoutResponse {
+ pub message: Vec<String>,
+ pub layout: GetClusterLayoutResponse,
+}
+
+// ---- RevertClusterLayout ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RevertClusterLayoutRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RevertClusterLayoutResponse(pub GetClusterLayoutResponse);
+
+// **********************************************
+// Access key operations
+// **********************************************
+
+// ---- ListKeys ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListKeysRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListKeysResponse(pub Vec<ListKeysResponseItem>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ListKeysResponseItem {
+ pub id: String,
+ pub name: String,
+}
+
+// ---- GetKeyInfo ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetKeyInfoRequest {
+ pub id: Option<String>,
+ pub search: Option<String>,
+ pub show_secret_key: bool,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetKeyInfoResponse {
+ pub name: String,
+ pub access_key_id: String,
+ #[serde(skip_serializing_if = "is_default")]
+ pub secret_access_key: Option<String>,
+ pub permissions: KeyPerm,
+ pub buckets: Vec<KeyInfoBucketResponse>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct KeyPerm {
+ #[serde(default)]
+ pub create_bucket: bool,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct KeyInfoBucketResponse {
+ pub id: String,
+ pub global_aliases: Vec<String>,
+ pub local_aliases: Vec<String>,
+ pub permissions: ApiBucketKeyPerm,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+#[serde(rename_all = "camelCase")]
+pub struct ApiBucketKeyPerm {
+ #[serde(default)]
+ pub read: bool,
+ #[serde(default)]
+ pub write: bool,
+ #[serde(default)]
+ pub owner: bool,
+}
+
+// ---- CreateKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateKeyRequest {
+ pub name: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CreateKeyResponse(pub GetKeyInfoResponse);
+
+// ---- ImportKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ImportKeyRequest {
+ pub access_key_id: String,
+ pub secret_access_key: String,
+ pub name: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ImportKeyResponse(pub GetKeyInfoResponse);
+
+// ---- UpdateKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateKeyRequest {
+ pub id: String,
+ pub body: UpdateKeyRequestBody,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateKeyResponse(pub GetKeyInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateKeyRequestBody {
+ pub name: Option<String>,
+ pub allow: Option<KeyPerm>,
+ pub deny: Option<KeyPerm>,
+}
+
+// ---- DeleteKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteKeyRequest {
+ pub id: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteKeyResponse;
+
+// **********************************************
+// Bucket operations
+// **********************************************
+
+// ---- ListBuckets ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListBucketsRequest;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ListBucketsResponse(pub Vec<ListBucketsResponseItem>);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ListBucketsResponseItem {
+ pub id: String,
+ pub global_aliases: Vec<String>,
+ pub local_aliases: Vec<BucketLocalAlias>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BucketLocalAlias {
+ pub access_key_id: String,
+ pub alias: String,
+}
+
+// ---- GetBucketInfo ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GetBucketInfoRequest {
+ pub id: Option<String>,
+ pub global_alias: Option<String>,
+ pub search: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBucketInfoResponse {
+ pub id: String,
+ pub global_aliases: Vec<String>,
+ pub website_access: bool,
+ #[serde(default)]
+ pub website_config: Option<GetBucketInfoWebsiteResponse>,
+ pub keys: Vec<GetBucketInfoKey>,
+ pub objects: i64,
+ pub bytes: i64,
+ pub unfinished_uploads: i64,
+ pub unfinished_multipart_uploads: i64,
+ pub unfinished_multipart_upload_parts: i64,
+ pub unfinished_multipart_upload_bytes: i64,
+ pub quotas: ApiBucketQuotas,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBucketInfoWebsiteResponse {
+ pub index_document: String,
+ pub error_document: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetBucketInfoKey {
+ pub access_key_id: String,
+ pub name: String,
+ pub permissions: ApiBucketKeyPerm,
+ pub bucket_local_aliases: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ApiBucketQuotas {
+ pub max_size: Option<u64>,
+ pub max_objects: Option<u64>,
+}
+
+// ---- CreateBucket ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateBucketRequest {
+ pub global_alias: Option<String>,
+ pub local_alias: Option<CreateBucketLocalAlias>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CreateBucketResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateBucketLocalAlias {
+ pub access_key_id: String,
+ pub alias: String,
+ #[serde(default)]
+ pub allow: ApiBucketKeyPerm,
+}
+
+// ---- UpdateBucket ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateBucketRequest {
+ pub id: String,
+ pub body: UpdateBucketRequestBody,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UpdateBucketResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateBucketRequestBody {
+ pub website_access: Option<UpdateBucketWebsiteAccess>,
+ pub quotas: Option<ApiBucketQuotas>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UpdateBucketWebsiteAccess {
+ pub enabled: bool,
+ pub index_document: Option<String>,
+ pub error_document: Option<String>,
+}
+
+// ---- DeleteBucket ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteBucketRequest {
+ pub id: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DeleteBucketResponse;
+
+// **********************************************
+// Operations on permissions for keys on buckets
+// **********************************************
+
+// ---- AllowBucketKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AllowBucketKeyRequest(pub BucketKeyPermChangeRequest);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AllowBucketKeyResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BucketKeyPermChangeRequest {
+ pub bucket_id: String,
+ pub access_key_id: String,
+ pub permissions: ApiBucketKeyPerm,
+}
+
+// ---- DenyBucketKey ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DenyBucketKeyRequest(pub BucketKeyPermChangeRequest);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DenyBucketKeyResponse(pub GetBucketInfoResponse);
+
+// **********************************************
+// Operations on bucket aliases
+// **********************************************
+
+// ---- AddBucketAlias ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct AddBucketAliasRequest {
+ pub bucket_id: String,
+ #[serde(flatten)]
+ pub alias: BucketAliasEnum,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AddBucketAliasResponse(pub GetBucketInfoResponse);
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum BucketAliasEnum {
+ #[serde(rename_all = "camelCase")]
+ Global { global_alias: String },
+ #[serde(rename_all = "camelCase")]
+ Local {
+ local_alias: String,
+ access_key_id: String,
+ },
+}
+
+// ---- RemoveBucketAlias ----
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RemoveBucketAliasRequest {
+ pub bucket_id: String,
+ #[serde(flatten)]
+ pub alias: BucketAliasEnum,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RemoveBucketAliasResponse(pub GetBucketInfoResponse);
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index e39fa1ba..be29e617 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -1,10 +1,10 @@
-use std::collections::HashMap;
+use std::borrow::Cow;
use std::sync::Arc;
use argon2::password_hash::PasswordHash;
use async_trait::async_trait;
-use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
+use http::header::{HeaderValue, ACCESS_CONTROL_ALLOW_ORIGIN, AUTHORIZATION};
use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use tokio::sync::watch;
@@ -16,19 +16,18 @@ use opentelemetry_prometheus::PrometheusExporter;
use prometheus::{Encoder, TextEncoder};
use garage_model::garage::Garage;
-use garage_rpc::system::ClusterHealthStatus;
use garage_util::error::Error as GarageError;
use garage_util::socket_address::UnixOrTCPSocketAddress;
use garage_api_common::generic_server::*;
use garage_api_common::helpers::*;
-use crate::bucket::*;
-use crate::cluster::*;
+use crate::api::*;
use crate::error::*;
-use crate::key::*;
use crate::router_v0;
-use crate::router_v1::{Authorization, Endpoint};
+use crate::router_v1;
+use crate::Authorization;
+use crate::EndpointHandler;
pub type ResBody = BoxBody<Error>;
@@ -40,6 +39,11 @@ pub struct AdminApiServer {
admin_token: Option<String>,
}
+pub enum Endpoint {
+ Old(router_v1::Endpoint),
+ New(String),
+}
+
impl AdminApiServer {
pub fn new(
garage: Arc<Garage>,
@@ -68,130 +72,6 @@ impl AdminApiServer {
.await
}
- fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
- Ok(Response::builder()
- .status(StatusCode::NO_CONTENT)
- .header(ALLOW, "OPTIONS, GET, POST")
- .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
- .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
- .body(empty_body())?)
- }
-
- async fn handle_check_domain(
- &self,
- req: Request<IncomingBody>,
- ) -> Result<Response<ResBody>, Error> {
- let query_params: HashMap<String, String> = req
- .uri()
- .query()
- .map(|v| {
- url::form_urlencoded::parse(v.as_bytes())
- .into_owned()
- .collect()
- })
- .unwrap_or_else(HashMap::new);
-
- let has_domain_key = query_params.contains_key("domain");
-
- if !has_domain_key {
- return Err(Error::bad_request("No domain query string found"));
- }
-
- let domain = query_params
- .get("domain")
- .ok_or_internal_error("Could not parse domain query string")?;
-
- if self.check_domain(domain).await? {
- Ok(Response::builder()
- .status(StatusCode::OK)
- .body(string_body(format!(
- "Domain '{domain}' is managed by Garage"
- )))?)
- } else {
- Err(Error::bad_request(format!(
- "Domain '{domain}' is not managed by Garage"
- )))
- }
- }
-
- async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
- // Resolve bucket from domain name, inferring if the website must be activated for the
- // domain to be valid.
- let (bucket_name, must_check_website) = if let Some(bname) = self
- .garage
- .config
- .s3_api
- .root_domain
- .as_ref()
- .and_then(|rd| host_to_bucket(domain, rd))
- {
- (bname.to_string(), false)
- } else if let Some(bname) = self
- .garage
- .config
- .s3_web
- .as_ref()
- .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
- {
- (bname.to_string(), true)
- } else {
- (domain.to_string(), true)
- };
-
- let bucket_id = match self
- .garage
- .bucket_helper()
- .resolve_global_bucket_name(&bucket_name)
- .await?
- {
- Some(bucket_id) => bucket_id,
- None => return Ok(false),
- };
-
- if !must_check_website {
- return Ok(true);
- }
-
- let bucket = self
- .garage
- .bucket_helper()
- .get_existing_bucket(bucket_id)
- .await?;
-
- let bucket_state = bucket.state.as_option().unwrap();
- let bucket_website_config = bucket_state.website_config.get();
-
- match bucket_website_config {
- Some(_v) => Ok(true),
- None => Ok(false),
- }
- }
-
- fn handle_health(&self) -> Result<Response<ResBody>, Error> {
- let health = self.garage.system.health();
-
- let (status, status_str) = match health.status {
- ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
- ClusterHealthStatus::Degraded => (
- StatusCode::OK,
- "Garage is operational but some storage nodes are unavailable",
- ),
- ClusterHealthStatus::Unavailable => (
- StatusCode::SERVICE_UNAVAILABLE,
- "Quorum is not available for some/all partitions, reads and writes will fail",
- ),
- };
- let status_str = format!(
- "{}\nConsult the full health check API endpoint at /v1/health for more details\n",
- status_str
- );
-
- Ok(Response::builder()
- .status(status)
- .header(http::header::CONTENT_TYPE, "text/plain")
- .body(string_body(status_str))?)
- }
-
fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
#[cfg(feature = "metrics")]
{
@@ -232,9 +112,13 @@ impl ApiHandler for AdminApiServer {
fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
if req.uri().path().starts_with("/v0/") {
let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
- Endpoint::from_v0(endpoint_v0)
+ let endpoint_v1 = router_v1::Endpoint::from_v0(endpoint_v0)?;
+ Ok(Endpoint::Old(endpoint_v1))
+ } else if req.uri().path().starts_with("/v1/") {
+ let endpoint_v1 = router_v1::Endpoint::from_request(req)?;
+ Ok(Endpoint::Old(endpoint_v1))
} else {
- Endpoint::from_request(req)
+ Ok(Endpoint::New(req.uri().path().to_string()))
}
}
@@ -243,8 +127,15 @@ impl ApiHandler for AdminApiServer {
req: Request<IncomingBody>,
endpoint: Endpoint,
) -> Result<Response<ResBody>, Error> {
+ let auth_header = req.headers().get(AUTHORIZATION).cloned();
+
+ let request = match endpoint {
+ Endpoint::Old(endpoint_v1) => AdminApiRequest::from_v1(endpoint_v1, req).await?,
+ Endpoint::New(_) => AdminApiRequest::from_request(req).await?,
+ };
+
let required_auth_hash =
- match endpoint.authorization_type() {
+ match request.authorization_type() {
Authorization::None => None,
Authorization::MetricsToken => self.metrics_token.as_deref(),
Authorization::AdminToken => match self.admin_token.as_deref() {
@@ -256,7 +147,7 @@ impl ApiHandler for AdminApiServer {
};
if let Some(password_hash) = required_auth_hash {
- match req.headers().get("Authorization") {
+ match auth_header {
None => return Err(Error::forbidden("Authorization token must be provided")),
Some(authorization) => {
verify_bearer_token(&authorization, password_hash)?;
@@ -264,72 +155,28 @@ impl ApiHandler for AdminApiServer {
}
}
- match endpoint {
- Endpoint::Options => self.handle_options(&req),
- Endpoint::CheckDomain => self.handle_check_domain(req).await,
- Endpoint::Health => self.handle_health(),
- Endpoint::Metrics => self.handle_metrics(),
- Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
- Endpoint::GetClusterHealth => handle_get_cluster_health(&self.garage).await,
- Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await,
- // Layout
- Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await,
- Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await,
- Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await,
- Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage).await,
- // Keys
- Endpoint::ListKeys => handle_list_keys(&self.garage).await,
- Endpoint::GetKeyInfo {
- id,
- search,
- show_secret_key,
- } => {
- let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
- handle_get_key_info(&self.garage, id, search, show_secret_key).await
- }
- Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
- Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
- Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await,
- Endpoint::DeleteKey { id } => handle_delete_key(&self.garage, id).await,
- // Buckets
- Endpoint::ListBuckets => handle_list_buckets(&self.garage).await,
- Endpoint::GetBucketInfo { id, global_alias } => {
- handle_get_bucket_info(&self.garage, id, global_alias).await
+ match request {
+ AdminApiRequest::Options(req) => req.handle(&self.garage).await,
+ AdminApiRequest::CheckDomain(req) => req.handle(&self.garage).await,
+ AdminApiRequest::Health(req) => req.handle(&self.garage).await,
+ AdminApiRequest::Metrics(_req) => self.handle_metrics(),
+ req => {
+ let res = req.handle(&self.garage).await?;
+ let mut res = json_ok_response(&res)?;
+ res.headers_mut()
+ .insert(ACCESS_CONTROL_ALLOW_ORIGIN, HeaderValue::from_static("*"));
+ Ok(res)
}
- Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await,
- Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await,
- Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await,
- // Bucket-key permissions
- Endpoint::BucketAllowKey => {
- handle_bucket_change_key_perm(&self.garage, req, true).await
- }
- Endpoint::BucketDenyKey => {
- handle_bucket_change_key_perm(&self.garage, req, false).await
- }
- // Bucket aliasing
- Endpoint::GlobalAliasBucket { id, alias } => {
- handle_global_alias_bucket(&self.garage, id, alias).await
- }
- Endpoint::GlobalUnaliasBucket { id, alias } => {
- handle_global_unalias_bucket(&self.garage, id, alias).await
- }
- Endpoint::LocalAliasBucket {
- id,
- access_key_id,
- alias,
- } => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await,
- Endpoint::LocalUnaliasBucket {
- id,
- access_key_id,
- alias,
- } => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await,
}
}
}
impl ApiEndpoint for Endpoint {
- fn name(&self) -> &'static str {
- Endpoint::name(self)
+ fn name(&self) -> Cow<'static, str> {
+ match self {
+ Self::Old(endpoint_v1) => Cow::Borrowed(endpoint_v1.name()),
+ Self::New(path) => Cow::Owned(path.clone()),
+ }
}
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs
index 2537bfc9..123956ca 100644
--- a/src/api/admin/bucket.rs
+++ b/src/api/admin/bucket.rs
@@ -1,8 +1,7 @@
use std::collections::HashMap;
use std::sync::Arc;
-use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
-use serde::{Deserialize, Serialize};
+use async_trait::async_trait;
use garage_util::crdt::*;
use garage_util::data::*;
@@ -18,102 +17,91 @@ use garage_model::s3::mpu_table;
use garage_model::s3::object_table::*;
use garage_api_common::common_error::CommonError;
-use garage_api_common::helpers::*;
-use crate::api_server::ResBody;
+use crate::api::*;
use crate::error::*;
-use crate::key::ApiBucketKeyPerm;
-
-pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
- let buckets = garage
- .bucket_table
- .get_range(
- &EmptyKey,
- None,
- Some(DeletedFilter::NotDeleted),
- 10000,
- EnumerationOrder::Forward,
- )
- .await?;
-
- let res = buckets
- .into_iter()
- .map(|b| {
- let state = b.state.as_option().unwrap();
- ListBucketResultItem {
- id: hex::encode(b.id),
- global_aliases: state
- .aliases
- .items()
- .iter()
- .filter(|(_, _, a)| *a)
- .map(|(n, _, _)| n.to_string())
- .collect::<Vec<_>>(),
- local_aliases: state
- .local_aliases
- .items()
- .iter()
- .filter(|(_, _, a)| *a)
- .map(|((k, n), _, _)| BucketLocalAlias {
- access_key_id: k.to_string(),
- alias: n.to_string(),
- })
- .collect::<Vec<_>>(),
- }
- })
- .collect::<Vec<_>>();
-
- Ok(json_ok_response(&res)?)
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct ListBucketResultItem {
- id: String,
- global_aliases: Vec<String>,
- local_aliases: Vec<BucketLocalAlias>,
-}
+use crate::EndpointHandler;
+
+#[async_trait]
+impl EndpointHandler for ListBucketsRequest {
+ type Response = ListBucketsResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<ListBucketsResponse, Error> {
+ let buckets = garage
+ .bucket_table
+ .get_range(
+ &EmptyKey,
+ None,
+ Some(DeletedFilter::NotDeleted),
+ 10000,
+ EnumerationOrder::Forward,
+ )
+ .await?;
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct BucketLocalAlias {
- access_key_id: String,
- alias: String,
-}
+ let res = buckets
+ .into_iter()
+ .map(|b| {
+ let state = b.state.as_option().unwrap();
+ ListBucketsResponseItem {
+ id: hex::encode(b.id),
+ global_aliases: state
+ .aliases
+ .items()
+ .iter()
+ .filter(|(_, _, a)| *a)
+ .map(|(n, _, _)| n.to_string())
+ .collect::<Vec<_>>(),
+ local_aliases: state
+ .local_aliases
+ .items()
+ .iter()
+ .filter(|(_, _, a)| *a)
+ .map(|((k, n), _, _)| BucketLocalAlias {
+ access_key_id: k.to_string(),
+ alias: n.to_string(),
+ })
+ .collect::<Vec<_>>(),
+ }
+ })
+ .collect::<Vec<_>>();
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct ApiBucketQuotas {
- max_size: Option<u64>,
- max_objects: Option<u64>,
+ Ok(ListBucketsResponse(res))
+ }
}
-pub async fn handle_get_bucket_info(
- garage: &Arc<Garage>,
- id: Option<String>,
- global_alias: Option<String>,
-) -> Result<Response<ResBody>, Error> {
- let bucket_id = match (id, global_alias) {
- (Some(id), None) => parse_bucket_id(&id)?,
- (None, Some(ga)) => garage
- .bucket_helper()
- .resolve_global_bucket_name(&ga)
- .await?
- .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
- _ => {
- return Err(Error::bad_request(
- "Either id or globalAlias must be provided (but not both)",
- ));
- }
- };
+#[async_trait]
+impl EndpointHandler for GetBucketInfoRequest {
+ type Response = GetBucketInfoResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<GetBucketInfoResponse, Error> {
+ let bucket_id = match (self.id, self.global_alias, self.search) {
+ (Some(id), None, None) => parse_bucket_id(&id)?,
+ (None, Some(ga), None) => garage
+ .bucket_helper()
+ .resolve_global_bucket_name(&ga)
+ .await?
+ .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?,
+ (None, None, Some(search)) => {
+ garage
+ .bucket_helper()
+ .admin_get_existing_matching_bucket(&search)
+ .await?
+ }
+ _ => {
+ return Err(Error::bad_request(
+ "Either id, globalAlias or search must be provided (but not several of them)",
+ ));
+ }
+ };
- bucket_info_results(garage, bucket_id).await
+ bucket_info_results(garage, bucket_id).await
+ }
}
async fn bucket_info_results(
garage: &Arc<Garage>,
bucket_id: Uuid,
-) -> Result<Response<ResBody>, Error> {
+) -> Result<GetBucketInfoResponse, Error> {
let bucket = garage
.bucket_helper()
.get_existing_bucket(bucket_id)
@@ -176,301 +164,257 @@ async fn bucket_info_results(
let state = bucket.state.as_option().unwrap();
let quotas = state.quotas.get();
- let res =
- GetBucketInfoResult {
- id: hex::encode(bucket.id),
- global_aliases: state
- .aliases
- .items()
- .iter()
- .filter(|(_, _, a)| *a)
- .map(|(n, _, _)| n.to_string())
- .collect::<Vec<_>>(),
- website_access: state.website_config.get().is_some(),
- website_config: state.website_config.get().clone().map(|wsc| {
- GetBucketInfoWebsiteResult {
- index_document: wsc.index_document,
- error_document: wsc.error_document,
+ let res = GetBucketInfoResponse {
+ id: hex::encode(bucket.id),
+ global_aliases: state
+ .aliases
+ .items()
+ .iter()
+ .filter(|(_, _, a)| *a)
+ .map(|(n, _, _)| n.to_string())
+ .collect::<Vec<_>>(),
+ website_access: state.website_config.get().is_some(),
+ website_config: state.website_config.get().clone().map(|wsc| {
+ GetBucketInfoWebsiteResponse {
+ index_document: wsc.index_document,
+ error_document: wsc.error_document,
+ }
+ }),
+ keys: relevant_keys
+ .into_values()
+ .map(|key| {
+ let p = key.state.as_option().unwrap();
+ GetBucketInfoKey {
+ access_key_id: key.key_id,
+ name: p.name.get().to_string(),
+ permissions: p
+ .authorized_buckets
+ .get(&bucket.id)
+ .map(|p| ApiBucketKeyPerm {
+ read: p.allow_read,
+ write: p.allow_write,
+ owner: p.allow_owner,
+ })
+ .unwrap_or_default(),
+ bucket_local_aliases: p
+ .local_aliases
+ .items()
+ .iter()
+ .filter(|(_, _, b)| *b == Some(bucket.id))
+ .map(|(n, _, _)| n.to_string())
+ .collect::<Vec<_>>(),
}
- }),
- keys: relevant_keys
- .into_values()
- .map(|key| {
- let p = key.state.as_option().unwrap();
- GetBucketInfoKey {
- access_key_id: key.key_id,
- name: p.name.get().to_string(),
- permissions: p
- .authorized_buckets
- .get(&bucket.id)
- .map(|p| ApiBucketKeyPerm {
- read: p.allow_read,
- write: p.allow_write,
- owner: p.allow_owner,
- })
- .unwrap_or_default(),
- bucket_local_aliases: p
- .local_aliases
- .items()
- .iter()
- .filter(|(_, _, b)| *b == Some(bucket.id))
- .map(|(n, _, _)| n.to_string())
- .collect::<Vec<_>>(),
- }
- })
- .collect::<Vec<_>>(),
- objects: *counters.get(OBJECTS).unwrap_or(&0),
- bytes: *counters.get(BYTES).unwrap_or(&0),
- unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
- unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
- unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
- unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
- quotas: ApiBucketQuotas {
- max_size: quotas.max_size,
- max_objects: quotas.max_objects,
- },
- };
-
- Ok(json_ok_response(&res)?)
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct GetBucketInfoResult {
- id: String,
- global_aliases: Vec<String>,
- website_access: bool,
- #[serde(default)]
- website_config: Option<GetBucketInfoWebsiteResult>,
- keys: Vec<GetBucketInfoKey>,
- objects: i64,
- bytes: i64,
- unfinished_uploads: i64,
- unfinished_multipart_uploads: i64,
- unfinished_multipart_upload_parts: i64,
- unfinished_multipart_upload_bytes: i64,
- quotas: ApiBucketQuotas,
-}
+ })
+ .collect::<Vec<_>>(),
+ objects: *counters.get(OBJECTS).unwrap_or(&0),
+ bytes: *counters.get(BYTES).unwrap_or(&0),
+ unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
+ unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
+ unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
+ unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
+ quotas: ApiBucketQuotas {
+ max_size: quotas.max_size,
+ max_objects: quotas.max_objects,
+ },
+ };
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct GetBucketInfoWebsiteResult {
- index_document: String,
- error_document: Option<String>,
+ Ok(res)
}
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct GetBucketInfoKey {
- access_key_id: String,
- name: String,
- permissions: ApiBucketKeyPerm,
- bucket_local_aliases: Vec<String>,
-}
+#[async_trait]
+impl EndpointHandler for CreateBucketRequest {
+ type Response = CreateBucketResponse;
-pub async fn handle_create_bucket(
- garage: &Arc<Garage>,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;
+ async fn handle(self, garage: &Arc<Garage>) -> Result<CreateBucketResponse, Error> {
+ let helper = garage.locked_helper().await;
- let helper = garage.locked_helper().await;
-
- if let Some(ga) = &req.global_alias {
- if !is_valid_bucket_name(ga) {
- return Err(Error::bad_request(format!(
- "{}: {}",
- ga, INVALID_BUCKET_NAME_MESSAGE
- )));
- }
+ if let Some(ga) = &self.global_alias {
+ if !is_valid_bucket_name(ga) {
+ return Err(Error::bad_request(format!(
+ "{}: {}",
+ ga, INVALID_BUCKET_NAME_MESSAGE
+ )));
+ }
- if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
- if alias.state.get().is_some() {
- return Err(CommonError::BucketAlreadyExists.into());
+ if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? {
+ if alias.state.get().is_some() {
+ return Err(CommonError::BucketAlreadyExists.into());
+ }
}
}
- }
- if let Some(la) = &req.local_alias {
- if !is_valid_bucket_name(&la.alias) {
- return Err(Error::bad_request(format!(
- "{}: {}",
- la.alias, INVALID_BUCKET_NAME_MESSAGE
- )));
- }
+ if let Some(la) = &self.local_alias {
+ if !is_valid_bucket_name(&la.alias) {
+ return Err(Error::bad_request(format!(
+ "{}: {}",
+ la.alias, INVALID_BUCKET_NAME_MESSAGE
+ )));
+ }
- let key = helper.key().get_existing_key(&la.access_key_id).await?;
- let state = key.state.as_option().unwrap();
- if matches!(state.local_aliases.get(&la.alias), Some(_)) {
- return Err(Error::bad_request("Local alias already exists"));
+ let key = helper.key().get_existing_key(&la.access_key_id).await?;
+ let state = key.state.as_option().unwrap();
+ if matches!(state.local_aliases.get(&la.alias), Some(_)) {
+ return Err(Error::bad_request("Local alias already exists"));
+ }
}
- }
- let bucket = Bucket::new();
- garage.bucket_table.insert(&bucket).await?;
-
- if let Some(ga) = &req.global_alias {
- helper.set_global_bucket_alias(bucket.id, ga).await?;
- }
+ let bucket = Bucket::new();
+ garage.bucket_table.insert(&bucket).await?;
- if let Some(la) = &req.local_alias {
- helper
- .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
- .await?;
+ if let Some(ga) = &self.global_alias {
+ helper.set_global_bucket_alias(bucket.id, ga).await?;
+ }
- if la.allow.read || la.allow.write || la.allow.owner {
+ if let Some(la) = &self.local_alias {
helper
- .set_bucket_key_permissions(
- bucket.id,
- &la.access_key_id,
- BucketKeyPerm {
- timestamp: now_msec(),
- allow_read: la.allow.read,
- allow_write: la.allow.write,
- allow_owner: la.allow.owner,
- },
- )
+ .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias)
.await?;
- }
- }
- bucket_info_results(garage, bucket.id).await
-}
+ if la.allow.read || la.allow.write || la.allow.owner {
+ helper
+ .set_bucket_key_permissions(
+ bucket.id,
+ &la.access_key_id,
+ BucketKeyPerm {
+ timestamp: now_msec(),
+ allow_read: la.allow.read,
+ allow_write: la.allow.write,
+ allow_owner: la.allow.owner,
+ },
+ )
+ .await?;
+ }
+ }
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct CreateBucketRequest {
- global_alias: Option<String>,
- local_alias: Option<CreateBucketLocalAlias>,
+ Ok(CreateBucketResponse(
+ bucket_info_results(garage, bucket.id).await?,
+ ))
+ }
}
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct CreateBucketLocalAlias {
- access_key_id: String,
- alias: String,
- #[serde(default)]
- allow: ApiBucketKeyPerm,
-}
+#[async_trait]
+impl EndpointHandler for DeleteBucketRequest {
+ type Response = DeleteBucketResponse;
-pub async fn handle_delete_bucket(
- garage: &Arc<Garage>,
- id: String,
-) -> Result<Response<ResBody>, Error> {
- let helper = garage.locked_helper().await;
+ async fn handle(self, garage: &Arc<Garage>) -> Result<DeleteBucketResponse, Error> {
+ let helper = garage.locked_helper().await;
- let bucket_id = parse_bucket_id(&id)?;
+ let bucket_id = parse_bucket_id(&self.id)?;
- let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
- let state = bucket.state.as_option().unwrap();
+ let mut bucket = helper.bucket().get_existing_bucket(bucket_id).await?;
+ let state = bucket.state.as_option().unwrap();
- // Check bucket is empty
- if !helper.bucket().is_bucket_empty(bucket_id).await? {
- return Err(CommonError::BucketNotEmpty.into());
- }
+ // Check bucket is empty
+ if !helper.bucket().is_bucket_empty(bucket_id).await? {
+ return Err(CommonError::BucketNotEmpty.into());
+ }
- // --- done checking, now commit ---
- // 1. delete authorization from keys that had access
- for (key_id, perm) in bucket.authorized_keys() {
- if perm.is_any() {
- helper
- .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
- .await?;
+ // --- done checking, now commit ---
+ // 1. delete authorization from keys that had access
+ for (key_id, perm) in bucket.authorized_keys() {
+ if perm.is_any() {
+ helper
+ .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS)
+ .await?;
+ }
}
- }
- // 2. delete all local aliases
- for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
- if *active {
- helper
- .unset_local_bucket_alias(bucket.id, key_id, alias)
- .await?;
+ // 2. delete all local aliases
+ for ((key_id, alias), _, active) in state.local_aliases.items().iter() {
+ if *active {
+ helper
+ .unset_local_bucket_alias(bucket.id, key_id, alias)
+ .await?;
+ }
}
- }
- // 3. delete all global aliases
- for (alias, _, active) in state.aliases.items().iter() {
- if *active {
- helper.purge_global_bucket_alias(bucket.id, alias).await?;
+ // 3. delete all global aliases
+ for (alias, _, active) in state.aliases.items().iter() {
+ if *active {
+ helper.purge_global_bucket_alias(bucket.id, alias).await?;
+ }
}
- }
- // 4. delete bucket
- bucket.state = Deletable::delete();
- garage.bucket_table.insert(&bucket).await?;
+ // 4. delete bucket
+ bucket.state = Deletable::delete();
+ garage.bucket_table.insert(&bucket).await?;
- Ok(Response::builder()
- .status(StatusCode::NO_CONTENT)
- .body(empty_body())?)
+ Ok(DeleteBucketResponse)
+ }
}
-pub async fn handle_update_bucket(
- garage: &Arc<Garage>,
- id: String,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<UpdateBucketRequest, _, Error>(req).await?;
- let bucket_id = parse_bucket_id(&id)?;
+#[async_trait]
+impl EndpointHandler for UpdateBucketRequest {
+ type Response = UpdateBucketResponse;
- let mut bucket = garage
- .bucket_helper()
- .get_existing_bucket(bucket_id)
- .await?;
+ async fn handle(self, garage: &Arc<Garage>) -> Result<UpdateBucketResponse, Error> {
+ let bucket_id = parse_bucket_id(&self.id)?;
- let state = bucket.state.as_option_mut().unwrap();
-
- if let Some(wa) = req.website_access {
- if wa.enabled {
- state.website_config.update(Some(WebsiteConfig {
- index_document: wa.index_document.ok_or_bad_request(
- "Please specify indexDocument when enabling website access.",
- )?,
- error_document: wa.error_document,
- }));
- } else {
- if wa.index_document.is_some() || wa.error_document.is_some() {
- return Err(Error::bad_request(
- "Cannot specify indexDocument or errorDocument when disabling website access.",
- ));
+ let mut bucket = garage
+ .bucket_helper()
+ .get_existing_bucket(bucket_id)
+ .await?;
+
+ let state = bucket.state.as_option_mut().unwrap();
+
+ if let Some(wa) = self.body.website_access {
+ if wa.enabled {
+ state.website_config.update(Some(WebsiteConfig {
+ index_document: wa.index_document.ok_or_bad_request(
+ "Please specify indexDocument when enabling website access.",
+ )?,
+ error_document: wa.error_document,
+ }));
+ } else {
+ if wa.index_document.is_some() || wa.error_document.is_some() {
+ return Err(Error::bad_request(
+ "Cannot specify indexDocument or errorDocument when disabling website access.",
+ ));
+ }
+ state.website_config.update(None);
}
- state.website_config.update(None);
}
- }
- if let Some(q) = req.quotas {
- state.quotas.update(BucketQuotas {
- max_size: q.max_size,
- max_objects: q.max_objects,
- });
- }
+ if let Some(q) = self.body.quotas {
+ state.quotas.update(BucketQuotas {
+ max_size: q.max_size,
+ max_objects: q.max_objects,
+ });
+ }
- garage.bucket_table.insert(&bucket).await?;
+ garage.bucket_table.insert(&bucket).await?;
- bucket_info_results(garage, bucket_id).await
+ Ok(UpdateBucketResponse(
+ bucket_info_results(garage, bucket_id).await?,
+ ))
+ }
}
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct UpdateBucketRequest {
- website_access: Option<UpdateBucketWebsiteAccess>,
- quotas: Option<ApiBucketQuotas>,
-}
+// ---- BUCKET/KEY PERMISSIONS ----
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct UpdateBucketWebsiteAccess {
- enabled: bool,
- index_document: Option<String>,
- error_document: Option<String>,
+#[async_trait]
+impl EndpointHandler for AllowBucketKeyRequest {
+ type Response = AllowBucketKeyResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<AllowBucketKeyResponse, Error> {
+ let res = handle_bucket_change_key_perm(garage, self.0, true).await?;
+ Ok(AllowBucketKeyResponse(res))
+ }
}
-// ---- BUCKET/KEY PERMISSIONS ----
+#[async_trait]
+impl EndpointHandler for DenyBucketKeyRequest {
+ type Response = DenyBucketKeyResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<DenyBucketKeyResponse, Error> {
+ let res = handle_bucket_change_key_perm(garage, self.0, false).await?;
+ Ok(DenyBucketKeyResponse(res))
+ }
+}
pub async fn handle_bucket_change_key_perm(
garage: &Arc<Garage>,
- req: Request<IncomingBody>,
+ req: BucketKeyPermChangeRequest,
new_perm_flag: bool,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
-
+) -> Result<GetBucketInfoResponse, Error> {
let helper = garage.locked_helper().await;
let bucket_id = parse_bucket_id(&req.bucket_id)?;
@@ -503,76 +447,68 @@ pub async fn handle_bucket_change_key_perm(
bucket_info_results(garage, bucket.id).await
}
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct BucketKeyPermChangeRequest {
- bucket_id: String,
- access_key_id: String,
- permissions: ApiBucketKeyPerm,
-}
-
// ---- BUCKET ALIASES ----
-pub async fn handle_global_alias_bucket(
- garage: &Arc<Garage>,
- bucket_id: String,
- alias: String,
-) -> Result<Response<ResBody>, Error> {
- let bucket_id = parse_bucket_id(&bucket_id)?;
-
- let helper = garage.locked_helper().await;
-
- helper.set_global_bucket_alias(bucket_id, &alias).await?;
+#[async_trait]
+impl EndpointHandler for AddBucketAliasRequest {
+ type Response = AddBucketAliasResponse;
- bucket_info_results(garage, bucket_id).await
-}
+ async fn handle(self, garage: &Arc<Garage>) -> Result<AddBucketAliasResponse, Error> {
+ let bucket_id = parse_bucket_id(&self.bucket_id)?;
-pub async fn handle_global_unalias_bucket(
- garage: &Arc<Garage>,
- bucket_id: String,
- alias: String,
-) -> Result<Response<ResBody>, Error> {
- let bucket_id = parse_bucket_id(&bucket_id)?;
+ let helper = garage.locked_helper().await;
- let helper = garage.locked_helper().await;
-
- helper.unset_global_bucket_alias(bucket_id, &alias).await?;
+ match self.alias {
+ BucketAliasEnum::Global { global_alias } => {
+ helper
+ .set_global_bucket_alias(bucket_id, &global_alias)
+ .await?;
+ }
+ BucketAliasEnum::Local {
+ local_alias,
+ access_key_id,
+ } => {
+ helper
+ .set_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
+ .await?;
+ }
+ }
- bucket_info_results(garage, bucket_id).await
+ Ok(AddBucketAliasResponse(
+ bucket_info_results(garage, bucket_id).await?,
+ ))
+ }
}
-pub async fn handle_local_alias_bucket(
- garage: &Arc<Garage>,
- bucket_id: String,
- access_key_id: String,
- alias: String,
-) -> Result<Response<ResBody>, Error> {
- let bucket_id = parse_bucket_id(&bucket_id)?;
-
- let helper = garage.locked_helper().await;
-
- helper
- .set_local_bucket_alias(bucket_id, &access_key_id, &alias)
- .await?;
+#[async_trait]
+impl EndpointHandler for RemoveBucketAliasRequest {
+ type Response = RemoveBucketAliasResponse;
- bucket_info_results(garage, bucket_id).await
-}
+ async fn handle(self, garage: &Arc<Garage>) -> Result<RemoveBucketAliasResponse, Error> {
+ let bucket_id = parse_bucket_id(&self.bucket_id)?;
-pub async fn handle_local_unalias_bucket(
- garage: &Arc<Garage>,
- bucket_id: String,
- access_key_id: String,
- alias: String,
-) -> Result<Response<ResBody>, Error> {
- let bucket_id = parse_bucket_id(&bucket_id)?;
+ let helper = garage.locked_helper().await;
- let helper = garage.locked_helper().await;
-
- helper
- .unset_local_bucket_alias(bucket_id, &access_key_id, &alias)
- .await?;
+ match self.alias {
+ BucketAliasEnum::Global { global_alias } => {
+ helper
+ .unset_global_bucket_alias(bucket_id, &global_alias)
+ .await?;
+ }
+ BucketAliasEnum::Local {
+ local_alias,
+ access_key_id,
+ } => {
+ helper
+ .unset_local_bucket_alias(bucket_id, &access_key_id, &local_alias)
+ .await?;
+ }
+ }
- bucket_info_results(garage, bucket_id).await
+ Ok(RemoveBucketAliasResponse(
+ bucket_info_results(garage, bucket_id).await?,
+ ))
+ }
}
// ---- HELPER ----
diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs
index ffa0fa71..dc16bd50 100644
--- a/src/api/admin/cluster.rs
+++ b/src/api/admin/cluster.rs
@@ -1,9 +1,7 @@
use std::collections::HashMap;
-use std::net::SocketAddr;
use std::sync::Arc;
-use hyper::{body::Incoming as IncomingBody, Request, Response};
-use serde::{Deserialize, Serialize};
+use async_trait::async_trait;
use garage_util::crdt::*;
use garage_util::data::*;
@@ -12,158 +10,170 @@ use garage_rpc::layout;
use garage_model::garage::Garage;
-use garage_api_common::helpers::{json_ok_response, parse_json_body};
-
-use crate::api_server::ResBody;
+use crate::api::*;
use crate::error::*;
-
-pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
- let layout = garage.system.cluster_layout();
- let mut nodes = garage
- .system
- .get_known_nodes()
- .into_iter()
- .map(|i| {
- (
- i.id,
- NodeResp {
- id: hex::encode(i.id),
- addr: i.addr,
- hostname: i.status.hostname,
- is_up: i.is_up,
- last_seen_secs_ago: i.last_seen_secs_ago,
- data_partition: i
- .status
- .data_disk_avail
- .map(|(avail, total)| FreeSpaceResp {
- available: avail,
- total,
+use crate::EndpointHandler;
+
+#[async_trait]
+impl EndpointHandler for GetClusterStatusRequest {
+ type Response = GetClusterStatusResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<GetClusterStatusResponse, Error> {
+ let layout = garage.system.cluster_layout();
+ let mut nodes = garage
+ .system
+ .get_known_nodes()
+ .into_iter()
+ .map(|i| {
+ (
+ i.id,
+ NodeResp {
+ id: hex::encode(i.id),
+ addr: i.addr,
+ hostname: i.status.hostname,
+ is_up: i.is_up,
+ last_seen_secs_ago: i.last_seen_secs_ago,
+ data_partition: i.status.data_disk_avail.map(|(avail, total)| {
+ FreeSpaceResp {
+ available: avail,
+ total,
+ }
}),
- metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
- FreeSpaceResp {
- available: avail,
- total,
- }
- }),
- ..Default::default()
- },
- )
- })
- .collect::<HashMap<_, _>>();
-
- for (id, _, role) in layout.current().roles.items().iter() {
- if let layout::NodeRoleV(Some(r)) = role {
- let role = NodeRoleResp {
- id: hex::encode(id),
- zone: r.zone.to_string(),
- capacity: r.capacity,
- tags: r.tags.clone(),
- };
- match nodes.get_mut(id) {
- None => {
- nodes.insert(
- *id,
- NodeResp {
- id: hex::encode(id),
- role: Some(role),
- ..Default::default()
- },
- );
- }
- Some(n) => {
- n.role = Some(role);
- }
- }
- }
- }
+ metadata_partition: i.status.meta_disk_avail.map(|(avail, total)| {
+ FreeSpaceResp {
+ available: avail,
+ total,
+ }
+ }),
+ ..Default::default()
+ },
+ )
+ })
+ .collect::<HashMap<_, _>>();
- for ver in layout.versions().iter().rev().skip(1) {
- for (id, _, role) in ver.roles.items().iter() {
+ for (id, _, role) in layout.current().roles.items().iter() {
if let layout::NodeRoleV(Some(r)) = role {
- if r.capacity.is_some() {
- if let Some(n) = nodes.get_mut(id) {
- if n.role.is_none() {
- n.draining = true;
- }
- } else {
+ let role = NodeRoleResp {
+ id: hex::encode(id),
+ zone: r.zone.to_string(),
+ capacity: r.capacity,
+ tags: r.tags.clone(),
+ };
+ match nodes.get_mut(id) {
+ None => {
nodes.insert(
*id,
NodeResp {
id: hex::encode(id),
- draining: true,
+ role: Some(role),
..Default::default()
},
);
}
+ Some(n) => {
+ n.role = Some(role);
+ }
}
}
}
- }
-
- let mut nodes = nodes.into_values().collect::<Vec<_>>();
- nodes.sort_by(|x, y| x.id.cmp(&y.id));
- let res = GetClusterStatusResponse {
- node: hex::encode(garage.system.id),
- garage_version: garage_util::version::garage_version(),
- garage_features: garage_util::version::garage_features(),
- rust_version: garage_util::version::rust_version(),
- db_engine: garage.db.engine(),
- layout_version: layout.current().version,
- nodes,
- };
+ for ver in layout.versions().iter().rev().skip(1) {
+ for (id, _, role) in ver.roles.items().iter() {
+ if let layout::NodeRoleV(Some(r)) = role {
+ if r.capacity.is_some() {
+ if let Some(n) = nodes.get_mut(id) {
+ if n.role.is_none() {
+ n.draining = true;
+ }
+ } else {
+ nodes.insert(
+ *id,
+ NodeResp {
+ id: hex::encode(id),
+ draining: true,
+ ..Default::default()
+ },
+ );
+ }
+ }
+ }
+ }
+ }
- Ok(json_ok_response(&res)?)
+ let mut nodes = nodes.into_values().collect::<Vec<_>>();
+ nodes.sort_by(|x, y| x.id.cmp(&y.id));
+
+ Ok(GetClusterStatusResponse {
+ node: hex::encode(garage.system.id),
+ garage_version: garage_util::version::garage_version().to_string(),
+ garage_features: garage_util::version::garage_features()
+ .map(|features| features.iter().map(ToString::to_string).collect()),
+ rust_version: garage_util::version::rust_version().to_string(),
+ db_engine: garage.db.engine(),
+ layout_version: layout.current().version,
+ nodes,
+ })
+ }
}
-pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
- use garage_rpc::system::ClusterHealthStatus;
- let health = garage.system.health();
- let health = ClusterHealth {
- status: match health.status {
- ClusterHealthStatus::Healthy => "healthy",
- ClusterHealthStatus::Degraded => "degraded",
- ClusterHealthStatus::Unavailable => "unavailable",
- },
- known_nodes: health.known_nodes,
- connected_nodes: health.connected_nodes,
- storage_nodes: health.storage_nodes,
- storage_nodes_ok: health.storage_nodes_ok,
- partitions: health.partitions,
- partitions_quorum: health.partitions_quorum,
- partitions_all_ok: health.partitions_all_ok,
- };
- Ok(json_ok_response(&health)?)
+#[async_trait]
+impl EndpointHandler for GetClusterHealthRequest {
+ type Response = GetClusterHealthResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<GetClusterHealthResponse, Error> {
+ use garage_rpc::system::ClusterHealthStatus;
+ let health = garage.system.health();
+ let health = GetClusterHealthResponse {
+ status: match health.status {
+ ClusterHealthStatus::Healthy => "healthy",
+ ClusterHealthStatus::Degraded => "degraded",
+ ClusterHealthStatus::Unavailable => "unavailable",
+ }
+ .to_string(),
+ known_nodes: health.known_nodes,
+ connected_nodes: health.connected_nodes,
+ storage_nodes: health.storage_nodes,
+ storage_nodes_ok: health.storage_nodes_ok,
+ partitions: health.partitions,
+ partitions_quorum: health.partitions_quorum,
+ partitions_all_ok: health.partitions_all_ok,
+ };
+ Ok(health)
+ }
}
-pub async fn handle_connect_cluster_nodes(
- garage: &Arc<Garage>,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<Vec<String>, _, Error>(req).await?;
-
- let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
- .await
- .into_iter()
- .map(|r| match r {
- Ok(()) => ConnectClusterNodesResponse {
- success: true,
- error: None,
- },
- Err(e) => ConnectClusterNodesResponse {
- success: false,
- error: Some(format!("{}", e)),
- },
- })
- .collect::<Vec<_>>();
-
- Ok(json_ok_response(&res)?)
+#[async_trait]
+impl EndpointHandler for ConnectClusterNodesRequest {
+ type Response = ConnectClusterNodesResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<ConnectClusterNodesResponse, Error> {
+ let res = futures::future::join_all(self.0.iter().map(|node| garage.system.connect(node)))
+ .await
+ .into_iter()
+ .map(|r| match r {
+ Ok(()) => ConnectNodeResponse {
+ success: true,
+ error: None,
+ },
+ Err(e) => ConnectNodeResponse {
+ success: false,
+ error: Some(format!("{}", e)),
+ },
+ })
+ .collect::<Vec<_>>();
+ Ok(ConnectClusterNodesResponse(res))
+ }
}
-pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
- let res = format_cluster_layout(garage.system.cluster_layout().inner());
+#[async_trait]
+impl EndpointHandler for GetClusterLayoutRequest {
+ type Response = GetClusterLayoutResponse;
- Ok(json_ok_response(&res)?)
+ async fn handle(self, garage: &Arc<Garage>) -> Result<GetClusterLayoutResponse, Error> {
+ Ok(format_cluster_layout(
+ garage.system.cluster_layout().inner(),
+ ))
+ }
}
fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResponse {
@@ -213,199 +223,89 @@ fn format_cluster_layout(layout: &layout::LayoutHistory) -> GetClusterLayoutResp
// ----
-#[derive(Debug, Clone, Copy, Serialize)]
-#[serde(rename_all = "camelCase")]
-pub struct ClusterHealth {
- status: &'static str,
- known_nodes: usize,
- connected_nodes: usize,
- storage_nodes: usize,
- storage_nodes_ok: usize,
- partitions: usize,
- partitions_quorum: usize,
- partitions_all_ok: usize,
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct GetClusterStatusResponse {
- node: String,
- garage_version: &'static str,
- garage_features: Option<&'static [&'static str]>,
- rust_version: &'static str,
- db_engine: String,
- layout_version: u64,
- nodes: Vec<NodeResp>,
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct ApplyClusterLayoutResponse {
- message: Vec<String>,
- layout: GetClusterLayoutResponse,
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct ConnectClusterNodesResponse {
- success: bool,
- error: Option<String>,
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct GetClusterLayoutResponse {
- version: u64,
- roles: Vec<NodeRoleResp>,
- staged_role_changes: Vec<NodeRoleChange>,
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct NodeRoleResp {
- id: String,
- zone: String,
- capacity: Option<u64>,
- tags: Vec<String>,
-}
-
-#[derive(Serialize, Default)]
-#[serde(rename_all = "camelCase")]
-struct FreeSpaceResp {
- available: u64,
- total: u64,
-}
-
-#[derive(Serialize, Default)]
-#[serde(rename_all = "camelCase")]
-struct NodeResp {
- id: String,
- role: Option<NodeRoleResp>,
- addr: Option<SocketAddr>,
- hostname: Option<String>,
- is_up: bool,
- last_seen_secs_ago: Option<u64>,
- draining: bool,
- #[serde(skip_serializing_if = "Option::is_none")]
- data_partition: Option<FreeSpaceResp>,
- #[serde(skip_serializing_if = "Option::is_none")]
- metadata_partition: Option<FreeSpaceResp>,
-}
-
// ---- update functions ----
-pub async fn handle_update_cluster_layout(
- garage: &Arc<Garage>,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
-
- let mut layout = garage.system.cluster_layout().inner().clone();
-
- let mut roles = layout.current().roles.clone();
- roles.merge(&layout.staging.get().roles);
-
- for change in updates {
- let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
- let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
-
- let new_role = match change.action {
- NodeRoleChangeEnum::Remove { remove: true } => None,
- NodeRoleChangeEnum::Update {
- zone,
- capacity,
- tags,
- } => Some(layout::NodeRole {
- zone,
- capacity,
- tags,
- }),
- _ => return Err(Error::bad_request("Invalid layout change")),
- };
-
- layout
- .staging
- .get_mut()
- .roles
- .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
- }
-
- garage
- .system
- .layout_manager
- .update_cluster_layout(&layout)
- .await?;
+#[async_trait]
+impl EndpointHandler for UpdateClusterLayoutRequest {
+ type Response = UpdateClusterLayoutResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<UpdateClusterLayoutResponse, Error> {
+ let mut layout = garage.system.cluster_layout().inner().clone();
+
+ let mut roles = layout.current().roles.clone();
+ roles.merge(&layout.staging.get().roles);
+
+ for change in self.0 {
+ let node = hex::decode(&change.id).ok_or_bad_request("Invalid node identifier")?;
+ let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?;
+
+ let new_role = match change.action {
+ NodeRoleChangeEnum::Remove { remove: true } => None,
+ NodeRoleChangeEnum::Update {
+ zone,
+ capacity,
+ tags,
+ } => Some(layout::NodeRole {
+ zone,
+ capacity,
+ tags,
+ }),
+ _ => return Err(Error::bad_request("Invalid layout change")),
+ };
- let res = format_cluster_layout(&layout);
- Ok(json_ok_response(&res)?)
-}
+ layout
+ .staging
+ .get_mut()
+ .roles
+ .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role)));
+ }
-pub async fn handle_apply_cluster_layout(
- garage: &Arc<Garage>,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let param = parse_json_body::<ApplyLayoutRequest, _, Error>(req).await?;
-
- let layout = garage.system.cluster_layout().inner().clone();
- let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
-
- garage
- .system
- .layout_manager
- .update_cluster_layout(&layout)
- .await?;
-
- let res = ApplyClusterLayoutResponse {
- message: msg,
- layout: format_cluster_layout(&layout),
- };
- Ok(json_ok_response(&res)?)
-}
+ garage
+ .system
+ .layout_manager
+ .update_cluster_layout(&layout)
+ .await?;
-pub async fn handle_revert_cluster_layout(
- garage: &Arc<Garage>,
-) -> Result<Response<ResBody>, Error> {
- let layout = garage.system.cluster_layout().inner().clone();
- let layout = layout.revert_staged_changes()?;
- garage
- .system
- .layout_manager
- .update_cluster_layout(&layout)
- .await?;
-
- let res = format_cluster_layout(&layout);
- Ok(json_ok_response(&res)?)
+ let res = format_cluster_layout(&layout);
+ Ok(UpdateClusterLayoutResponse(res))
+ }
}
-// ----
-
-type UpdateClusterLayoutRequest = Vec<NodeRoleChange>;
+#[async_trait]
+impl EndpointHandler for ApplyClusterLayoutRequest {
+ type Response = ApplyClusterLayoutResponse;
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct ApplyLayoutRequest {
- version: u64,
-}
+ async fn handle(self, garage: &Arc<Garage>) -> Result<ApplyClusterLayoutResponse, Error> {
+ let layout = garage.system.cluster_layout().inner().clone();
+ let (layout, msg) = layout.apply_staged_changes(Some(self.version))?;
-// ----
+ garage
+ .system
+ .layout_manager
+ .update_cluster_layout(&layout)
+ .await?;
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct NodeRoleChange {
- id: String,
- #[serde(flatten)]
- action: NodeRoleChangeEnum,
+ Ok(ApplyClusterLayoutResponse {
+ message: msg,
+ layout: format_cluster_layout(&layout),
+ })
+ }
}
-#[derive(Serialize, Deserialize)]
-#[serde(untagged)]
-enum NodeRoleChangeEnum {
- #[serde(rename_all = "camelCase")]
- Remove { remove: bool },
- #[serde(rename_all = "camelCase")]
- Update {
- zone: String,
- capacity: Option<u64>,
- tags: Vec<String>,
- },
+#[async_trait]
+impl EndpointHandler for RevertClusterLayoutRequest {
+ type Response = RevertClusterLayoutResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<RevertClusterLayoutResponse, Error> {
+ let layout = garage.system.cluster_layout().inner().clone();
+ let layout = layout.revert_staged_changes()?;
+ garage
+ .system
+ .layout_manager
+ .update_cluster_layout(&layout)
+ .await?;
+
+ let res = format_cluster_layout(&layout);
+ Ok(RevertClusterLayoutResponse(res))
+ }
}
diff --git a/src/api/admin/error.rs b/src/api/admin/error.rs
index 201f9b40..3712ee7d 100644
--- a/src/api/admin/error.rs
+++ b/src/api/admin/error.rs
@@ -49,7 +49,7 @@ impl From<HelperError> for Error {
}
impl Error {
- fn code(&self) -> &'static str {
+ pub fn code(&self) -> &'static str {
match self {
Error::Common(c) => c.aws_code(),
Error::NoSuchAccessKey(_) => "NoSuchAccessKey",
diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs
index bebf3063..5b7de075 100644
--- a/src/api/admin/key.rs
+++ b/src/api/admin/key.rs
@@ -1,173 +1,156 @@
use std::collections::HashMap;
use std::sync::Arc;
-use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
-use serde::{Deserialize, Serialize};
+use async_trait::async_trait;
use garage_table::*;
use garage_model::garage::Garage;
use garage_model::key_table::*;
-use garage_api_common::helpers::*;
-
-use crate::api_server::ResBody;
+use crate::api::*;
use crate::error::*;
-
-pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
- let res = garage
- .key_table
- .get_range(
- &EmptyKey,
- None,
- Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
- 10000,
- EnumerationOrder::Forward,
- )
- .await?
- .iter()
- .map(|k| ListKeyResultItem {
- id: k.key_id.to_string(),
- name: k.params().unwrap().name.get().clone(),
- })
- .collect::<Vec<_>>();
-
- Ok(json_ok_response(&res)?)
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct ListKeyResultItem {
- id: String,
- name: String,
-}
-
-pub async fn handle_get_key_info(
- garage: &Arc<Garage>,
- id: Option<String>,
- search: Option<String>,
- show_secret_key: bool,
-) -> Result<Response<ResBody>, Error> {
- let key = if let Some(id) = id {
- garage.key_helper().get_existing_key(&id).await?
- } else if let Some(search) = search {
- garage
- .key_helper()
- .get_existing_matching_key(&search)
+use crate::EndpointHandler;
+
+#[async_trait]
+impl EndpointHandler for ListKeysRequest {
+ type Response = ListKeysResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<ListKeysResponse, Error> {
+ let res = garage
+ .key_table
+ .get_range(
+ &EmptyKey,
+ None,
+ Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)),
+ 10000,
+ EnumerationOrder::Forward,
+ )
.await?
- } else {
- unreachable!();
- };
+ .iter()
+ .map(|k| ListKeysResponseItem {
+ id: k.key_id.to_string(),
+ name: k.params().unwrap().name.get().clone(),
+ })
+ .collect::<Vec<_>>();
- key_info_results(garage, key, show_secret_key).await
+ Ok(ListKeysResponse(res))
+ }
}
-pub async fn handle_create_key(
- garage: &Arc<Garage>,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
-
- let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
- garage.key_table.insert(&key).await?;
+#[async_trait]
+impl EndpointHandler for GetKeyInfoRequest {
+ type Response = GetKeyInfoResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<GetKeyInfoResponse, Error> {
+ let key = match (self.id, self.search) {
+ (Some(id), None) => garage.key_helper().get_existing_key(&id).await?,
+ (None, Some(search)) => {
+ garage
+ .key_helper()
+ .get_existing_matching_key(&search)
+ .await?
+ }
+ _ => {
+ return Err(Error::bad_request(
+ "Either id or search must be provided (but not both)",
+ ));
+ }
+ };
- key_info_results(garage, key, true).await
+ Ok(key_info_results(garage, key, self.show_secret_key).await?)
+ }
}
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct CreateKeyRequest {
- name: Option<String>,
-}
+#[async_trait]
+impl EndpointHandler for CreateKeyRequest {
+ type Response = CreateKeyResponse;
-pub async fn handle_import_key(
- garage: &Arc<Garage>,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
+ async fn handle(self, garage: &Arc<Garage>) -> Result<CreateKeyResponse, Error> {
+ let key = Key::new(self.name.as_deref().unwrap_or("Unnamed key"));
+ garage.key_table.insert(&key).await?;
- let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
- if prev_key.is_some() {
- return Err(Error::KeyAlreadyExists(req.access_key_id.to_string()));
+ Ok(CreateKeyResponse(
+ key_info_results(garage, key, true).await?,
+ ))
}
+}
- let imported_key = Key::import(
- &req.access_key_id,
- &req.secret_access_key,
- req.name.as_deref().unwrap_or("Imported key"),
- )
- .ok_or_bad_request("Invalid key format")?;
- garage.key_table.insert(&imported_key).await?;
+#[async_trait]
+impl EndpointHandler for ImportKeyRequest {
+ type Response = ImportKeyResponse;
- key_info_results(garage, imported_key, false).await
-}
+ async fn handle(self, garage: &Arc<Garage>) -> Result<ImportKeyResponse, Error> {
+ let prev_key = garage.key_table.get(&EmptyKey, &self.access_key_id).await?;
+ if prev_key.is_some() {
+ return Err(Error::KeyAlreadyExists(self.access_key_id.to_string()));
+ }
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct ImportKeyRequest {
- access_key_id: String,
- secret_access_key: String,
- name: Option<String>,
+ let imported_key = Key::import(
+ &self.access_key_id,
+ &self.secret_access_key,
+ self.name.as_deref().unwrap_or("Imported key"),
+ )
+ .ok_or_bad_request("Invalid key format")?;
+ garage.key_table.insert(&imported_key).await?;
+
+ Ok(ImportKeyResponse(
+ key_info_results(garage, imported_key, false).await?,
+ ))
+ }
}
-pub async fn handle_update_key(
- garage: &Arc<Garage>,
- id: String,
- req: Request<IncomingBody>,
-) -> Result<Response<ResBody>, Error> {
- let req = parse_json_body::<UpdateKeyRequest, _, Error>(req).await?;
+#[async_trait]
+impl EndpointHandler for UpdateKeyRequest {
+ type Response = UpdateKeyResponse;
- let mut key = garage.key_helper().get_existing_key(&id).await?;
+ async fn handle(self, garage: &Arc<Garage>) -> Result<UpdateKeyResponse, Error> {
+ let mut key = garage.key_helper().get_existing_key(&self.id).await?;
- let key_state = key.state.as_option_mut().unwrap();
+ let key_state = key.state.as_option_mut().unwrap();
- if let Some(new_name) = req.name {
- key_state.name.update(new_name);
- }
- if let Some(allow) = req.allow {
- if allow.create_bucket {
- key_state.allow_create_bucket.update(true);
+ if let Some(new_name) = self.body.name {
+ key_state.name.update(new_name);
}
- }
- if let Some(deny) = req.deny {
- if deny.create_bucket {
- key_state.allow_create_bucket.update(false);
+ if let Some(allow) = self.body.allow {
+ if allow.create_bucket {
+ key_state.allow_create_bucket.update(true);
+ }
+ }
+ if let Some(deny) = self.body.deny {
+ if deny.create_bucket {
+ key_state.allow_create_bucket.update(false);
+ }
}
- }
- garage.key_table.insert(&key).await?;
+ garage.key_table.insert(&key).await?;
- key_info_results(garage, key, false).await
+ Ok(UpdateKeyResponse(
+ key_info_results(garage, key, false).await?,
+ ))
+ }
}
-#[derive(Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct UpdateKeyRequest {
- name: Option<String>,
- allow: Option<KeyPerm>,
- deny: Option<KeyPerm>,
-}
+#[async_trait]
+impl EndpointHandler for DeleteKeyRequest {
+ type Response = DeleteKeyResponse;
-pub async fn handle_delete_key(
- garage: &Arc<Garage>,
- id: String,
-) -> Result<Response<ResBody>, Error> {
- let helper = garage.locked_helper().await;
+ async fn handle(self, garage: &Arc<Garage>) -> Result<DeleteKeyResponse, Error> {
+ let helper = garage.locked_helper().await;
- let mut key = helper.key().get_existing_key(&id).await?;
+ let mut key = helper.key().get_existing_key(&self.id).await?;
- helper.delete_key(&mut key).await?;
+ helper.delete_key(&mut key).await?;
- Ok(Response::builder()
- .status(StatusCode::NO_CONTENT)
- .body(empty_body())?)
+ Ok(DeleteKeyResponse)
+ }
}
async fn key_info_results(
garage: &Arc<Garage>,
key: Key,
show_secret: bool,
-) -> Result<Response<ResBody>, Error> {
+) -> Result<GetKeyInfoResponse, Error> {
let mut relevant_buckets = HashMap::new();
let key_state = key.state.as_option().unwrap();
@@ -193,7 +176,7 @@ async fn key_info_results(
}
}
- let res = GetKeyInfoResult {
+ let res = GetKeyInfoResponse {
name: key_state.name.get().clone(),
access_key_id: key.key_id.clone(),
secret_access_key: if show_secret {
@@ -208,7 +191,7 @@ async fn key_info_results(
.into_values()
.map(|bucket| {
let state = bucket.state.as_option().unwrap();
- KeyInfoBucketResult {
+ KeyInfoBucketResponse {
id: hex::encode(bucket.id),
global_aliases: state
.aliases
@@ -238,43 +221,5 @@ async fn key_info_results(
.collect::<Vec<_>>(),
};
- Ok(json_ok_response(&res)?)
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct GetKeyInfoResult {
- name: String,
- access_key_id: String,
- #[serde(skip_serializing_if = "is_default")]
- secret_access_key: Option<String>,
- permissions: KeyPerm,
- buckets: Vec<KeyInfoBucketResult>,
-}
-
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct KeyPerm {
- #[serde(default)]
- create_bucket: bool,
-}
-
-#[derive(Serialize)]
-#[serde(rename_all = "camelCase")]
-struct KeyInfoBucketResult {
- id: String,
- global_aliases: Vec<String>,
- local_aliases: Vec<String>,
- permissions: ApiBucketKeyPerm,
-}
-
-#[derive(Serialize, Deserialize, Default)]
-#[serde(rename_all = "camelCase")]
-pub(crate) struct ApiBucketKeyPerm {
- #[serde(default)]
- pub(crate) read: bool,
- #[serde(default)]
- pub(crate) write: bool,
- #[serde(default)]
- pub(crate) owner: bool,
+ Ok(res)
}
diff --git a/src/api/admin/lib.rs b/src/api/admin/lib.rs
index 599e9b44..31b3874d 100644
--- a/src/api/admin/lib.rs
+++ b/src/api/admin/lib.rs
@@ -3,9 +3,33 @@ extern crate tracing;
pub mod api_server;
mod error;
+mod macros;
+
+pub mod api;
mod router_v0;
mod router_v1;
+mod router_v2;
mod bucket;
mod cluster;
mod key;
+mod special;
+
+use std::sync::Arc;
+
+use async_trait::async_trait;
+
+use garage_model::garage::Garage;
+
+pub enum Authorization {
+ None,
+ MetricsToken,
+ AdminToken,
+}
+
+#[async_trait]
+pub trait EndpointHandler {
+ type Response;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<Self::Response, error::Error>;
+}
diff --git a/src/api/admin/macros.rs b/src/api/admin/macros.rs
new file mode 100644
index 00000000..9521616e
--- /dev/null
+++ b/src/api/admin/macros.rs
@@ -0,0 +1,94 @@
+macro_rules! admin_endpoints {
+ [
+ $(@special $special_endpoint:ident,)*
+ $($endpoint:ident,)*
+ ] => {
+ paste! {
+ #[derive(Debug, Clone, Serialize, Deserialize)]
+ pub enum AdminApiRequest {
+ $(
+ $special_endpoint( [<$special_endpoint Request>] ),
+ )*
+ $(
+ $endpoint( [<$endpoint Request>] ),
+ )*
+ }
+
+ #[derive(Debug, Clone, Serialize)]
+ #[serde(untagged)]
+ pub enum AdminApiResponse {
+ $(
+ $endpoint( [<$endpoint Response>] ),
+ )*
+ }
+
+ #[derive(Debug, Clone, Serialize, Deserialize)]
+ pub enum TaggedAdminApiResponse {
+ $(
+ $endpoint( [<$endpoint Response>] ),
+ )*
+ }
+
+ impl AdminApiRequest {
+ pub fn name(&self) -> &'static str {
+ match self {
+ $(
+ Self::$special_endpoint(_) => stringify!($special_endpoint),
+ )*
+ $(
+ Self::$endpoint(_) => stringify!($endpoint),
+ )*
+ }
+ }
+ }
+
+ impl AdminApiResponse {
+ pub fn tagged(self) -> TaggedAdminApiResponse {
+ match self {
+ $(
+ Self::$endpoint(res) => TaggedAdminApiResponse::$endpoint(res),
+ )*
+ }
+ }
+ }
+
+ $(
+ impl From< [< $endpoint Request >] > for AdminApiRequest {
+ fn from(req: [< $endpoint Request >]) -> AdminApiRequest {
+ AdminApiRequest::$endpoint(req)
+ }
+ }
+
+ impl TryFrom<TaggedAdminApiResponse> for [< $endpoint Response >] {
+ type Error = TaggedAdminApiResponse;
+ fn try_from(resp: TaggedAdminApiResponse) -> Result< [< $endpoint Response >], TaggedAdminApiResponse> {
+ match resp {
+ TaggedAdminApiResponse::$endpoint(v) => Ok(v),
+ x => Err(x),
+ }
+ }
+ }
+ )*
+
+ #[async_trait]
+ impl EndpointHandler for AdminApiRequest {
+ type Response = AdminApiResponse;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<AdminApiResponse, Error> {
+ Ok(match self {
+ $(
+ AdminApiRequest::$special_endpoint(_) => panic!(
+ concat!(stringify!($special_endpoint), " needs to go through a special handler")
+ ),
+ )*
+ $(
+ AdminApiRequest::$endpoint(req) => AdminApiResponse::$endpoint(req.handle(garage).await?),
+ )*
+ })
+ }
+ }
+ }
+ };
+}
+
+pub(crate) use admin_endpoints;
diff --git a/src/api/admin/router_v1.rs b/src/api/admin/router_v1.rs
index 0b4901ea..138a801d 100644
--- a/src/api/admin/router_v1.rs
+++ b/src/api/admin/router_v1.rs
@@ -7,12 +7,6 @@ use garage_api_common::router_macros::*;
use crate::error::*;
use crate::router_v0;
-pub enum Authorization {
- None,
- MetricsToken,
- AdminToken,
-}
-
router_match! {@func
/// List of all Admin API endpoints.
@@ -211,15 +205,6 @@ impl Endpoint {
))),
}
}
- /// Get the kind of authorization which is required to perform the operation.
- pub fn authorization_type(&self) -> Authorization {
- match self {
- Self::Health => Authorization::None,
- Self::CheckDomain => Authorization::None,
- Self::Metrics => Authorization::MetricsToken,
- _ => Authorization::AdminToken,
- }
- }
}
generateQueryParameters! {
diff --git a/src/api/admin/router_v2.rs b/src/api/admin/router_v2.rs
new file mode 100644
index 00000000..b36bca34
--- /dev/null
+++ b/src/api/admin/router_v2.rs
@@ -0,0 +1,251 @@
+use std::borrow::Cow;
+
+use hyper::body::Incoming as IncomingBody;
+use hyper::{Method, Request};
+use paste::paste;
+
+use garage_api_common::helpers::*;
+use garage_api_common::router_macros::*;
+
+use crate::api::*;
+use crate::error::*;
+use crate::router_v1;
+use crate::Authorization;
+
+impl AdminApiRequest {
+ /// Determine which admin API endpoint a request is for by matching the
+ /// HTTP method and the request path (under the /v2/ prefix), extracting
+ /// arguments from query parameters and/or the JSON request body as needed.
+ pub async fn from_request(req: Request<IncomingBody>) -> Result<Self, Error> {
+ let uri = req.uri().clone();
+ let path = uri.path();
+ let query = uri.query();
+
+ let method = req.method().clone();
+
+ let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
+
+ let res = router_match!(@gen_path_parser_v2 (&method, path, "/v2/", query, req) [
+ @special OPTIONS _ => Options (),
+ @special GET "/check" => CheckDomain (query::domain),
+ @special GET "/health" => Health (),
+ @special GET "/metrics" => Metrics (),
+ // Cluster endpoints
+ GET GetClusterStatus (),
+ GET GetClusterHealth (),
+ POST ConnectClusterNodes (body),
+ // Layout endpoints
+ GET GetClusterLayout (),
+ POST UpdateClusterLayout (body),
+ POST ApplyClusterLayout (body),
+ POST RevertClusterLayout (),
+ // API key endpoints
+ GET GetKeyInfo (query_opt::id, query_opt::search, parse_default(false)::show_secret_key),
+ POST UpdateKey (body_field, query::id),
+ POST CreateKey (body),
+ POST ImportKey (body),
+ POST DeleteKey (query::id),
+ GET ListKeys (),
+ // Bucket endpoints
+ GET GetBucketInfo (query_opt::id, query_opt::global_alias, query_opt::search),
+ GET ListBuckets (),
+ POST CreateBucket (body),
+ POST DeleteBucket (query::id),
+ POST UpdateBucket (body_field, query::id),
+ // Bucket-key permissions
+ POST AllowBucketKey (body),
+ POST DenyBucketKey (body),
+ // Bucket aliases
+ POST AddBucketAlias (body),
+ POST RemoveBucketAlias (body),
+ ]);
+
+ if let Some(message) = query.nonempty_message() {
+ debug!("Unused query parameter: {}", message)
+ }
+
+ Ok(res)
+ }
+
+ /// Some endpoints work exactly the same in their v2/ version as they did in their v1/ version.
+ /// For these endpoints, we can convert a v1/ call to its equivalent as if it was made using
+ /// its v2/ URL.
+ pub async fn from_v1(
+ v1_endpoint: router_v1::Endpoint,
+ req: Request<IncomingBody>,
+ ) -> Result<Self, Error> {
+ use router_v1::Endpoint;
+
+ match v1_endpoint {
+ Endpoint::GetClusterStatus => {
+ Ok(AdminApiRequest::GetClusterStatus(GetClusterStatusRequest))
+ }
+ Endpoint::GetClusterHealth => {
+ Ok(AdminApiRequest::GetClusterHealth(GetClusterHealthRequest))
+ }
+ Endpoint::ConnectClusterNodes => {
+ let req = parse_json_body::<ConnectClusterNodesRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::ConnectClusterNodes(req))
+ }
+
+ // Layout
+ Endpoint::GetClusterLayout => {
+ Ok(AdminApiRequest::GetClusterLayout(GetClusterLayoutRequest))
+ }
+ Endpoint::UpdateClusterLayout => {
+ let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::UpdateClusterLayout(updates))
+ }
+ Endpoint::ApplyClusterLayout => {
+ let param = parse_json_body::<ApplyClusterLayoutRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::ApplyClusterLayout(param))
+ }
+ Endpoint::RevertClusterLayout => Ok(AdminApiRequest::RevertClusterLayout(
+ RevertClusterLayoutRequest,
+ )),
+
+ // Keys
+ Endpoint::ListKeys => Ok(AdminApiRequest::ListKeys(ListKeysRequest)),
+ Endpoint::GetKeyInfo {
+ id,
+ search,
+ show_secret_key,
+ } => {
+ let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
+ Ok(AdminApiRequest::GetKeyInfo(GetKeyInfoRequest {
+ id,
+ search,
+ show_secret_key,
+ }))
+ }
+ Endpoint::CreateKey => {
+ let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::CreateKey(req))
+ }
+ Endpoint::ImportKey => {
+ let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::ImportKey(req))
+ }
+ Endpoint::UpdateKey { id } => {
+ let body = parse_json_body::<UpdateKeyRequestBody, _, Error>(req).await?;
+ Ok(AdminApiRequest::UpdateKey(UpdateKeyRequest { id, body }))
+ }
+
+ // DeleteKey semantics changed:
+ // - in v1/ : HTTP DELETE => HTTP 204 No Content
+ // - in v2/ : HTTP POST => HTTP 200 Ok
+ // Endpoint::DeleteKey { id } => Ok(AdminApiRequest::DeleteKey(DeleteKeyRequest { id })),
+
+ // Buckets
+ Endpoint::ListBuckets => Ok(AdminApiRequest::ListBuckets(ListBucketsRequest)),
+ Endpoint::GetBucketInfo { id, global_alias } => {
+ Ok(AdminApiRequest::GetBucketInfo(GetBucketInfoRequest {
+ id,
+ global_alias,
+ search: None,
+ }))
+ }
+ Endpoint::CreateBucket => {
+ let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::CreateBucket(req))
+ }
+
+ // DeleteBucket semantics changed:
+ // - in v1/ : HTTP DELETE => HTTP 204 No Content
+ // - in v2/ : HTTP POST => HTTP 200 Ok
+ // Endpoint::DeleteBucket { id } => {
+ // Ok(AdminApiRequest::DeleteBucket(DeleteBucketRequest { id }))
+ // }
+ Endpoint::UpdateBucket { id } => {
+ let body = parse_json_body::<UpdateBucketRequestBody, _, Error>(req).await?;
+ Ok(AdminApiRequest::UpdateBucket(UpdateBucketRequest {
+ id,
+ body,
+ }))
+ }
+
+ // Bucket-key permissions
+ Endpoint::BucketAllowKey => {
+ let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::AllowBucketKey(AllowBucketKeyRequest(req)))
+ }
+ Endpoint::BucketDenyKey => {
+ let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;
+ Ok(AdminApiRequest::DenyBucketKey(DenyBucketKeyRequest(req)))
+ }
+ // Bucket aliasing
+ Endpoint::GlobalAliasBucket { id, alias } => {
+ Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
+ bucket_id: id,
+ alias: BucketAliasEnum::Global {
+ global_alias: alias,
+ },
+ }))
+ }
+ Endpoint::GlobalUnaliasBucket { id, alias } => Ok(AdminApiRequest::RemoveBucketAlias(
+ RemoveBucketAliasRequest {
+ bucket_id: id,
+ alias: BucketAliasEnum::Global {
+ global_alias: alias,
+ },
+ },
+ )),
+ Endpoint::LocalAliasBucket {
+ id,
+ access_key_id,
+ alias,
+ } => Ok(AdminApiRequest::AddBucketAlias(AddBucketAliasRequest {
+ bucket_id: id,
+ alias: BucketAliasEnum::Local {
+ local_alias: alias,
+ access_key_id,
+ },
+ })),
+ Endpoint::LocalUnaliasBucket {
+ id,
+ access_key_id,
+ alias,
+ } => Ok(AdminApiRequest::RemoveBucketAlias(
+ RemoveBucketAliasRequest {
+ bucket_id: id,
+ alias: BucketAliasEnum::Local {
+ local_alias: alias,
+ access_key_id,
+ },
+ },
+ )),
+
+ // For endpoints whose body content syntax changed in v2/, return an
+ // error indicating that the v1/ endpoint is no longer supported
+ _ => Err(Error::bad_request(format!(
+ "v1/ endpoint is no longer supported: {}",
+ v1_endpoint.name()
+ ))),
+ }
+ }
+
+ /// Get the kind of authorization which is required to perform the operation.
+ pub fn authorization_type(&self) -> Authorization {
+ match self {
+ Self::Options(_) => Authorization::None,
+ Self::Health(_) => Authorization::None,
+ Self::CheckDomain(_) => Authorization::None,
+ Self::Metrics(_) => Authorization::MetricsToken,
+ _ => Authorization::AdminToken,
+ }
+ }
+}
+
+generateQueryParameters! {
+ keywords: [],
+ fields: [
+ "domain" => domain,
+ "format" => format,
+ "id" => id,
+ "search" => search,
+ "globalAlias" => global_alias,
+ "alias" => alias,
+ "accessKeyId" => access_key_id,
+ "showSecretKey" => show_secret_key
+ ]
+}
diff --git a/src/api/admin/special.rs b/src/api/admin/special.rs
new file mode 100644
index 00000000..0b26fe32
--- /dev/null
+++ b/src/api/admin/special.rs
@@ -0,0 +1,133 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+
+use http::header::{
+ ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW,
+};
+use hyper::{Response, StatusCode};
+
+use garage_model::garage::Garage;
+use garage_rpc::system::ClusterHealthStatus;
+
+use garage_api_common::helpers::*;
+
+use crate::api::{CheckDomainRequest, HealthRequest, OptionsRequest};
+use crate::api_server::ResBody;
+use crate::error::*;
+use crate::EndpointHandler;
+
+#[async_trait]
+impl EndpointHandler for OptionsRequest {
+ type Response = Response<ResBody>;
+
+ async fn handle(self, _garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
+ Ok(Response::builder()
+ .status(StatusCode::OK)
+ .header(ALLOW, "OPTIONS,GET,POST")
+ .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS,GET,POST")
+ .header(ACCESS_CONTROL_ALLOW_HEADERS, "authorization,content-type")
+ .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
+ .body(empty_body())?)
+ }
+}
+
+#[async_trait]
+impl EndpointHandler for CheckDomainRequest {
+ type Response = Response<ResBody>;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
+ if check_domain(garage, &self.domain).await? {
+ Ok(Response::builder()
+ .status(StatusCode::OK)
+ .body(string_body(format!(
+ "Domain '{}' is managed by Garage",
+ self.domain
+ )))?)
+ } else {
+ Err(Error::bad_request(format!(
+ "Domain '{}' is not managed by Garage",
+ self.domain
+ )))
+ }
+ }
+}
+
+async fn check_domain(garage: &Arc<Garage>, domain: &str) -> Result<bool, Error> {
+ // Resolve bucket from domain name, inferring if the website must be activated for the
+ // domain to be valid.
+ let (bucket_name, must_check_website) = if let Some(bname) = garage
+ .config
+ .s3_api
+ .root_domain
+ .as_ref()
+ .and_then(|rd| host_to_bucket(domain, rd))
+ {
+ (bname.to_string(), false)
+ } else if let Some(bname) = garage
+ .config
+ .s3_web
+ .as_ref()
+ .and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
+ {
+ (bname.to_string(), true)
+ } else {
+ (domain.to_string(), true)
+ };
+
+ let bucket_id = match garage
+ .bucket_helper()
+ .resolve_global_bucket_name(&bucket_name)
+ .await?
+ {
+ Some(bucket_id) => bucket_id,
+ None => return Ok(false),
+ };
+
+ if !must_check_website {
+ return Ok(true);
+ }
+
+ let bucket = garage
+ .bucket_helper()
+ .get_existing_bucket(bucket_id)
+ .await?;
+
+ let bucket_state = bucket.state.as_option().unwrap();
+ let bucket_website_config = bucket_state.website_config.get();
+
+ match bucket_website_config {
+ Some(_v) => Ok(true),
+ None => Ok(false),
+ }
+}
+
+#[async_trait]
+impl EndpointHandler for HealthRequest {
+ type Response = Response<ResBody>;
+
+ async fn handle(self, garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
+ let health = garage.system.health();
+
+ let (status, status_str) = match health.status {
+ ClusterHealthStatus::Healthy => (StatusCode::OK, "Garage is fully operational"),
+ ClusterHealthStatus::Degraded => (
+ StatusCode::OK,
+ "Garage is operational but some storage nodes are unavailable",
+ ),
+ ClusterHealthStatus::Unavailable => (
+ StatusCode::SERVICE_UNAVAILABLE,
+ "Quorum is not available for some/all partitions, reads and writes will fail",
+ ),
+ };
+ let status_str = format!(
+ "{}\nConsult the full health check API endpoint at /v2/GetClusterHealth for more details\n",
+ status_str
+ );
+
+ Ok(Response::builder()
+ .status(status)
+ .header(http::header::CONTENT_TYPE, "text/plain")
+ .body(string_body(status_str))?)
+ }
+}