From 382e74c798263d042b1c6ca3788c866a8c69c4f4 Mon Sep 17 00:00:00 2001
From: Alex
Date: Tue, 24 May 2022 12:16:39 +0200
Subject: First version of admin API (#298)

**Spec:**

- [x] Start writing
- [x] Specify all layout endpoints
- [x] Specify all endpoints for operations on keys
- [x] Specify all endpoints for operations on key/bucket permissions
- [x] Specify all endpoints for operations on buckets
- [x] Specify all endpoints for operations on bucket aliases

View rendered spec at

**Code:**

- [x] Refactor code for admin api to use common api code that was created for K2V

**General endpoints:**

- [x] Metrics
- [x] GetClusterStatus
- [x] ConnectClusterNodes
- [x] GetClusterLayout
- [x] UpdateClusterLayout
- [x] ApplyClusterLayout
- [x] RevertClusterLayout

**Key-related endpoints:**

- [x] ListKeys
- [x] CreateKey
- [x] ImportKey
- [x] GetKeyInfo
- [x] UpdateKey
- [x] DeleteKey

**Bucket-related endpoints:**

- [x] ListBuckets
- [x] CreateBucket
- [x] GetBucketInfo
- [x] DeleteBucket
- [x] PutBucketWebsite
- [x] DeleteBucketWebsite

**Operations on key/bucket permissions:**

- [x] BucketAllowKey
- [x] BucketDenyKey

**Operations on bucket aliases:**

- [x] GlobalAliasBucket
- [x] GlobalUnaliasBucket
- [x] LocalAliasBucket
- [x] LocalUnaliasBucket

**And also:**

- [x] Separate error type for the admin API (this PR includes a quite big refactoring of error handling)
- [x] Add management of website access
- [ ] Check that nothing is missing wrt what can be done using the CLI
- [ ] Improve formatting of the spec
- [x] Make sure everyone is cool with the API design

Fix #231
Fix #295

Co-authored-by: Alex Auvolat
Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/298
Co-authored-by: Alex
Co-committed-by: Alex
---
 src/api/admin/api_server.rs | 199 ++++++++++++++++
 src/api/admin/bucket.rs     | 549 ++++++++++++++++++++++++++++++++++++++++++++
 src/api/admin/cluster.rs    | 198 ++++++++++++++++
 src/api/admin/error.rs      |  96 ++++++++
 src/api/admin/key.rs        | 264 +++++++++++++++++++++
 src/api/admin/mod.rs        |   7 +
 src/api/admin/router.rs     | 149 ++++++++++++
 7 files changed, 1462 insertions(+)
 create mode 100644 src/api/admin/api_server.rs
 create mode 100644 src/api/admin/bucket.rs
 create mode 100644 src/api/admin/cluster.rs
 create mode 100644 src/api/admin/error.rs
 create mode 100644 src/api/admin/key.rs
 create mode 100644 src/api/admin/mod.rs
 create mode 100644 src/api/admin/router.rs

(limited to 'src/api/admin')

diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
new file mode 100644
index 00000000..57e3e5cf
--- /dev/null
+++ b/src/api/admin/api_server.rs
@@ -0,0 +1,199 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+
+use futures::future::Future;
+use http::header::{
+	ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW, CONTENT_TYPE,
+};
+use hyper::{Body, Request, Response};
+
+use opentelemetry::trace::{SpanRef, Tracer};
+use opentelemetry_prometheus::PrometheusExporter;
+use prometheus::{Encoder, TextEncoder};
+
+use garage_model::garage::Garage;
+use garage_util::error::Error as GarageError;
+
+use crate::generic_server::*;
+
+use crate::admin::bucket::*;
+use crate::admin::cluster::*;
+use crate::admin::error::*;
+use crate::admin::key::*;
+use crate::admin::router::{Authorization, Endpoint};
+
+pub struct AdminApiServer {
+	garage: Arc<Garage>,
+	exporter: PrometheusExporter,
+	metrics_token: Option<String>,
+	admin_token: Option<String>,
+}
+
+impl AdminApiServer {
+	pub fn new(garage: Arc<Garage>) -> Self {
+		let exporter = opentelemetry_prometheus::exporter().init();
+		let cfg = 
&garage.config.admin; + let metrics_token = cfg + .metrics_token + .as_ref() + .map(|tok| format!("Bearer {}", tok)); + let admin_token = cfg + .admin_token + .as_ref() + .map(|tok| format!("Bearer {}", tok)); + Self { + garage, + exporter, + metrics_token, + admin_token, + } + } + + pub async fn run(self, shutdown_signal: impl Future) -> Result<(), GarageError> { + if let Some(bind_addr) = self.garage.config.admin.api_bind_addr { + let region = self.garage.config.s3_api.s3_region.clone(); + ApiServer::new(region, self) + .run_server(bind_addr, shutdown_signal) + .await + } else { + Ok(()) + } + } + + fn handle_options(&self, _req: &Request) -> Result, Error> { + Ok(Response::builder() + .status(204) + .header(ALLOW, "OPTIONS, GET, POST") + .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST") + .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .body(Body::empty())?) + } + + fn handle_metrics(&self) -> Result, Error> { + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + + let tracer = opentelemetry::global::tracer("garage"); + let metric_families = tracer.in_span("admin/gather_metrics", |_| { + self.exporter.registry().gather() + }); + + encoder + .encode(&metric_families, &mut buffer) + .ok_or_internal_error("Could not serialize metrics")?; + + Ok(Response::builder() + .status(200) + .header(CONTENT_TYPE, encoder.format_type()) + .body(Body::from(buffer))?) + } +} + +#[async_trait] +impl ApiHandler for AdminApiServer { + const API_NAME: &'static str = "admin"; + const API_NAME_DISPLAY: &'static str = "Admin"; + + type Endpoint = Endpoint; + type Error = Error; + + fn parse_endpoint(&self, req: &Request) -> Result { + Endpoint::from_request(req) + } + + async fn handle( + &self, + req: Request, + endpoint: Endpoint, + ) -> Result, Error> { + let expected_auth_header = + match endpoint.authorization_type() { + Authorization::MetricsToken => self.metrics_token.as_ref(), + Authorization::AdminToken => match &self.admin_token { + None => return Err(Error::forbidden( + "Admin token isn't configured, admin API access is disabled for security.", + )), + Some(t) => Some(t), + }, + }; + + if let Some(h) = expected_auth_header { + match req.headers().get("Authorization") { + None => return Err(Error::forbidden("Authorization token must be provided")), + Some(v) => { + let authorized = v.to_str().map(|hv| hv.trim() == h).unwrap_or(false); + if !authorized { + return Err(Error::forbidden("Invalid authorization token provided")); + } + } + } + } + + match endpoint { + Endpoint::Options => self.handle_options(&req), + Endpoint::Metrics => self.handle_metrics(), + Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await, + Endpoint::ConnectClusterNodes => handle_connect_cluster_nodes(&self.garage, req).await, + // Layout + Endpoint::GetClusterLayout => handle_get_cluster_layout(&self.garage).await, + Endpoint::UpdateClusterLayout => handle_update_cluster_layout(&self.garage, req).await, + Endpoint::ApplyClusterLayout => handle_apply_cluster_layout(&self.garage, req).await, + Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await, + // Keys + Endpoint::ListKeys => handle_list_keys(&self.garage).await, + Endpoint::GetKeyInfo { id, search } => { + handle_get_key_info(&self.garage, id, search).await + } + Endpoint::CreateKey => handle_create_key(&self.garage, req).await, + Endpoint::ImportKey => handle_import_key(&self.garage, req).await, + Endpoint::UpdateKey { id } => handle_update_key(&self.garage, id, req).await, + Endpoint::DeleteKey { id } 
=> handle_delete_key(&self.garage, id).await, + // Buckets + Endpoint::ListBuckets => handle_list_buckets(&self.garage).await, + Endpoint::GetBucketInfo { id, global_alias } => { + handle_get_bucket_info(&self.garage, id, global_alias).await + } + Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await, + Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await, + Endpoint::PutBucketWebsite { id } => { + handle_put_bucket_website(&self.garage, id, req).await + } + Endpoint::DeleteBucketWebsite { id } => { + handle_delete_bucket_website(&self.garage, id).await + } + // Bucket-key permissions + Endpoint::BucketAllowKey => { + handle_bucket_change_key_perm(&self.garage, req, true).await + } + Endpoint::BucketDenyKey => { + handle_bucket_change_key_perm(&self.garage, req, false).await + } + // Bucket aliasing + Endpoint::GlobalAliasBucket { id, alias } => { + handle_global_alias_bucket(&self.garage, id, alias).await + } + Endpoint::GlobalUnaliasBucket { id, alias } => { + handle_global_unalias_bucket(&self.garage, id, alias).await + } + Endpoint::LocalAliasBucket { + id, + access_key_id, + alias, + } => handle_local_alias_bucket(&self.garage, id, access_key_id, alias).await, + Endpoint::LocalUnaliasBucket { + id, + access_key_id, + alias, + } => handle_local_unalias_bucket(&self.garage, id, access_key_id, alias).await, + } + } +} + +impl ApiEndpoint for Endpoint { + fn name(&self) -> &'static str { + Endpoint::name(self) + } + + fn add_span_attributes(&self, _span: SpanRef<'_>) {} +} diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs new file mode 100644 index 00000000..849d28ac --- /dev/null +++ b/src/api/admin/bucket.rs @@ -0,0 +1,549 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; + +use garage_util::crdt::*; +use garage_util::data::*; +use garage_util::error::Error as GarageError; +use garage_util::time::*; + +use garage_table::*; + +use garage_model::bucket_alias_table::*; +use garage_model::bucket_table::*; +use garage_model::garage::Garage; +use garage_model::permission::*; + +use crate::admin::error::*; +use crate::admin::key::ApiBucketKeyPerm; +use crate::common_error::CommonError; +use crate::helpers::parse_json_body; + +pub async fn handle_list_buckets(garage: &Arc) -> Result, Error> { + let buckets = garage + .bucket_table + .get_range( + &EmptyKey, + None, + Some(DeletedFilter::NotDeleted), + 10000, + EnumerationOrder::Forward, + ) + .await?; + + let res = buckets + .into_iter() + .map(|b| { + let state = b.state.as_option().unwrap(); + ListBucketResultItem { + id: hex::encode(b.id), + global_aliases: state + .aliases + .items() + .iter() + .filter(|(_, _, a)| *a) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + local_aliases: state + .local_aliases + .items() + .iter() + .filter(|(_, _, a)| *a) + .map(|((k, n), _, _)| BucketLocalAlias { + access_key_id: k.to_string(), + alias: n.to_string(), + }) + .collect::>(), + } + }) + .collect::>(); + + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) 
+} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ListBucketResultItem { + id: String, + global_aliases: Vec, + local_aliases: Vec, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct BucketLocalAlias { + access_key_id: String, + alias: String, +} + +pub async fn handle_get_bucket_info( + garage: &Arc, + id: Option, + global_alias: Option, +) -> Result, Error> { + let bucket_id = match (id, global_alias) { + (Some(id), None) => parse_bucket_id(&id)?, + (None, Some(ga)) => garage + .bucket_helper() + .resolve_global_bucket_name(&ga) + .await? + .ok_or_else(|| HelperError::NoSuchBucket(ga.to_string()))?, + _ => { + return Err(Error::bad_request( + "Either id or globalAlias must be provided (but not both)", + )); + } + }; + + bucket_info_results(garage, bucket_id).await +} + +async fn bucket_info_results( + garage: &Arc, + bucket_id: Uuid, +) -> Result, Error> { + let bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let mut relevant_keys = HashMap::new(); + for (k, _) in bucket + .state + .as_option() + .unwrap() + .authorized_keys + .items() + .iter() + { + if let Some(key) = garage + .key_table + .get(&EmptyKey, k) + .await? + .filter(|k| !k.is_deleted()) + { + if !key.state.is_deleted() { + relevant_keys.insert(k.clone(), key); + } + } + } + for ((k, _), _, _) in bucket + .state + .as_option() + .unwrap() + .local_aliases + .items() + .iter() + { + if relevant_keys.contains_key(k) { + continue; + } + if let Some(key) = garage.key_table.get(&EmptyKey, k).await? { + if !key.state.is_deleted() { + relevant_keys.insert(k.clone(), key); + } + } + } + + let state = bucket.state.as_option().unwrap(); + + let res = + GetBucketInfoResult { + id: hex::encode(&bucket.id), + global_aliases: state + .aliases + .items() + .iter() + .filter(|(_, _, a)| *a) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + website_access: state.website_config.get().is_some(), + website_config: state.website_config.get().clone().map(|wsc| { + GetBucketInfoWebsiteResult { + index_document: wsc.index_document, + error_document: wsc.error_document, + } + }), + keys: relevant_keys + .into_iter() + .map(|(_, key)| { + let p = key.state.as_option().unwrap(); + GetBucketInfoKey { + access_key_id: key.key_id, + name: p.name.get().to_string(), + permissions: p + .authorized_buckets + .get(&bucket.id) + .map(|p| ApiBucketKeyPerm { + read: p.allow_read, + write: p.allow_write, + owner: p.allow_owner, + }) + .unwrap_or_default(), + bucket_local_aliases: p + .local_aliases + .items() + .iter() + .filter(|(_, _, b)| *b == Some(bucket.id)) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + } + }) + .collect::>(), + }; + + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) 
+} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetBucketInfoResult { + id: String, + global_aliases: Vec, + website_access: bool, + #[serde(default)] + website_config: Option, + keys: Vec, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetBucketInfoWebsiteResult { + index_document: String, + error_document: Option, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetBucketInfoKey { + access_key_id: String, + name: String, + permissions: ApiBucketKeyPerm, + bucket_local_aliases: Vec, +} + +pub async fn handle_create_bucket( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + if let Some(ga) = &req.global_alias { + if !is_valid_bucket_name(ga) { + return Err(Error::bad_request(format!( + "{}: {}", + ga, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + if let Some(alias) = garage.bucket_alias_table.get(&EmptyKey, ga).await? { + if alias.state.get().is_some() { + return Err(CommonError::BucketAlreadyExists.into()); + } + } + } + + if let Some(la) = &req.local_alias { + if !is_valid_bucket_name(&la.alias) { + return Err(Error::bad_request(format!( + "{}: {}", + la.alias, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + let key = garage + .key_helper() + .get_existing_key(&la.access_key_id) + .await?; + let state = key.state.as_option().unwrap(); + if matches!(state.local_aliases.get(&la.alias), Some(_)) { + return Err(Error::bad_request("Local alias already exists")); + } + } + + let bucket = Bucket::new(); + garage.bucket_table.insert(&bucket).await?; + + if let Some(ga) = &req.global_alias { + garage + .bucket_helper() + .set_global_bucket_alias(bucket.id, ga) + .await?; + } + + if let Some(la) = &req.local_alias { + garage + .bucket_helper() + .set_local_bucket_alias(bucket.id, &la.access_key_id, &la.alias) + .await?; + + if la.allow.read || la.allow.write || la.allow.owner { + garage + .bucket_helper() + .set_bucket_key_permissions( + bucket.id, + &la.access_key_id, + BucketKeyPerm { + timestamp: now_msec(), + allow_read: la.allow.read, + allow_write: la.allow.write, + allow_owner: la.allow.owner, + }, + ) + .await?; + } + } + + bucket_info_results(garage, bucket.id).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CreateBucketRequest { + global_alias: Option, + local_alias: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct CreateBucketLocalAlias { + access_key_id: String, + alias: String, + #[serde(default)] + allow: ApiBucketKeyPerm, +} + +pub async fn handle_delete_bucket( + garage: &Arc, + id: String, +) -> Result, Error> { + let helper = garage.bucket_helper(); + + let bucket_id = parse_bucket_id(&id)?; + + let mut bucket = helper.get_existing_bucket(bucket_id).await?; + let state = bucket.state.as_option().unwrap(); + + // Check bucket is empty + if !helper.is_bucket_empty(bucket_id).await? { + return Err(CommonError::BucketNotEmpty.into()); + } + + // --- done checking, now commit --- + // 1. delete authorization from keys that had access + for (key_id, perm) in bucket.authorized_keys() { + if perm.is_any() { + helper + .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS) + .await?; + } + } + // 2. delete all local aliases + for ((key_id, alias), _, active) in state.local_aliases.items().iter() { + if *active { + helper + .unset_local_bucket_alias(bucket.id, key_id, alias) + .await?; + } + } + // 3. 
delete all global aliases + for (alias, _, active) in state.aliases.items().iter() { + if *active { + helper.purge_global_bucket_alias(bucket.id, alias).await?; + } + } + + // 4. delete bucket + bucket.state = Deletable::delete(); + garage.bucket_table.insert(&bucket).await?; + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) +} + +// ---- BUCKET WEBSITE CONFIGURATION ---- + +pub async fn handle_put_bucket_website( + garage: &Arc, + id: String, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + let bucket_id = parse_bucket_id(&id)?; + + let mut bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let state = bucket.state.as_option_mut().unwrap(); + state.website_config.update(Some(WebsiteConfig { + index_document: req.index_document, + error_document: req.error_document, + })); + + garage.bucket_table.insert(&bucket).await?; + + bucket_info_results(garage, bucket_id).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct PutBucketWebsiteRequest { + index_document: String, + #[serde(default)] + error_document: Option, +} + +pub async fn handle_delete_bucket_website( + garage: &Arc, + id: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&id)?; + + let mut bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + + let state = bucket.state.as_option_mut().unwrap(); + state.website_config.update(None); + + garage.bucket_table.insert(&bucket).await?; + + bucket_info_results(garage, bucket_id).await +} + +// ---- BUCKET/KEY PERMISSIONS ---- + +pub async fn handle_bucket_change_key_perm( + garage: &Arc, + req: Request, + new_perm_flag: bool, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let bucket_id = parse_bucket_id(&req.bucket_id)?; + + let bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let state = bucket.state.as_option().unwrap(); + + let key = garage + .key_helper() + .get_existing_key(&req.access_key_id) + .await?; + + let mut perm = state + .authorized_keys + .get(&key.key_id) + .cloned() + .unwrap_or(BucketKeyPerm::NO_PERMISSIONS); + + if req.permissions.read { + perm.allow_read = new_perm_flag; + } + if req.permissions.write { + perm.allow_write = new_perm_flag; + } + if req.permissions.owner { + perm.allow_owner = new_perm_flag; + } + + garage + .bucket_helper() + .set_bucket_key_permissions(bucket.id, &key.key_id, perm) + .await?; + + bucket_info_results(garage, bucket.id).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct BucketKeyPermChangeRequest { + bucket_id: String, + access_key_id: String, + permissions: ApiBucketKeyPerm, +} + +// ---- BUCKET ALIASES ---- + +pub async fn handle_global_alias_bucket( + garage: &Arc, + bucket_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&bucket_id)?; + + garage + .bucket_helper() + .set_global_bucket_alias(bucket_id, &alias) + .await?; + + bucket_info_results(garage, bucket_id).await +} + +pub async fn handle_global_unalias_bucket( + garage: &Arc, + bucket_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&bucket_id)?; + + garage + .bucket_helper() + .unset_global_bucket_alias(bucket_id, &alias) + .await?; + + bucket_info_results(garage, bucket_id).await +} + +pub async fn handle_local_alias_bucket( + garage: &Arc, + bucket_id: String, + access_key_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = 
parse_bucket_id(&bucket_id)?; + + garage + .bucket_helper() + .set_local_bucket_alias(bucket_id, &access_key_id, &alias) + .await?; + + bucket_info_results(garage, bucket_id).await +} + +pub async fn handle_local_unalias_bucket( + garage: &Arc, + bucket_id: String, + access_key_id: String, + alias: String, +) -> Result, Error> { + let bucket_id = parse_bucket_id(&bucket_id)?; + + garage + .bucket_helper() + .unset_local_bucket_alias(bucket_id, &access_key_id, &alias) + .await?; + + bucket_info_results(garage, bucket_id).await +} + +// ---- HELPER ---- + +fn parse_bucket_id(id: &str) -> Result { + let id_hex = hex::decode(&id).ok_or_bad_request("Invalid bucket id")?; + Ok(Uuid::try_from(&id_hex).ok_or_bad_request("Invalid bucket id")?) +} diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs new file mode 100644 index 00000000..3401be42 --- /dev/null +++ b/src/api/admin/cluster.rs @@ -0,0 +1,198 @@ +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; + +use garage_util::crdt::*; +use garage_util::data::*; +use garage_util::error::Error as GarageError; + +use garage_rpc::layout::*; + +use garage_model::garage::Garage; + +use crate::admin::error::*; +use crate::helpers::parse_json_body; + +pub async fn handle_get_cluster_status(garage: &Arc) -> Result, Error> { + let res = GetClusterStatusResponse { + node: hex::encode(garage.system.id), + garage_version: garage.system.garage_version(), + known_nodes: garage + .system + .get_known_nodes() + .into_iter() + .map(|i| { + ( + hex::encode(i.id), + KnownNodeResp { + addr: i.addr, + is_up: i.is_up, + last_seen_secs_ago: i.last_seen_secs_ago, + hostname: i.status.hostname, + }, + ) + }) + .collect(), + layout: get_cluster_layout(garage), + }; + + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) +} + +pub async fn handle_connect_cluster_nodes( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::>(req).await?; + + let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node))) + .await + .into_iter() + .map(|r| match r { + Ok(()) => ConnectClusterNodesResponse { + success: true, + error: None, + }, + Err(e) => ConnectClusterNodesResponse { + success: false, + error: Some(format!("{}", e)), + }, + }) + .collect::>(); + + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) +} + +pub async fn handle_get_cluster_layout(garage: &Arc) -> Result, Error> { + let res = get_cluster_layout(garage); + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) 
+} + +fn get_cluster_layout(garage: &Arc) -> GetClusterLayoutResponse { + let layout = garage.system.get_cluster_layout(); + + GetClusterLayoutResponse { + version: layout.version, + roles: layout + .roles + .items() + .iter() + .filter(|(_, _, v)| v.0.is_some()) + .map(|(k, _, v)| (hex::encode(k), v.0.clone())) + .collect(), + staged_role_changes: layout + .staging + .items() + .iter() + .filter(|(k, _, v)| layout.roles.get(k) != Some(v)) + .map(|(k, _, v)| (hex::encode(k), v.0.clone())) + .collect(), + } +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetClusterStatusResponse { + node: String, + garage_version: &'static str, + known_nodes: HashMap, + layout: GetClusterLayoutResponse, +} + +#[derive(Serialize)] +struct ConnectClusterNodesResponse { + success: bool, + error: Option, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetClusterLayoutResponse { + version: u64, + roles: HashMap>, + staged_role_changes: HashMap>, +} + +#[derive(Serialize)] +struct KnownNodeResp { + addr: SocketAddr, + is_up: bool, + last_seen_secs_ago: Option, + hostname: String, +} + +pub async fn handle_update_cluster_layout( + garage: &Arc, + req: Request, +) -> Result, Error> { + let updates = parse_json_body::(req).await?; + + let mut layout = garage.system.get_cluster_layout(); + + let mut roles = layout.roles.clone(); + roles.merge(&layout.staging); + + for (node, role) in updates { + let node = hex::decode(node).ok_or_bad_request("Invalid node identifier")?; + let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?; + + layout + .staging + .merge(&roles.update_mutator(node, NodeRoleV(role))); + } + + garage.system.update_cluster_layout(&layout).await?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?) +} + +pub async fn handle_apply_cluster_layout( + garage: &Arc, + req: Request, +) -> Result, Error> { + let param = parse_json_body::(req).await?; + + let layout = garage.system.get_cluster_layout(); + let layout = layout.apply_staged_changes(Some(param.version))?; + garage.system.update_cluster_layout(&layout).await?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?) +} + +pub async fn handle_revert_cluster_layout( + garage: &Arc, + req: Request, +) -> Result, Error> { + let param = parse_json_body::(req).await?; + + let layout = garage.system.get_cluster_layout(); + let layout = layout.revert_staged_changes(Some(param.version))?; + garage.system.update_cluster_layout(&layout).await?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?) 
+} + +type UpdateClusterLayoutRequest = HashMap>; + +#[derive(Deserialize)] +struct ApplyRevertLayoutRequest { + version: u64, +} diff --git a/src/api/admin/error.rs b/src/api/admin/error.rs new file mode 100644 index 00000000..c4613cb3 --- /dev/null +++ b/src/api/admin/error.rs @@ -0,0 +1,96 @@ +use err_derive::Error; +use hyper::header::HeaderValue; +use hyper::{Body, HeaderMap, StatusCode}; + +pub use garage_model::helper::error::Error as HelperError; + +use crate::common_error::CommonError; +pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError}; +use crate::generic_server::ApiError; +use crate::helpers::CustomApiErrorBody; + +/// Errors of this crate +#[derive(Debug, Error)] +pub enum Error { + #[error(display = "{}", _0)] + /// Error from common error + Common(CommonError), + + // Category: cannot process + /// The API access key does not exist + #[error(display = "Access key not found: {}", _0)] + NoSuchAccessKey(String), + + /// In Import key, the key already exists + #[error( + display = "Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. Sorry.", + _0 + )] + KeyAlreadyExists(String), +} + +impl From for Error +where + CommonError: From, +{ + fn from(err: T) -> Self { + Error::Common(CommonError::from(err)) + } +} + +impl CommonErrorDerivative for Error {} + +impl From for Error { + fn from(err: HelperError) -> Self { + match err { + HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)), + HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)), + HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)), + HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)), + HelperError::NoSuchAccessKey(n) => Self::NoSuchAccessKey(n), + } + } +} + +impl Error { + fn code(&self) -> &'static str { + match self { + Error::Common(c) => c.aws_code(), + Error::NoSuchAccessKey(_) => "NoSuchAccessKey", + Error::KeyAlreadyExists(_) => "KeyAlreadyExists", + } + } +} + +impl ApiError for Error { + /// Get the HTTP status code that best represents the meaning of the error for the client + fn http_status_code(&self) -> StatusCode { + match self { + Error::Common(c) => c.http_status_code(), + Error::NoSuchAccessKey(_) => StatusCode::NOT_FOUND, + Error::KeyAlreadyExists(_) => StatusCode::CONFLICT, + } + } + + fn add_http_headers(&self, _header_map: &mut HeaderMap) { + // nothing + } + + fn http_body(&self, garage_region: &str, path: &str) -> Body { + let error = CustomApiErrorBody { + code: self.code().to_string(), + message: format!("{}", self), + path: path.to_string(), + region: garage_region.to_string(), + }; + Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| { + r#" +{ + "code": "InternalError", + "message": "JSON encoding of error failed" +} + "# + .into() + })) + } +} diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs new file mode 100644 index 00000000..f30b5dbb --- /dev/null +++ b/src/api/admin/key.rs @@ -0,0 +1,264 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; + +use garage_util::error::Error as GarageError; + +use garage_table::*; + +use garage_model::garage::Garage; +use garage_model::key_table::*; + +use crate::admin::error::*; +use crate::helpers::parse_json_body; + +pub async fn handle_list_keys(garage: &Arc) -> Result, Error> { + let res = garage + .key_table + .get_range( + &EmptyKey, + None, + 
Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), + 10000, + EnumerationOrder::Forward, + ) + .await? + .iter() + .map(|k| ListKeyResultItem { + id: k.key_id.to_string(), + name: k.params().unwrap().name.get().clone(), + }) + .collect::>(); + + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) +} + +#[derive(Serialize)] +struct ListKeyResultItem { + id: String, + name: String, +} + +pub async fn handle_get_key_info( + garage: &Arc, + id: Option, + search: Option, +) -> Result, Error> { + let key = if let Some(id) = id { + garage.key_helper().get_existing_key(&id).await? + } else if let Some(search) = search { + garage + .key_helper() + .get_existing_matching_key(&search) + .await? + } else { + unreachable!(); + }; + + key_info_results(garage, key).await +} + +pub async fn handle_create_key( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let key = Key::new(&req.name); + garage.key_table.insert(&key).await?; + + key_info_results(garage, key).await +} + +#[derive(Deserialize)] +struct CreateKeyRequest { + name: String, +} + +pub async fn handle_import_key( + garage: &Arc, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?; + if prev_key.is_some() { + return Err(Error::KeyAlreadyExists(req.access_key_id.to_string())); + } + + let imported_key = Key::import(&req.access_key_id, &req.secret_access_key, &req.name); + garage.key_table.insert(&imported_key).await?; + + key_info_results(garage, imported_key).await +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct ImportKeyRequest { + access_key_id: String, + secret_access_key: String, + name: String, +} + +pub async fn handle_update_key( + garage: &Arc, + id: String, + req: Request, +) -> Result, Error> { + let req = parse_json_body::(req).await?; + + let mut key = garage.key_helper().get_existing_key(&id).await?; + + let key_state = key.state.as_option_mut().unwrap(); + + if let Some(new_name) = req.name { + key_state.name.update(new_name); + } + if let Some(allow) = req.allow { + if allow.create_bucket { + key_state.allow_create_bucket.update(true); + } + } + if let Some(deny) = req.deny { + if deny.create_bucket { + key_state.allow_create_bucket.update(false); + } + } + + garage.key_table.insert(&key).await?; + + key_info_results(garage, key).await +} + +#[derive(Deserialize)] +struct UpdateKeyRequest { + name: Option, + allow: Option, + deny: Option, +} + +pub async fn handle_delete_key(garage: &Arc, id: String) -> Result, Error> { + let mut key = garage.key_helper().get_existing_key(&id).await?; + + key.state.as_option().unwrap(); + + garage.key_helper().delete_key(&mut key).await?; + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) +} + +async fn key_info_results(garage: &Arc, key: Key) -> Result, Error> { + let mut relevant_buckets = HashMap::new(); + + let key_state = key.state.as_option().unwrap(); + + for id in key_state + .authorized_buckets + .items() + .iter() + .map(|(id, _)| id) + .chain( + key_state + .local_aliases + .items() + .iter() + .filter_map(|(_, _, v)| v.as_ref()), + ) { + if !relevant_buckets.contains_key(id) { + if let Some(b) = garage.bucket_table.get(&EmptyKey, id).await? 
{ + if b.state.as_option().is_some() { + relevant_buckets.insert(*id, b); + } + } + } + } + + let res = GetKeyInfoResult { + name: key_state.name.get().clone(), + access_key_id: key.key_id.clone(), + secret_access_key: key_state.secret_key.clone(), + permissions: KeyPerm { + create_bucket: *key_state.allow_create_bucket.get(), + }, + buckets: relevant_buckets + .into_iter() + .map(|(_, bucket)| { + let state = bucket.state.as_option().unwrap(); + KeyInfoBucketResult { + id: hex::encode(bucket.id), + global_aliases: state + .aliases + .items() + .iter() + .filter(|(_, _, a)| *a) + .map(|(n, _, _)| n.to_string()) + .collect::>(), + local_aliases: state + .local_aliases + .items() + .iter() + .filter(|((k, _), _, a)| *a && *k == key.key_id) + .map(|((_, n), _, _)| n.to_string()) + .collect::>(), + permissions: key_state + .authorized_buckets + .get(&bucket.id) + .map(|p| ApiBucketKeyPerm { + read: p.allow_read, + write: p.allow_write, + owner: p.allow_owner, + }) + .unwrap_or_default(), + } + }) + .collect::>(), + }; + + let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct GetKeyInfoResult { + name: String, + access_key_id: String, + secret_access_key: String, + permissions: KeyPerm, + buckets: Vec, +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct KeyPerm { + #[serde(default)] + create_bucket: bool, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct KeyInfoBucketResult { + id: String, + global_aliases: Vec, + local_aliases: Vec, + permissions: ApiBucketKeyPerm, +} + +#[derive(Serialize, Deserialize, Default)] +pub(crate) struct ApiBucketKeyPerm { + #[serde(default)] + pub(crate) read: bool, + #[serde(default)] + pub(crate) write: bool, + #[serde(default)] + pub(crate) owner: bool, +} diff --git a/src/api/admin/mod.rs b/src/api/admin/mod.rs new file mode 100644 index 00000000..c4857c10 --- /dev/null +++ b/src/api/admin/mod.rs @@ -0,0 +1,7 @@ +pub mod api_server; +mod error; +mod router; + +mod bucket; +mod cluster; +mod key; diff --git a/src/api/admin/router.rs b/src/api/admin/router.rs new file mode 100644 index 00000000..93639873 --- /dev/null +++ b/src/api/admin/router.rs @@ -0,0 +1,149 @@ +use std::borrow::Cow; + +use hyper::{Method, Request}; + +use crate::admin::error::*; +use crate::router_macros::*; + +pub enum Authorization { + MetricsToken, + AdminToken, +} + +router_match! {@func + +/// List of all Admin API endpoints. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Endpoint { + Options, + Metrics, + GetClusterStatus, + ConnectClusterNodes, + // Layout + GetClusterLayout, + UpdateClusterLayout, + ApplyClusterLayout, + RevertClusterLayout, + // Keys + ListKeys, + CreateKey, + ImportKey, + GetKeyInfo { + id: Option, + search: Option, + }, + DeleteKey { + id: String, + }, + UpdateKey { + id: String, + }, + // Buckets + ListBuckets, + CreateBucket, + GetBucketInfo { + id: Option, + global_alias: Option, + }, + DeleteBucket { + id: String, + }, + PutBucketWebsite { + id: String, + }, + DeleteBucketWebsite { + id: String, + }, + // Bucket-Key Permissions + BucketAllowKey, + BucketDenyKey, + // Bucket aliases + GlobalAliasBucket { + id: String, + alias: String, + }, + GlobalUnaliasBucket { + id: String, + alias: String, + }, + LocalAliasBucket { + id: String, + access_key_id: String, + alias: String, + }, + LocalUnaliasBucket { + id: String, + access_key_id: String, + alias: String, + }, +}} + +impl Endpoint { + /// Determine which S3 endpoint a request is for using the request, and a bucket which was + /// possibly extracted from the Host header. + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets + pub fn from_request(req: &Request) -> Result { + let uri = req.uri(); + let path = uri.path(); + let query = uri.query(); + + let mut query = QueryParameters::from_query(query.unwrap_or_default())?; + + let res = router_match!(@gen_path_parser (req.method(), path, query) [ + OPTIONS _ => Options, + GET "/metrics" => Metrics, + GET "/v0/status" => GetClusterStatus, + POST "/v0/connect" => ConnectClusterNodes, + // Layout endpoints + GET "/v0/layout" => GetClusterLayout, + POST "/v0/layout" => UpdateClusterLayout, + POST "/v0/layout/apply" => ApplyClusterLayout, + POST "/v0/layout/revert" => RevertClusterLayout, + // API key endpoints + GET "/v0/key" if id => GetKeyInfo (query_opt::id, query_opt::search), + GET "/v0/key" if search => GetKeyInfo (query_opt::id, query_opt::search), + POST "/v0/key" if id => UpdateKey (query::id), + POST "/v0/key" => CreateKey, + POST "/v0/key/import" => ImportKey, + DELETE "/v0/key" if id => DeleteKey (query::id), + GET "/v0/key" => ListKeys, + // Bucket endpoints + GET "/v0/bucket" if id => GetBucketInfo (query_opt::id, query_opt::global_alias), + GET "/v0/bucket" if global_alias => GetBucketInfo (query_opt::id, query_opt::global_alias), + GET "/v0/bucket" => ListBuckets, + POST "/v0/bucket" => CreateBucket, + DELETE "/v0/bucket" if id => DeleteBucket (query::id), + PUT "/v0/bucket/website" if id => PutBucketWebsite (query::id), + DELETE "/v0/bucket/website" if id => DeleteBucketWebsite (query::id), + // Bucket-key permissions + POST "/v0/bucket/allow" => BucketAllowKey, + POST "/v0/bucket/deny" => BucketDenyKey, + // Bucket aliases + PUT "/v0/bucket/alias/global" => GlobalAliasBucket (query::id, query::alias), + DELETE "/v0/bucket/alias/global" => GlobalUnaliasBucket (query::id, query::alias), + PUT "/v0/bucket/alias/local" => LocalAliasBucket (query::id, query::access_key_id, query::alias), + DELETE "/v0/bucket/alias/local" => LocalUnaliasBucket (query::id, query::access_key_id, query::alias), + ]); + + if let Some(message) = query.nonempty_message() { + debug!("Unused query parameter: {}", message) + } + + Ok(res) + } + /// Get the kind of authorization which is required to perform the operation. 
+ pub fn authorization_type(&self) -> Authorization { + match self { + Self::Metrics => Authorization::MetricsToken, + _ => Authorization::AdminToken, + } + } +} + +generateQueryParameters! { + "id" => id, + "search" => search, + "globalAlias" => global_alias, + "alias" => alias, + "accessKeyId" => access_key_id +} -- cgit v1.2.3 From ff06d3f0829464863e64ed55471f2caa13bed191 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 25 May 2022 17:05:56 +0200 Subject: Fix Content-Type headers for {admin,k2v} errors and admin responses Fix #315 --- src/api/admin/bucket.rs | 13 +++---------- src/api/admin/cluster.rs | 19 +++++-------------- src/api/admin/error.rs | 5 +++-- src/api/admin/key.rs | 14 +++----------- 4 files changed, 14 insertions(+), 37 deletions(-) (limited to 'src/api/admin') diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs index 849d28ac..7f9a813f 100644 --- a/src/api/admin/bucket.rs +++ b/src/api/admin/bucket.rs @@ -6,7 +6,6 @@ use serde::{Deserialize, Serialize}; use garage_util::crdt::*; use garage_util::data::*; -use garage_util::error::Error as GarageError; use garage_util::time::*; use garage_table::*; @@ -19,7 +18,7 @@ use garage_model::permission::*; use crate::admin::error::*; use crate::admin::key::ApiBucketKeyPerm; use crate::common_error::CommonError; -use crate::helpers::parse_json_body; +use crate::helpers::{json_ok_response, parse_json_body}; pub async fn handle_list_buckets(garage: &Arc) -> Result, Error> { let buckets = garage @@ -60,10 +59,7 @@ pub async fn handle_list_buckets(garage: &Arc) -> Result, }) .collect::>(); - let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::from(resp_json))?) + Ok(json_ok_response(&res)?) } #[derive(Serialize)] @@ -197,10 +193,7 @@ async fn bucket_info_results( .collect::>(), }; - let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::from(resp_json))?) + Ok(json_ok_response(&res)?) } #[derive(Serialize)] diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs index 3401be42..6d01317d 100644 --- a/src/api/admin/cluster.rs +++ b/src/api/admin/cluster.rs @@ -7,14 +7,13 @@ use serde::{Deserialize, Serialize}; use garage_util::crdt::*; use garage_util::data::*; -use garage_util::error::Error as GarageError; use garage_rpc::layout::*; use garage_model::garage::Garage; use crate::admin::error::*; -use crate::helpers::parse_json_body; +use crate::helpers::{json_ok_response, parse_json_body}; pub async fn handle_get_cluster_status(garage: &Arc) -> Result, Error> { let res = GetClusterStatusResponse { @@ -39,10 +38,7 @@ pub async fn handle_get_cluster_status(garage: &Arc) -> Result>(); - let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::from(resp_json))?) + Ok(json_ok_response(&res)?) } pub async fn handle_get_cluster_layout(garage: &Arc) -> Result, Error> { let res = get_cluster_layout(garage); - let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::from(resp_json))?) + + Ok(json_ok_response(&res)?) 
} fn get_cluster_layout(garage: &Arc) -> GetClusterLayoutResponse { diff --git a/src/api/admin/error.rs b/src/api/admin/error.rs index c4613cb3..ed1a07bd 100644 --- a/src/api/admin/error.rs +++ b/src/api/admin/error.rs @@ -72,8 +72,9 @@ impl ApiError for Error { } } - fn add_http_headers(&self, _header_map: &mut HeaderMap) { - // nothing + fn add_http_headers(&self, header_map: &mut HeaderMap) { + use hyper::header; + header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap()); } fn http_body(&self, garage_region: &str, path: &str) -> Body { diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs index f30b5dbb..2bbabb7b 100644 --- a/src/api/admin/key.rs +++ b/src/api/admin/key.rs @@ -4,15 +4,13 @@ use std::sync::Arc; use hyper::{Body, Request, Response, StatusCode}; use serde::{Deserialize, Serialize}; -use garage_util::error::Error as GarageError; - use garage_table::*; use garage_model::garage::Garage; use garage_model::key_table::*; use crate::admin::error::*; -use crate::helpers::parse_json_body; +use crate::helpers::{json_ok_response, parse_json_body}; pub async fn handle_list_keys(garage: &Arc) -> Result, Error> { let res = garage @@ -32,10 +30,7 @@ pub async fn handle_list_keys(garage: &Arc) -> Result, Er }) .collect::>(); - let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::from(resp_json))?) + Ok(json_ok_response(&res)?) } #[derive(Serialize)] @@ -221,10 +216,7 @@ async fn key_info_results(garage: &Arc, key: Key) -> Result>(), }; - let resp_json = serde_json::to_string_pretty(&res).map_err(GarageError::from)?; - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::from(resp_json))?) + Ok(json_ok_response(&res)?) } #[derive(Serialize)] -- cgit v1.2.3 From b44d3fc796484a50cd6854f20c9b46e5fddedc9d Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 8 Jun 2022 10:01:44 +0200 Subject: Abstract database behind generic interface and implement alternative drivers (#322) - [x] Design interface - [x] Implement Sled backend - [x] Re-implement the SledCountedTree hack ~~on Sled backend~~ on all backends (i.e. over the abstraction) - [x] Convert Garage code to use generic interface - [x] Proof-read converted Garage code - [ ] Test everything well - [x] Implement sqlite backend - [x] Implement LMDB backend - [ ] (Implement Persy backend?) - [ ] (Implement other backends? (like RocksDB, ...)) - [x] Implement backend choice in config file and garage server module - [x] Add CLI for converting between DB formats - Exploit the new interface to put more things in transactions - [x] `.updated()` trigger on Garage tables Fix #284 **Bugs** - [x] When exporting sqlite, trees iterate empty?? - [x] LMDB doesn't work **Known issues for various back-ends** - Sled: - Eats all my RAM and also all my disk space - `.len()` has to traverse the whole table - Is actually quite slow on some operations - And is actually pretty bad code... - Sqlite: - Requires a lock to be taken on all operations. The lock is also taken when iterating on a table with `.iter()`, and the lock isn't released until the iterator is dropped. This means that we must be VERY carefull to not do anything else inside a `.iter()` loop or else we will have a deadlock! Most such cases have been eliminated from the Garage codebase, but there might still be some that remain. If your Garage-over-Sqlite seems to hang/freeze, this is the reason. 
- (adapter uses a bunch of unsafe code) - Heed (LMDB): - Not suited for 32-bit machines as it has to map the whole DB in memory. - (adpater uses a tiny bit of unsafe code) **My recommendation:** avoid 32-bit machines and use LMDB as much as possible. **Converting databases** is actually quite easy. For example from Sled to LMDB: ```bash cd src/db cargo run --features cli --bin convert -- -i path/to/garage/meta/db -a sled -o path/to/garage/meta/db.lmdb -b lmdb ``` Then, just add this to your `config.toml`: ```toml db_engine = "lmdb" ``` Co-authored-by: Alex Auvolat Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/322 Co-authored-by: Alex Co-committed-by: Alex --- src/api/admin/cluster.rs | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src/api/admin') diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs index 6d01317d..4b7716a3 100644 --- a/src/api/admin/cluster.rs +++ b/src/api/admin/cluster.rs @@ -19,6 +19,7 @@ pub async fn handle_get_cluster_status(garage: &Arc) -> Result) -> GetClusterLayoutResponse { struct GetClusterStatusResponse { node: String, garage_version: &'static str, + db_engine: String, known_nodes: HashMap, layout: GetClusterLayoutResponse, } -- cgit v1.2.3 From 77e3fd6db2c9cd3a10889bd071e95ef839cfbefc Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 Jun 2022 20:20:28 +0200 Subject: improve internal item counter mechanisms and implement bucket quotas (#326) - [x] Refactoring of internal counting API - [x] Repair procedure for counters (it's an offline procedure!!!) - [x] New counter for objects in buckets - [x] Add quotas to buckets struct - [x] Add CLI to manage bucket quotas - [x] Add admin API to manage bucket quotas - [x] Apply quotas by adding checks on put operations - [x] Proof-read Co-authored-by: Alex Auvolat Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/326 Co-authored-by: Alex Co-committed-by: Alex --- src/api/admin/api_server.rs | 7 +--- src/api/admin/bucket.rs | 96 +++++++++++++++++++++++++++++++-------------- src/api/admin/router.rs | 8 +--- 3 files changed, 70 insertions(+), 41 deletions(-) (limited to 'src/api/admin') diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs index 57e3e5cf..c3b16715 100644 --- a/src/api/admin/api_server.rs +++ b/src/api/admin/api_server.rs @@ -156,12 +156,7 @@ impl ApiHandler for AdminApiServer { } Endpoint::CreateBucket => handle_create_bucket(&self.garage, req).await, Endpoint::DeleteBucket { id } => handle_delete_bucket(&self.garage, id).await, - Endpoint::PutBucketWebsite { id } => { - handle_put_bucket_website(&self.garage, id, req).await - } - Endpoint::DeleteBucketWebsite { id } => { - handle_delete_bucket_website(&self.garage, id).await - } + Endpoint::UpdateBucket { id } => handle_update_bucket(&self.garage, id, req).await, // Bucket-key permissions Endpoint::BucketAllowKey => { handle_bucket_change_key_perm(&self.garage, req, true).await diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs index 7f9a813f..ac8a8a40 100644 --- a/src/api/admin/bucket.rs +++ b/src/api/admin/bucket.rs @@ -14,6 +14,7 @@ use garage_model::bucket_alias_table::*; use garage_model::bucket_table::*; use garage_model::garage::Garage; use garage_model::permission::*; +use garage_model::s3::object_table::*; use crate::admin::error::*; use crate::admin::key::ApiBucketKeyPerm; @@ -77,6 +78,13 @@ struct BucketLocalAlias { alias: String, } +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct ApiBucketQuotas { + max_size: Option, + max_objects: Option, +} 
+ pub async fn handle_get_bucket_info( garage: &Arc, id: Option, @@ -108,6 +116,14 @@ async fn bucket_info_results( .get_existing_bucket(bucket_id) .await?; + let counters = garage + .object_counter_table + .table + .get(&bucket_id, &EmptyKey) + .await? + .map(|x| x.filtered_values(&garage.system.ring.borrow())) + .unwrap_or_default(); + let mut relevant_keys = HashMap::new(); for (k, _) in bucket .state @@ -148,6 +164,7 @@ async fn bucket_info_results( let state = bucket.state.as_option().unwrap(); + let quotas = state.quotas.get(); let res = GetBucketInfoResult { id: hex::encode(&bucket.id), @@ -191,6 +208,16 @@ async fn bucket_info_results( } }) .collect::>(), + objects: counters.get(OBJECTS).cloned().unwrap_or_default(), + bytes: counters.get(BYTES).cloned().unwrap_or_default(), + unfinshed_uploads: counters + .get(UNFINISHED_UPLOADS) + .cloned() + .unwrap_or_default(), + quotas: ApiBucketQuotas { + max_size: quotas.max_size, + max_objects: quotas.max_objects, + }, }; Ok(json_ok_response(&res)?) @@ -205,6 +232,10 @@ struct GetBucketInfoResult { #[serde(default)] website_config: Option, keys: Vec, + objects: i64, + bytes: i64, + unfinshed_uploads: i64, + quotas: ApiBucketQuotas, } #[derive(Serialize)] @@ -363,14 +394,12 @@ pub async fn handle_delete_bucket( .body(Body::empty())?) } -// ---- BUCKET WEBSITE CONFIGURATION ---- - -pub async fn handle_put_bucket_website( +pub async fn handle_update_bucket( garage: &Arc, id: String, req: Request, ) -> Result, Error> { - let req = parse_json_body::(req).await?; + let req = parse_json_body::(req).await?; let bucket_id = parse_bucket_id(&id)?; let mut bucket = garage @@ -379,10 +408,31 @@ pub async fn handle_put_bucket_website( .await?; let state = bucket.state.as_option_mut().unwrap(); - state.website_config.update(Some(WebsiteConfig { - index_document: req.index_document, - error_document: req.error_document, - })); + + if let Some(wa) = req.website_access { + if wa.enabled { + state.website_config.update(Some(WebsiteConfig { + index_document: wa.index_document.ok_or_bad_request( + "Please specify indexDocument when enabling website access.", + )?, + error_document: wa.error_document, + })); + } else { + if wa.index_document.is_some() || wa.error_document.is_some() { + return Err(Error::bad_request( + "Cannot specify indexDocument or errorDocument when disabling website access.", + )); + } + state.website_config.update(None); + } + } + + if let Some(q) = req.quotas { + state.quotas.update(BucketQuotas { + max_size: q.max_size, + max_objects: q.max_objects, + }); + } garage.bucket_table.insert(&bucket).await?; @@ -391,29 +441,17 @@ pub async fn handle_put_bucket_website( #[derive(Deserialize)] #[serde(rename_all = "camelCase")] -struct PutBucketWebsiteRequest { - index_document: String, - #[serde(default)] - error_document: Option, +struct UpdateBucketRequest { + website_access: Option, + quotas: Option, } -pub async fn handle_delete_bucket_website( - garage: &Arc, - id: String, -) -> Result, Error> { - let bucket_id = parse_bucket_id(&id)?; - - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - - let state = bucket.state.as_option_mut().unwrap(); - state.website_config.update(None); - - garage.bucket_table.insert(&bucket).await?; - - bucket_info_results(garage, bucket_id).await +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct UpdateBucketWebsiteAccess { + enabled: bool, + index_document: Option, + error_document: Option, } // ---- BUCKET/KEY PERMISSIONS ---- diff --git 
a/src/api/admin/router.rs b/src/api/admin/router.rs index 93639873..3eee8b67 100644 --- a/src/api/admin/router.rs +++ b/src/api/admin/router.rs @@ -48,10 +48,7 @@ pub enum Endpoint { DeleteBucket { id: String, }, - PutBucketWebsite { - id: String, - }, - DeleteBucketWebsite { + UpdateBucket { id: String, }, // Bucket-Key Permissions @@ -113,8 +110,7 @@ impl Endpoint { GET "/v0/bucket" => ListBuckets, POST "/v0/bucket" => CreateBucket, DELETE "/v0/bucket" if id => DeleteBucket (query::id), - PUT "/v0/bucket/website" if id => PutBucketWebsite (query::id), - DELETE "/v0/bucket/website" if id => DeleteBucketWebsite (query::id), + PUT "/v0/bucket" if id => UpdateBucket (query::id), // Bucket-key permissions POST "/v0/bucket/allow" => BucketAllowKey, POST "/v0/bucket/deny" => BucketDenyKey, -- cgit v1.2.3 From ea36b9ff904a8300afb8fb1601cde88c915a810f Mon Sep 17 00:00:00 2001 From: Jakub Jirutka Date: Sun, 4 Sep 2022 00:43:48 +0200 Subject: Allow building without Prometheus exporter (/metrics endpoint) prometheus and opentelemetry-prometheus add 7 extra dependencies in total and increases the size of the garage binary by ~7 % (with fat LTO). --- src/api/admin/api_server.rs | 55 +++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 22 deletions(-) (limited to 'src/api/admin') diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs index c3b16715..d871d4e2 100644 --- a/src/api/admin/api_server.rs +++ b/src/api/admin/api_server.rs @@ -3,13 +3,14 @@ use std::sync::Arc; use async_trait::async_trait; use futures::future::Future; -use http::header::{ - ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW, CONTENT_TYPE, -}; +use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW}; use hyper::{Body, Request, Response}; -use opentelemetry::trace::{SpanRef, Tracer}; +use opentelemetry::trace::SpanRef; + +#[cfg(feature = "metrics")] use opentelemetry_prometheus::PrometheusExporter; +#[cfg(feature = "metrics")] use prometheus::{Encoder, TextEncoder}; use garage_model::garage::Garage; @@ -25,6 +26,7 @@ use crate::admin::router::{Authorization, Endpoint}; pub struct AdminApiServer { garage: Arc, + #[cfg(feature = "metrics")] exporter: PrometheusExporter, metrics_token: Option, admin_token: Option, @@ -32,7 +34,6 @@ pub struct AdminApiServer { impl AdminApiServer { pub fn new(garage: Arc) -> Self { - let exporter = opentelemetry_prometheus::exporter().init(); let cfg = &garage.config.admin; let metrics_token = cfg .metrics_token @@ -44,7 +45,8 @@ impl AdminApiServer { .map(|tok| format!("Bearer {}", tok)); Self { garage, - exporter, + #[cfg(feature = "metrics")] + exporter: opentelemetry_prometheus::exporter().init(), metrics_token, admin_token, } @@ -71,22 +73,31 @@ impl AdminApiServer { } fn handle_metrics(&self) -> Result, Error> { - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - - let tracer = opentelemetry::global::tracer("garage"); - let metric_families = tracer.in_span("admin/gather_metrics", |_| { - self.exporter.registry().gather() - }); - - encoder - .encode(&metric_families, &mut buffer) - .ok_or_internal_error("Could not serialize metrics")?; - - Ok(Response::builder() - .status(200) - .header(CONTENT_TYPE, encoder.format_type()) - .body(Body::from(buffer))?) 
+ #[cfg(feature = "metrics")] + { + use opentelemetry::trace::Tracer; + + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + + let tracer = opentelemetry::global::tracer("garage"); + let metric_families = tracer.in_span("admin/gather_metrics", |_| { + self.exporter.registry().gather() + }); + + encoder + .encode(&metric_families, &mut buffer) + .ok_or_internal_error("Could not serialize metrics")?; + + Ok(Response::builder() + .status(200) + .header(http::header::CONTENT_TYPE, encoder.format_type()) + .body(Body::from(buffer))?) + } + #[cfg(not(feature = "metrics"))] + Err(Error::bad_request( + "Garage was built without the metrics feature".to_string(), + )) } } -- cgit v1.2.3 From db61f41030678c5756c844c8aa41a210c658769e Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 7 Sep 2022 11:59:56 +0200 Subject: Move GIT_VERSION injection later in build chain to reduce build times --- src/api/admin/cluster.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/api/admin') diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs index 4b7716a3..8e6dfb3f 100644 --- a/src/api/admin/cluster.rs +++ b/src/api/admin/cluster.rs @@ -18,7 +18,7 @@ use crate::helpers::{json_ok_response, parse_json_body}; pub async fn handle_get_cluster_status(garage: &Arc) -> Result, Error> { let res = GetClusterStatusResponse { node: hex::encode(garage.system.id), - garage_version: garage.system.garage_version(), + garage_version: garage_model::version::garage_version(), db_engine: garage.db.engine(), known_nodes: garage .system -- cgit v1.2.3 From 28d86e76021bed674ca78684b9522cfb664a8ae2 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 7 Sep 2022 17:05:21 +0200 Subject: Report build features in garage --help --- src/api/admin/cluster.rs | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src/api/admin') diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs index 8e6dfb3f..010382f2 100644 --- a/src/api/admin/cluster.rs +++ b/src/api/admin/cluster.rs @@ -19,6 +19,7 @@ pub async fn handle_get_cluster_status(garage: &Arc) -> Result) -> GetClusterLayoutResponse { struct GetClusterStatusResponse { node: String, garage_version: &'static str, + garage_features: Option<&'static [&'static str]>, db_engine: String, known_nodes: HashMap, layout: GetClusterLayoutResponse, -- cgit v1.2.3 From 2559f63e9bb58a66da70f33e852ebbd5f909876e Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 7 Sep 2022 17:54:16 +0200 Subject: Make all HTTP services optionnal --- src/api/admin/api_server.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'src/api/admin') diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs index d871d4e2..fb0078cc 100644 --- a/src/api/admin/api_server.rs +++ b/src/api/admin/api_server.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::sync::Arc; use async_trait::async_trait; @@ -52,15 +53,15 @@ impl AdminApiServer { } } - pub async fn run(self, shutdown_signal: impl Future) -> Result<(), GarageError> { - if let Some(bind_addr) = self.garage.config.admin.api_bind_addr { - let region = self.garage.config.s3_api.s3_region.clone(); - ApiServer::new(region, self) - .run_server(bind_addr, shutdown_signal) - .await - } else { - Ok(()) - } + pub async fn run( + self, + bind_addr: SocketAddr, + shutdown_signal: impl Future, + ) -> Result<(), GarageError> { + let region = self.garage.config.s3_api.s3_region.clone(); + ApiServer::new(region, self) + .run_server(bind_addr, shutdown_signal) + .await } fn 
handle_options(&self, _req: &Request<Body>) -> Result<Response<Body>, Error> {
-- 
cgit v1.2.3


From ceb1f0229a9c8b9f8255b4a4c70272627f0c34d7 Mon Sep 17 00:00:00 2001
From: Alex Auvolat
Date: Wed, 7 Sep 2022 18:36:46 +0200
Subject: Move version back into util

---
 src/api/admin/cluster.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/api/admin')

diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs
index 010382f2..99c6e332 100644
--- a/src/api/admin/cluster.rs
+++ b/src/api/admin/cluster.rs
@@ -18,8 +18,8 @@ use crate::helpers::{json_ok_response, parse_json_body};
 pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
 	let res = GetClusterStatusResponse {
 		node: hex::encode(garage.system.id),
-		garage_version: garage_model::version::garage_version(),
-		garage_features: garage_model::version::garage_features(),
+		garage_version: garage_util::version::garage_version(),
+		garage_features: garage_util::version::garage_features(),
 		db_engine: garage.db.engine(),
 		known_nodes: garage
 			.system
-- 
cgit v1.2.3


From 782630fc27b41b9ae58d1417cace2995c99856fc Mon Sep 17 00:00:00 2001
From: Alex Auvolat
Date: Tue, 20 Sep 2022 17:45:18 +0200
Subject: Initialize metrics exporter earlier (fix #389)

---
 src/api/admin/api_server.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'src/api/admin')

diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index fb0078cc..0816bda1 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -34,7 +34,10 @@ pub struct AdminApiServer {
 }
 
 impl AdminApiServer {
-	pub fn new(garage: Arc<Garage>) -> Self {
+	pub fn new(
+		garage: Arc<Garage>,
+		#[cfg(feature = "metrics")] exporter: PrometheusExporter,
+	) -> Self {
 		let cfg = &garage.config.admin;
 		let metrics_token = cfg
 			.metrics_token
-- 
cgit v1.2.3
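
A quick sketch of how the admin API introduced by this patch series can be exercised once a node is running. The endpoint paths, bearer-token scheme and JSON field names come directly from the router and request structs in the patches above; the listen address (`localhost:3903`), the token environment variables and the id placeholders are assumptions for illustration and must be adapted to the `[admin]` section of your configuration (`api_bind_addr`, `admin_token`, `metrics_token`).

```bash
# Assumed deployment values -- adjust to your own [admin] configuration.
ADMIN="http://localhost:3903"                               # api_bind_addr (assumption)
AUTH="Authorization: Bearer $GARAGE_ADMIN_TOKEN"            # admin_token
METRICS_AUTH="Authorization: Bearer $GARAGE_METRICS_TOKEN"  # metrics_token

# General endpoints: cluster status and Prometheus metrics
curl -s -H "$AUTH" "$ADMIN/v0/status"
curl -s -H "$METRICS_AUTH" "$ADMIN/metrics"

# Keys: create one, then look it up by name search
curl -s -X POST -H "$AUTH" -d '{"name": "backup-key"}' "$ADMIN/v0/key"
curl -s -H "$AUTH" "$ADMIN/v0/key?search=backup"

# Buckets: create one with a global alias and inspect it
curl -s -X POST -H "$AUTH" -d '{"globalAlias": "backups"}' "$ADMIN/v0/bucket"
curl -s -H "$AUTH" "$ADMIN/v0/bucket?globalAlias=backups"

# Permissions: allow a key to read and write the bucket
# ($BUCKET_ID and $KEY_ID are placeholders for the ids returned above)
curl -s -X POST -H "$AUTH" "$ADMIN/v0/bucket/allow" \
  -d "{\"bucketId\": \"$BUCKET_ID\", \"accessKeyId\": \"$KEY_ID\",
       \"permissions\": {\"read\": true, \"write\": true, \"owner\": false}}"

# UpdateBucket (added in the quotas PR): enable website access and set a size quota
curl -s -X PUT -H "$AUTH" "$ADMIN/v0/bucket?id=$BUCKET_ID" \
  -d '{"websiteAccess": {"enabled": true, "indexDocument": "index.html"},
       "quotas": {"maxSize": 10000000000, "maxObjects": null}}'

# Layout: apply staged role changes at a given layout version
curl -s -X POST -H "$AUTH" -d '{"version": 2}' "$ADMIN/v0/layout/apply"
```

As the router shows, all admin endpoints are versioned under `/v0/` and authenticated with a static bearer token: `/metrics` uses the separate metrics token (and is served unauthenticated if no `metrics_token` is configured), while every other endpoint is rejected outright when `admin_token` is not set.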