Diffstat (limited to 'src'): 93 files changed, 6662 insertions, 2266 deletions
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 3d6b527e..e8cbc1c8 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_api"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license = "AGPL-3.0"
@@ -28,7 +28,7 @@ crypto-common = "0.1"
 err-derive = "0.3"
 hex = "0.4"
 hmac = "0.12"
-idna = "0.3"
+idna = "0.4"
 tracing = "0.1"
 md-5 = "0.10"
 nom = "7.1"
@@ -45,6 +45,7 @@ http = "0.2"
 httpdate = "1.0"
 http-range = "0.1"
 hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
+hyperlocal = { version = "0.8.0", default-features = false, features = ["server"] }
 multer = "2.0"
 percent-encoding = "2.1.0"
 roxmltree = "0.18"
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index b0dfdfb7..0ce3ca0d 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -1,5 +1,4 @@
 use std::collections::HashMap;
-use std::net::SocketAddr;
 use std::sync::Arc;
 
 use async_trait::async_trait;
@@ -18,6 +17,7 @@ use prometheus::{Encoder, TextEncoder};
 use garage_model::garage::Garage;
 use garage_rpc::system::ClusterHealthStatus;
 use garage_util::error::Error as GarageError;
+use garage_util::socket_address::UnixOrTCPSocketAddress;
 
 use crate::generic_server::*;
 
@@ -25,7 +25,9 @@ use crate::admin::bucket::*;
 use crate::admin::cluster::*;
 use crate::admin::error::*;
 use crate::admin::key::*;
-use crate::admin::router::{Authorization, Endpoint};
+use crate::admin::router_v0;
+use crate::admin::router_v1::{Authorization, Endpoint};
+use crate::helpers::host_to_bucket;
 
 pub struct AdminApiServer {
 	garage: Arc<Garage>,
@@ -60,12 +62,12 @@ impl AdminApiServer {
 
 	pub async fn run(
 		self,
-		bind_addr: SocketAddr,
+		bind_addr: UnixOrTCPSocketAddress,
 		shutdown_signal: impl Future<Output = ()>,
 	) -> Result<(), GarageError> {
 		let region = self.garage.config.s3_api.s3_region.clone();
 		ApiServer::new(region, self)
-			.run_server(bind_addr, shutdown_signal)
+			.run_server(bind_addr, Some(0o220), shutdown_signal)
 			.await
 	}
 
@@ -78,10 +80,7 @@ impl AdminApiServer {
 			.body(Body::empty())?)
 	}
 
-	async fn handle_check_website_enabled(
-		&self,
-		req: Request<Body>,
-	) -> Result<Response<Body>, Error> {
+	async fn handle_check_domain(&self, req: Request<Body>) -> Result<Response<Body>, Error> {
 		let query_params: HashMap<String, String> = req
 			.uri()
 			.query()
@@ -102,12 +101,56 @@ impl AdminApiServer {
 			.get("domain")
 			.ok_or_internal_error("Could not parse domain query string")?;
 
-		let bucket_id = self
+		if self.check_domain(domain).await? {
+			Ok(Response::builder()
+				.status(StatusCode::OK)
+				.body(Body::from(format!(
+					"Domain '{domain}' is managed by Garage"
+				)))?)
+		} else {
+			Err(Error::bad_request(format!(
+				"Domain '{domain}' is not managed by Garage"
+			)))
+		}
+	}
+
+	async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
+		// Resolve bucket from domain name, inferring if the website must be activated for the
+		// domain to be valid.
+		let (bucket_name, must_check_website) = if let Some(bname) = self
+			.garage
+			.config
+			.s3_api
+			.root_domain
+			.as_ref()
+			.and_then(|rd| host_to_bucket(domain, rd))
+		{
+			(bname.to_string(), false)
+		} else if let Some(bname) = self
+			.garage
+			.config
+			.s3_web
+			.as_ref()
+			.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
+		{
+			(bname.to_string(), true)
+		} else {
+			(domain.to_string(), true)
+		};
+
+		let bucket_id = match self
 			.garage
 			.bucket_helper()
-			.resolve_global_bucket_name(domain)
+			.resolve_global_bucket_name(&bucket_name)
 			.await?
-			.ok_or(HelperError::NoSuchBucket(domain.to_string()))?;
+		{
+			Some(bucket_id) => bucket_id,
+			None => return Ok(false),
+		};
+
+		if !must_check_website {
+			return Ok(true);
+		}
 
 		let bucket = self
 			.garage
@@ -119,16 +162,8 @@ impl AdminApiServer {
 		let bucket_website_config = bucket_state.website_config.get();
 
 		match bucket_website_config {
-			Some(_v) => {
-				Ok(Response::builder()
-					.status(StatusCode::OK)
-					.body(Body::from(format!(
-						"Bucket '{domain}' is authorized for website hosting"
-					)))?)
-			}
-			None => Err(Error::bad_request(format!(
-				"Bucket '{domain}' is not authorized for website hosting"
-			))),
+			Some(_v) => Ok(true),
+			None => Ok(false),
 		}
 	}
 
@@ -147,7 +182,7 @@ impl AdminApiServer {
 			),
 		};
 		let status_str = format!(
-			"{}\nConsult the full health check API endpoint at /v0/health for more details\n",
+			"{}\nConsult the full health check API endpoint at /v1/health for more details\n",
 			status_str
 		);
 
@@ -195,7 +230,12 @@ impl ApiHandler for AdminApiServer {
 	type Error = Error;
 
 	fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
-		Endpoint::from_request(req)
+		if req.uri().path().starts_with("/v0/") {
+			let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
+			Endpoint::from_v0(endpoint_v0)
+		} else {
+			Endpoint::from_request(req)
+		}
 	}
 
 	async fn handle(
@@ -229,7 +269,7 @@ impl ApiHandler for AdminApiServer {
 
 		match endpoint {
 			Endpoint::Options => self.handle_options(&req),
-			Endpoint::CheckWebsiteEnabled => self.handle_check_website_enabled(req).await,
+			Endpoint::CheckDomain => self.handle_check_domain(req).await,
 			Endpoint::Health => self.handle_health(),
 			Endpoint::Metrics => self.handle_metrics(),
 			Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
@@ -242,8 +282,13 @@ impl ApiHandler for AdminApiServer {
 			Endpoint::RevertClusterLayout => handle_revert_cluster_layout(&self.garage, req).await,
 			// Keys
 			Endpoint::ListKeys => handle_list_keys(&self.garage).await,
-			Endpoint::GetKeyInfo { id, search } => {
-				handle_get_key_info(&self.garage, id, search).await
+			Endpoint::GetKeyInfo {
+				id,
+				search,
+				show_secret_key,
+			} => {
+				let show_secret_key = show_secret_key.map(|x| x == "true").unwrap_or(false);
+				handle_get_key_info(&self.garage, id, search, show_secret_key).await
 			}
 			Endpoint::CreateKey => handle_create_key(&self.garage, req).await,
 			Endpoint::ImportKey => handle_import_key(&self.garage, req).await,
diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs
index f0a4a9e7..17f46c30 100644
--- a/src/api/admin/bucket.rs
+++ b/src/api/admin/bucket.rs
@@ -14,6 +14,7 @@ use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::*;
 use garage_model::garage::Garage;
 use garage_model::permission::*;
+use garage_model::s3::mpu_table;
 use garage_model::s3::object_table::*;
 
 use crate::admin::error::*;
@@ -124,6 +125,14 @@ async fn bucket_info_results(
 		.map(|x| x.filtered_values(&garage.system.ring.borrow()))
 		.unwrap_or_default();
 
+	let mpu_counters = garage
+		.mpu_counter_table
+		.table
+		.get(&bucket_id, &EmptyKey)
+		.await?
+		.map(|x| x.filtered_values(&garage.system.ring.borrow()))
+		.unwrap_or_default();
+
 	let mut relevant_keys = HashMap::new();
 	for (k, _) in bucket
 		.state
@@ -208,12 +217,12 @@ async fn bucket_info_results(
 			}
 		})
 		.collect::<Vec<_>>(),
-		objects: counters.get(OBJECTS).cloned().unwrap_or_default(),
-		bytes: counters.get(BYTES).cloned().unwrap_or_default(),
-		unfinished_uploads: counters
-			.get(UNFINISHED_UPLOADS)
-			.cloned()
-			.unwrap_or_default(),
+		objects: *counters.get(OBJECTS).unwrap_or(&0),
+		bytes: *counters.get(BYTES).unwrap_or(&0),
+		unfinished_uploads: *counters.get(UNFINISHED_UPLOADS).unwrap_or(&0),
+		unfinished_multipart_uploads: *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0),
+		unfinished_multipart_upload_parts: *mpu_counters.get(mpu_table::PARTS).unwrap_or(&0),
+		unfinished_multipart_upload_bytes: *mpu_counters.get(mpu_table::BYTES).unwrap_or(&0),
 		quotas: ApiBucketQuotas {
 			max_size: quotas.max_size,
 			max_objects: quotas.max_objects,
@@ -235,6 +244,9 @@ struct GetBucketInfoResult {
 	objects: i64,
 	bytes: i64,
 	unfinished_uploads: i64,
+	unfinished_multipart_uploads: i64,
+	unfinished_multipart_upload_parts: i64,
+	unfinished_multipart_upload_bytes: i64,
 	quotas: ApiBucketQuotas,
 }
diff --git a/src/api/admin/cluster.rs b/src/api/admin/cluster.rs
index 98bf2b5a..c8107b82 100644
--- a/src/api/admin/cluster.rs
+++ b/src/api/admin/cluster.rs
@@ -1,14 +1,13 @@
-use std::collections::HashMap;
 use std::net::SocketAddr;
 use std::sync::Arc;
 
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{Body, Request, Response};
 use serde::{Deserialize, Serialize};
 
 use garage_util::crdt::*;
 use garage_util::data::*;
 
-use garage_rpc::layout::*;
+use garage_rpc::layout;
 
 use garage_model::garage::Garage;
 
@@ -26,26 +25,37 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 		.system
 		.get_known_nodes()
 		.into_iter()
-		.map(|i| {
-			(
-				hex::encode(i.id),
-				KnownNodeResp {
-					addr: i.addr,
-					is_up: i.is_up,
-					last_seen_secs_ago: i.last_seen_secs_ago,
-					hostname: i.status.hostname,
-				},
-			)
+		.map(|i| KnownNodeResp {
+			id: hex::encode(i.id),
+			addr: i.addr,
+			is_up: i.is_up,
+			last_seen_secs_ago: i.last_seen_secs_ago,
+			hostname: i.status.hostname,
 		})
 		.collect(),
-		layout: get_cluster_layout(garage),
+		layout: format_cluster_layout(&garage.system.get_cluster_layout()),
 	};
 
 	Ok(json_ok_response(&res)?)
 }
 
 pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+	use garage_rpc::system::ClusterHealthStatus;
 	let health = garage.system.health();
+	let health = ClusterHealth {
+		status: match health.status {
+			ClusterHealthStatus::Healthy => "healthy",
+			ClusterHealthStatus::Degraded => "degraded",
+			ClusterHealthStatus::Unavailable => "unavailable",
+		},
+		known_nodes: health.known_nodes,
+		connected_nodes: health.connected_nodes,
+		storage_nodes: health.storage_nodes,
+		storage_nodes_ok: health.storage_nodes_ok,
+		partitions: health.partitions,
+		partitions_quorum: health.partitions_quorum,
+		partitions_all_ok: health.partitions_all_ok,
+	};
 	Ok(json_ok_response(&health)?)
 }
 
@@ -74,33 +84,68 @@ pub async fn handle_connect_cluster_nodes(
 }
 
 pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
-	let res = get_cluster_layout(garage);
+	let res = format_cluster_layout(&garage.system.get_cluster_layout());
 	Ok(json_ok_response(&res)?)
} -fn get_cluster_layout(garage: &Arc<Garage>) -> GetClusterLayoutResponse { - let layout = garage.system.get_cluster_layout(); +fn format_cluster_layout(layout: &layout::ClusterLayout) -> GetClusterLayoutResponse { + let roles = layout + .roles + .items() + .iter() + .filter_map(|(k, _, v)| v.0.clone().map(|x| (k, x))) + .map(|(k, v)| NodeRoleResp { + id: hex::encode(k), + zone: v.zone.clone(), + capacity: v.capacity, + tags: v.tags.clone(), + }) + .collect::<Vec<_>>(); + + let staged_role_changes = layout + .staging_roles + .items() + .iter() + .filter(|(k, _, v)| layout.roles.get(k) != Some(v)) + .map(|(k, _, v)| match &v.0 { + None => NodeRoleChange { + id: hex::encode(k), + action: NodeRoleChangeEnum::Remove { remove: true }, + }, + Some(r) => NodeRoleChange { + id: hex::encode(k), + action: NodeRoleChangeEnum::Update { + zone: r.zone.clone(), + capacity: r.capacity, + tags: r.tags.clone(), + }, + }, + }) + .collect::<Vec<_>>(); GetClusterLayoutResponse { version: layout.version, - roles: layout - .roles - .items() - .iter() - .filter(|(_, _, v)| v.0.is_some()) - .map(|(k, _, v)| (hex::encode(k), v.0.clone())) - .collect(), - staged_role_changes: layout - .staging - .items() - .iter() - .filter(|(k, _, v)| layout.roles.get(k) != Some(v)) - .map(|(k, _, v)| (hex::encode(k), v.0.clone())) - .collect(), + roles, + staged_role_changes, } } +// ---- + +#[derive(Debug, Clone, Copy, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ClusterHealth { + status: &'static str, + known_nodes: usize, + connected_nodes: usize, + storage_nodes: usize, + storage_nodes_ok: usize, + partitions: usize, + partitions_quorum: usize, + partitions_all_ok: usize, +} + #[derive(Serialize)] #[serde(rename_all = "camelCase")] struct GetClusterStatusResponse { @@ -109,11 +154,19 @@ struct GetClusterStatusResponse { garage_features: Option<&'static [&'static str]>, rust_version: &'static str, db_engine: String, - known_nodes: HashMap<String, KnownNodeResp>, + known_nodes: Vec<KnownNodeResp>, + layout: GetClusterLayoutResponse, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct ApplyClusterLayoutResponse { + message: Vec<String>, layout: GetClusterLayoutResponse, } #[derive(Serialize)] +#[serde(rename_all = "camelCase")] struct ConnectClusterNodesResponse { success: bool, error: Option<String>, @@ -123,18 +176,31 @@ struct ConnectClusterNodesResponse { #[serde(rename_all = "camelCase")] struct GetClusterLayoutResponse { version: u64, - roles: HashMap<String, Option<NodeRole>>, - staged_role_changes: HashMap<String, Option<NodeRole>>, + roles: Vec<NodeRoleResp>, + staged_role_changes: Vec<NodeRoleChange>, } #[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct NodeRoleResp { + id: String, + zone: String, + capacity: Option<u64>, + tags: Vec<String>, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] struct KnownNodeResp { + id: String, addr: SocketAddr, is_up: bool, last_seen_secs_ago: Option<u64>, hostname: String, } +// ---- update functions ---- + pub async fn handle_update_cluster_layout( garage: &Arc<Garage>, req: Request<Body>, @@ -144,22 +210,35 @@ pub async fn handle_update_cluster_layout( let mut layout = garage.system.get_cluster_layout(); let mut roles = layout.roles.clone(); - roles.merge(&layout.staging); + roles.merge(&layout.staging_roles); - for (node, role) in updates { - let node = hex::decode(node).ok_or_bad_request("Invalid node identifier")?; + for change in updates { + let node = hex::decode(&change.id).ok_or_bad_request("Invalid node 
identifier")?; let node = Uuid::try_from(&node).ok_or_bad_request("Invalid node identifier")?; + let new_role = match change.action { + NodeRoleChangeEnum::Remove { remove: true } => None, + NodeRoleChangeEnum::Update { + zone, + capacity, + tags, + } => Some(layout::NodeRole { + zone, + capacity, + tags, + }), + _ => return Err(Error::bad_request("Invalid layout change")), + }; + layout - .staging - .merge(&roles.update_mutator(node, NodeRoleV(role))); + .staging_roles + .merge(&roles.update_mutator(node, layout::NodeRoleV(new_role))); } garage.system.update_cluster_layout(&layout).await?; - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) + let res = format_cluster_layout(&layout); + Ok(json_ok_response(&res)?) } pub async fn handle_apply_cluster_layout( @@ -169,12 +248,15 @@ pub async fn handle_apply_cluster_layout( let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?; let layout = garage.system.get_cluster_layout(); - let layout = layout.apply_staged_changes(Some(param.version))?; + let (layout, msg) = layout.apply_staged_changes(Some(param.version))?; + garage.system.update_cluster_layout(&layout).await?; - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) + let res = ApplyClusterLayoutResponse { + message: msg, + layout: format_cluster_layout(&layout), + }; + Ok(json_ok_response(&res)?) } pub async fn handle_revert_cluster_layout( @@ -187,14 +269,39 @@ pub async fn handle_revert_cluster_layout( let layout = layout.revert_staged_changes(Some(param.version))?; garage.system.update_cluster_layout(&layout).await?; - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) + let res = format_cluster_layout(&layout); + Ok(json_ok_response(&res)?) } -type UpdateClusterLayoutRequest = HashMap<String, Option<NodeRole>>; +// ---- + +type UpdateClusterLayoutRequest = Vec<NodeRoleChange>; #[derive(Deserialize)] +#[serde(rename_all = "camelCase")] struct ApplyRevertLayoutRequest { version: u64, } + +// ---- + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct NodeRoleChange { + id: String, + #[serde(flatten)] + action: NodeRoleChangeEnum, +} + +#[derive(Serialize, Deserialize)] +#[serde(untagged)] +enum NodeRoleChangeEnum { + #[serde(rename_all = "camelCase")] + Remove { remove: bool }, + #[serde(rename_all = "camelCase")] + Update { + zone: String, + capacity: Option<u64>, + tags: Vec<String>, + }, +} diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs index d74ca361..8d1c6890 100644 --- a/src/api/admin/key.rs +++ b/src/api/admin/key.rs @@ -10,7 +10,7 @@ use garage_model::garage::Garage; use garage_model::key_table::*; use crate::admin::error::*; -use crate::helpers::{json_ok_response, parse_json_body}; +use crate::helpers::{is_default, json_ok_response, parse_json_body}; pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> { let res = garage @@ -34,6 +34,7 @@ pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Er } #[derive(Serialize)] +#[serde(rename_all = "camelCase")] struct ListKeyResultItem { id: String, name: String, @@ -43,6 +44,7 @@ pub async fn handle_get_key_info( garage: &Arc<Garage>, id: Option<String>, search: Option<String>, + show_secret_key: bool, ) -> Result<Response<Body>, Error> { let key = if let Some(id) = id { garage.key_helper().get_existing_key(&id).await? 
@@ -55,7 +57,7 @@ pub async fn handle_get_key_info( unreachable!(); }; - key_info_results(garage, key).await + key_info_results(garage, key, show_secret_key).await } pub async fn handle_create_key( @@ -64,15 +66,16 @@ pub async fn handle_create_key( ) -> Result<Response<Body>, Error> { let req = parse_json_body::<CreateKeyRequest>(req).await?; - let key = Key::new(&req.name); + let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key")); garage.key_table.insert(&key).await?; - key_info_results(garage, key).await + key_info_results(garage, key, true).await } #[derive(Deserialize)] +#[serde(rename_all = "camelCase")] struct CreateKeyRequest { - name: String, + name: Option<String>, } pub async fn handle_import_key( @@ -86,10 +89,15 @@ pub async fn handle_import_key( return Err(Error::KeyAlreadyExists(req.access_key_id.to_string())); } - let imported_key = Key::import(&req.access_key_id, &req.secret_access_key, &req.name); + let imported_key = Key::import( + &req.access_key_id, + &req.secret_access_key, + req.name.as_deref().unwrap_or("Imported key"), + ) + .ok_or_bad_request("Invalid key format")?; garage.key_table.insert(&imported_key).await?; - key_info_results(garage, imported_key).await + key_info_results(garage, imported_key, false).await } #[derive(Deserialize)] @@ -97,7 +105,7 @@ pub async fn handle_import_key( struct ImportKeyRequest { access_key_id: String, secret_access_key: String, - name: String, + name: Option<String>, } pub async fn handle_update_key( @@ -127,10 +135,11 @@ pub async fn handle_update_key( garage.key_table.insert(&key).await?; - key_info_results(garage, key).await + key_info_results(garage, key, false).await } #[derive(Deserialize)] +#[serde(rename_all = "camelCase")] struct UpdateKeyRequest { name: Option<String>, allow: Option<KeyPerm>, @@ -149,7 +158,11 @@ pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Respo .body(Body::empty())?) 
} -async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Body>, Error> { +async fn key_info_results( + garage: &Arc<Garage>, + key: Key, + show_secret: bool, +) -> Result<Response<Body>, Error> { let mut relevant_buckets = HashMap::new(); let key_state = key.state.as_option().unwrap(); @@ -178,7 +191,11 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod let res = GetKeyInfoResult { name: key_state.name.get().clone(), access_key_id: key.key_id.clone(), - secret_access_key: key_state.secret_key.clone(), + secret_access_key: if show_secret { + Some(key_state.secret_key.clone()) + } else { + None + }, permissions: KeyPerm { create_bucket: *key_state.allow_create_bucket.get(), }, @@ -224,7 +241,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod struct GetKeyInfoResult { name: String, access_key_id: String, - secret_access_key: String, + #[serde(skip_serializing_if = "is_default")] + secret_access_key: Option<String>, permissions: KeyPerm, buckets: Vec<KeyInfoBucketResult>, } @@ -246,6 +264,7 @@ struct KeyInfoBucketResult { } #[derive(Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] pub(crate) struct ApiBucketKeyPerm { #[serde(default)] pub(crate) read: bool, diff --git a/src/api/admin/mod.rs b/src/api/admin/mod.rs index c4857c10..43a8c59c 100644 --- a/src/api/admin/mod.rs +++ b/src/api/admin/mod.rs @@ -1,6 +1,7 @@ pub mod api_server; mod error; -mod router; +mod router_v0; +mod router_v1; mod bucket; mod cluster; diff --git a/src/api/admin/router.rs b/src/api/admin/router_v0.rs index 0dcb1546..68676445 100644 --- a/src/api/admin/router.rs +++ b/src/api/admin/router_v0.rs @@ -5,19 +5,13 @@ use hyper::{Method, Request}; use crate::admin::error::*; use crate::router_macros::*; -pub enum Authorization { - None, - MetricsToken, - AdminToken, -} - router_match! {@func /// List of all Admin API endpoints. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Endpoint { Options, - CheckWebsiteEnabled, + CheckDomain, Health, Metrics, GetClusterStatus, @@ -92,7 +86,7 @@ impl Endpoint { let res = router_match!(@gen_path_parser (req.method(), path, query) [ OPTIONS _ => Options, - GET "/check" => CheckWebsiteEnabled, + GET "/check" => CheckDomain, GET "/health" => Health, GET "/metrics" => Metrics, GET "/v0/status" => GetClusterStatus, @@ -134,15 +128,6 @@ impl Endpoint { Ok(res) } - /// Get the kind of authorization which is required to perform the operation. - pub fn authorization_type(&self) -> Authorization { - match self { - Self::Health => Authorization::None, - Self::CheckWebsiteEnabled => Authorization::None, - Self::Metrics => Authorization::MetricsToken, - _ => Authorization::AdminToken, - } - } } generateQueryParameters! { diff --git a/src/api/admin/router_v1.rs b/src/api/admin/router_v1.rs new file mode 100644 index 00000000..cc5ff2ec --- /dev/null +++ b/src/api/admin/router_v1.rs @@ -0,0 +1,235 @@ +use std::borrow::Cow; + +use hyper::{Method, Request}; + +use crate::admin::error::*; +use crate::admin::router_v0; +use crate::router_macros::*; + +pub enum Authorization { + None, + MetricsToken, + AdminToken, +} + +router_match! {@func + +/// List of all Admin API endpoints. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Endpoint { + Options, + CheckDomain, + Health, + Metrics, + GetClusterStatus, + GetClusterHealth, + ConnectClusterNodes, + // Layout + GetClusterLayout, + UpdateClusterLayout, + ApplyClusterLayout, + RevertClusterLayout, + // Keys + ListKeys, + CreateKey, + ImportKey, + GetKeyInfo { + id: Option<String>, + search: Option<String>, + show_secret_key: Option<String>, + }, + DeleteKey { + id: String, + }, + UpdateKey { + id: String, + }, + // Buckets + ListBuckets, + CreateBucket, + GetBucketInfo { + id: Option<String>, + global_alias: Option<String>, + }, + DeleteBucket { + id: String, + }, + UpdateBucket { + id: String, + }, + // Bucket-Key Permissions + BucketAllowKey, + BucketDenyKey, + // Bucket aliases + GlobalAliasBucket { + id: String, + alias: String, + }, + GlobalUnaliasBucket { + id: String, + alias: String, + }, + LocalAliasBucket { + id: String, + access_key_id: String, + alias: String, + }, + LocalUnaliasBucket { + id: String, + access_key_id: String, + alias: String, + }, +}} + +impl Endpoint { + /// Determine which S3 endpoint a request is for using the request, and a bucket which was + /// possibly extracted from the Host header. + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets + pub fn from_request<T>(req: &Request<T>) -> Result<Self, Error> { + let uri = req.uri(); + let path = uri.path(); + let query = uri.query(); + + let mut query = QueryParameters::from_query(query.unwrap_or_default())?; + + let res = router_match!(@gen_path_parser (req.method(), path, query) [ + OPTIONS _ => Options, + GET "/check" => CheckDomain, + GET "/health" => Health, + GET "/metrics" => Metrics, + GET "/v1/status" => GetClusterStatus, + GET "/v1/health" => GetClusterHealth, + POST "/v1/connect" => ConnectClusterNodes, + // Layout endpoints + GET "/v1/layout" => GetClusterLayout, + POST "/v1/layout" => UpdateClusterLayout, + POST "/v1/layout/apply" => ApplyClusterLayout, + POST "/v1/layout/revert" => RevertClusterLayout, + // API key endpoints + GET "/v1/key" if id => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key), + GET "/v1/key" if search => GetKeyInfo (query_opt::id, query_opt::search, query_opt::show_secret_key), + POST "/v1/key" if id => UpdateKey (query::id), + POST "/v1/key" => CreateKey, + POST "/v1/key/import" => ImportKey, + DELETE "/v1/key" if id => DeleteKey (query::id), + GET "/v1/key" => ListKeys, + // Bucket endpoints + GET "/v1/bucket" if id => GetBucketInfo (query_opt::id, query_opt::global_alias), + GET "/v1/bucket" if global_alias => GetBucketInfo (query_opt::id, query_opt::global_alias), + GET "/v1/bucket" => ListBuckets, + POST "/v1/bucket" => CreateBucket, + DELETE "/v1/bucket" if id => DeleteBucket (query::id), + PUT "/v1/bucket" if id => UpdateBucket (query::id), + // Bucket-key permissions + POST "/v1/bucket/allow" => BucketAllowKey, + POST "/v1/bucket/deny" => BucketDenyKey, + // Bucket aliases + PUT "/v1/bucket/alias/global" => GlobalAliasBucket (query::id, query::alias), + DELETE "/v1/bucket/alias/global" => GlobalUnaliasBucket (query::id, query::alias), + PUT "/v1/bucket/alias/local" => LocalAliasBucket (query::id, query::access_key_id, query::alias), + DELETE "/v1/bucket/alias/local" => LocalUnaliasBucket (query::id, query::access_key_id, query::alias), + ]); + + if let Some(message) = query.nonempty_message() { + debug!("Unused query parameter: {}", message) + } + + Ok(res) + } + /// Some endpoints work exactly the same in their v1/ version as they did in their 
v0/ version. + /// For these endpoints, we can convert a v0/ call to its equivalent as if it was made using + /// its v1/ URL. + pub fn from_v0(v0_endpoint: router_v0::Endpoint) -> Result<Self, Error> { + match v0_endpoint { + // Cluster endpoints + router_v0::Endpoint::ConnectClusterNodes => Ok(Self::ConnectClusterNodes), + // - GetClusterStatus: response format changed + // - GetClusterHealth: response format changed + + // Layout endpoints + router_v0::Endpoint::RevertClusterLayout => Ok(Self::RevertClusterLayout), + // - GetClusterLayout: response format changed + // - UpdateClusterLayout: query format changed + // - ApplyCusterLayout: response format changed + + // Key endpoints + router_v0::Endpoint::ListKeys => Ok(Self::ListKeys), + router_v0::Endpoint::CreateKey => Ok(Self::CreateKey), + router_v0::Endpoint::GetKeyInfo { id, search } => Ok(Self::GetKeyInfo { + id, + search, + show_secret_key: Some("true".into()), + }), + router_v0::Endpoint::DeleteKey { id } => Ok(Self::DeleteKey { id }), + // - UpdateKey: response format changed (secret key no longer returned) + + // Bucket endpoints + router_v0::Endpoint::GetBucketInfo { id, global_alias } => { + Ok(Self::GetBucketInfo { id, global_alias }) + } + router_v0::Endpoint::ListBuckets => Ok(Self::ListBuckets), + router_v0::Endpoint::CreateBucket => Ok(Self::CreateBucket), + router_v0::Endpoint::DeleteBucket { id } => Ok(Self::DeleteBucket { id }), + router_v0::Endpoint::UpdateBucket { id } => Ok(Self::UpdateBucket { id }), + + // Bucket-key permissions + router_v0::Endpoint::BucketAllowKey => Ok(Self::BucketAllowKey), + router_v0::Endpoint::BucketDenyKey => Ok(Self::BucketDenyKey), + + // Bucket alias endpoints + router_v0::Endpoint::GlobalAliasBucket { id, alias } => { + Ok(Self::GlobalAliasBucket { id, alias }) + } + router_v0::Endpoint::GlobalUnaliasBucket { id, alias } => { + Ok(Self::GlobalUnaliasBucket { id, alias }) + } + router_v0::Endpoint::LocalAliasBucket { + id, + access_key_id, + alias, + } => Ok(Self::LocalAliasBucket { + id, + access_key_id, + alias, + }), + router_v0::Endpoint::LocalUnaliasBucket { + id, + access_key_id, + alias, + } => Ok(Self::LocalUnaliasBucket { + id, + access_key_id, + alias, + }), + + // For endpoints that have different body content syntax, issue + // deprecation warning + _ => Err(Error::bad_request(format!( + "v0/ endpoint is no longer supported: {}", + v0_endpoint.name() + ))), + } + } + /// Get the kind of authorization which is required to perform the operation. + pub fn authorization_type(&self) -> Authorization { + match self { + Self::Health => Authorization::None, + Self::CheckDomain => Authorization::None, + Self::Metrics => Authorization::MetricsToken, + _ => Authorization::AdminToken, + } + } +} + +generateQueryParameters! 
{ + keywords: [], + fields: [ + "format" => format, + "id" => id, + "search" => search, + "globalAlias" => global_alias, + "alias" => alias, + "accessKeyId" => access_key_id, + "showSecretKey" => show_secret_key + ] +} diff --git a/src/api/generic_server.rs b/src/api/generic_server.rs index 757b85ec..fa346f48 100644 --- a/src/api/generic_server.rs +++ b/src/api/generic_server.rs @@ -1,4 +1,5 @@ -use std::net::SocketAddr; +use std::fs::{self, Permissions}; +use std::os::unix::fs::PermissionsExt; use std::sync::Arc; use async_trait::async_trait; @@ -11,6 +12,10 @@ use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Response, Server}; use hyper::{HeaderMap, StatusCode}; +use hyperlocal::UnixServerExt; + +use tokio::net::UnixStream; + use opentelemetry::{ global, metrics::{Counter, ValueRecorder}, @@ -21,6 +26,7 @@ use opentelemetry::{ use garage_util::error::Error as GarageError; use garage_util::forwarded_headers; use garage_util::metrics::{gen_trace_id, RecordDuration}; +use garage_util::socket_address::UnixOrTCPSocketAddress; pub(crate) trait ApiEndpoint: Send + Sync + 'static { fn name(&self) -> &'static str; @@ -91,10 +97,11 @@ impl<A: ApiHandler> ApiServer<A> { pub async fn run_server( self: Arc<Self>, - bind_addr: SocketAddr, + bind_addr: UnixOrTCPSocketAddress, + unix_bind_addr_mode: Option<u32>, shutdown_signal: impl Future<Output = ()>, ) -> Result<(), GarageError> { - let service = make_service_fn(|conn: &AddrStream| { + let tcp_service = make_service_fn(|conn: &AddrStream| { let this = self.clone(); let client_addr = conn.remote_addr(); @@ -102,28 +109,63 @@ impl<A: ApiHandler> ApiServer<A> { Ok::<_, GarageError>(service_fn(move |req: Request<Body>| { let this = this.clone(); - this.handler(req, client_addr) + this.handler(req, client_addr.to_string()) })) } }); - let server = Server::bind(&bind_addr).serve(service); + let unix_service = make_service_fn(|_: &UnixStream| { + let this = self.clone(); + + let path = bind_addr.to_string(); + async move { + Ok::<_, GarageError>(service_fn(move |req: Request<Body>| { + let this = this.clone(); + + this.handler(req, path.clone()) + })) + } + }); - let graceful = server.with_graceful_shutdown(shutdown_signal); info!( - "{} API server listening on http://{}", + "{} API server listening on {}", A::API_NAME_DISPLAY, bind_addr ); - graceful.await?; + match bind_addr { + UnixOrTCPSocketAddress::TCPSocket(addr) => { + Server::bind(&addr) + .serve(tcp_service) + .with_graceful_shutdown(shutdown_signal) + .await? + } + UnixOrTCPSocketAddress::UnixSocket(ref path) => { + if path.exists() { + fs::remove_file(path)? + } + + let bound = Server::bind_unix(path)?; + + fs::set_permissions( + path, + Permissions::from_mode(unix_bind_addr_mode.unwrap_or(0o222)), + )?; + + bound + .serve(unix_service) + .with_graceful_shutdown(shutdown_signal) + .await?; + } + }; + Ok(()) } async fn handler( self: Arc<Self>, req: Request<Body>, - addr: SocketAddr, + addr: String, ) -> Result<Response<Body>, GarageError> { let uri = req.uri().clone(); diff --git a/src/api/helpers.rs b/src/api/helpers.rs index 642dbc42..1d55ebd5 100644 --- a/src/api/helpers.rs +++ b/src/api/helpers.rs @@ -152,6 +152,10 @@ pub fn json_ok_response<T: Serialize>(res: &T) -> Result<Response<Body>, Error> .body(Body::from(resp_json))?) 
} +pub fn is_default<T: Default + PartialEq>(v: &T) -> bool { + *v == T::default() +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/api/k2v/api_server.rs b/src/api/k2v/api_server.rs index bb85b2e7..3a032aba 100644 --- a/src/api/k2v/api_server.rs +++ b/src/api/k2v/api_server.rs @@ -1,4 +1,3 @@ -use std::net::SocketAddr; use std::sync::Arc; use async_trait::async_trait; @@ -9,6 +8,7 @@ use hyper::{Body, Method, Request, Response}; use opentelemetry::{trace::SpanRef, KeyValue}; use garage_util::error::Error as GarageError; +use garage_util::socket_address::UnixOrTCPSocketAddress; use garage_model::garage::Garage; @@ -37,12 +37,12 @@ pub(crate) struct K2VApiEndpoint { impl K2VApiServer { pub async fn run( garage: Arc<Garage>, - bind_addr: SocketAddr, + bind_addr: UnixOrTCPSocketAddress, s3_region: String, shutdown_signal: impl Future<Output = ()>, ) -> Result<(), GarageError> { ApiServer::new(s3_region, K2VApiServer { garage }) - .run_server(bind_addr, shutdown_signal) + .run_server(bind_addr, None, shutdown_signal) .await } } diff --git a/src/api/router_macros.rs b/src/api/router_macros.rs index 07b5570c..cfecbc92 100644 --- a/src/api/router_macros.rs +++ b/src/api/router_macros.rs @@ -26,6 +26,7 @@ macro_rules! router_match { $($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)* ]) => {{ { + #[allow(unused_parens)] match ($method, $reqpath) { $( (&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api { @@ -128,12 +129,6 @@ macro_rules! router_match { } } }; - (@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => { - $($then)* - }; - (@if () then ($($then:tt)*) else ($($else:tt)*)) => { - $($else)* - }; } /// This macro is used to generate part of the code in this module. 
It must be called only one, and diff --git a/src/api/s3/api_server.rs b/src/api/s3/api_server.rs index 27837297..887839dd 100644 --- a/src/api/s3/api_server.rs +++ b/src/api/s3/api_server.rs @@ -1,4 +1,3 @@ -use std::net::SocketAddr; use std::sync::Arc; use async_trait::async_trait; @@ -10,6 +9,7 @@ use hyper::{Body, Request, Response}; use opentelemetry::{trace::SpanRef, KeyValue}; use garage_util::error::Error as GarageError; +use garage_util::socket_address::UnixOrTCPSocketAddress; use garage_model::garage::Garage; use garage_model::key_table::Key; @@ -26,7 +26,9 @@ use crate::s3::copy::*; use crate::s3::cors::*; use crate::s3::delete::*; use crate::s3::get::*; +use crate::s3::lifecycle::*; use crate::s3::list::*; +use crate::s3::multipart::*; use crate::s3::post_object::handle_post_object; use crate::s3::put::*; use crate::s3::router::Endpoint; @@ -44,12 +46,12 @@ pub(crate) struct S3ApiEndpoint { impl S3ApiServer { pub async fn run( garage: Arc<Garage>, - addr: SocketAddr, + addr: UnixOrTCPSocketAddress, s3_region: String, shutdown_signal: impl Future<Output = ()>, ) -> Result<(), GarageError> { ApiServer::new(s3_region, S3ApiServer { garage }) - .run_server(addr, shutdown_signal) + .run_server(addr, None, shutdown_signal) .await } @@ -256,7 +258,7 @@ impl ApiHandler for S3ApiServer { bucket_name, bucket_id, delimiter: delimiter.map(|d| d.to_string()), - page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + page_size: max_keys.unwrap_or(1000).clamp(1, 1000), prefix: prefix.unwrap_or_default(), urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), }, @@ -286,7 +288,7 @@ impl ApiHandler for S3ApiServer { bucket_name, bucket_id, delimiter: delimiter.map(|d| d.to_string()), - page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + page_size: max_keys.unwrap_or(1000).clamp(1, 1000), urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), prefix: prefix.unwrap_or_default(), }, @@ -319,7 +321,7 @@ impl ApiHandler for S3ApiServer { bucket_name, bucket_id, delimiter: delimiter.map(|d| d.to_string()), - page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + page_size: max_uploads.unwrap_or(1000).clamp(1, 1000), prefix: prefix.unwrap_or_default(), urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), }, @@ -342,8 +344,8 @@ impl ApiHandler for S3ApiServer { bucket_id, key, upload_id, - part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)), - max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + part_number_marker: part_number_marker.map(|p| p.min(10000)), + max_parts: max_parts.unwrap_or(1000).clamp(1, 1000), }, ) .await @@ -353,14 +355,21 @@ impl ApiHandler for S3ApiServer { } Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await, Endpoint::PutBucketWebsite {} => { - handle_put_website(garage, bucket_id, req, content_sha256).await + handle_put_website(garage, bucket.clone(), req, content_sha256).await } - Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await, + Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket.clone()).await, Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await, Endpoint::PutBucketCors {} => { - handle_put_cors(garage, bucket_id, req, content_sha256).await + handle_put_cors(garage, bucket.clone(), req, content_sha256).await + } + Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket.clone()).await, + Endpoint::GetBucketLifecycleConfiguration {} => handle_get_lifecycle(&bucket).await, + 
Endpoint::PutBucketLifecycleConfiguration {} => { + handle_put_lifecycle(garage, bucket.clone(), req, content_sha256).await + } + Endpoint::DeleteBucketLifecycle {} => { + handle_delete_lifecycle(garage, bucket.clone()).await } - Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await, endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), }; diff --git a/src/api/s3/copy.rs b/src/api/s3/copy.rs index 7eb6459d..68b4f0c9 100644 --- a/src/api/s3/copy.rs +++ b/src/api/s3/copy.rs @@ -2,7 +2,7 @@ use std::pin::Pin; use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use futures::{stream, stream::Stream, StreamExt, TryFutureExt}; +use futures::{stream, stream::Stream, StreamExt}; use md5::{Digest as Md5Digest, Md5}; use bytes::Bytes; @@ -18,12 +18,14 @@ use garage_util::time::*; use garage_model::garage::Garage; use garage_model::key_table::Key; use garage_model::s3::block_ref_table::*; +use garage_model::s3::mpu_table::*; use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; use crate::helpers::parse_bucket_key; use crate::s3::error::*; -use crate::s3::put::{decode_upload_id, get_headers}; +use crate::s3::multipart; +use crate::s3::put::get_headers; use crate::s3::xml::{self as s3_xml, xmlns_tag}; pub async fn handle_copy( @@ -92,7 +94,10 @@ pub async fn handle_copy( let tmp_dest_object_version = ObjectVersion { uuid: new_uuid, timestamp: new_timestamp, - state: ObjectVersionState::Uploading(new_meta.headers.clone()), + state: ObjectVersionState::Uploading { + headers: new_meta.headers.clone(), + multipart: false, + }, }; let tmp_dest_object = Object::new( dest_bucket_id, @@ -105,8 +110,14 @@ pub async fn handle_copy( // this means that the BlockRef entries linked to this version cannot be // marked as deleted (they are marked as deleted only if the Version // doesn't exist or is marked as deleted). 
- let mut dest_version = - Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false); + let mut dest_version = Version::new( + new_uuid, + VersionBacklink::Object { + bucket_id: dest_bucket_id, + key: dest_key.to_string(), + }, + false, + ); garage.version_table.insert(&dest_version).await?; // Fill in block list for version and insert block refs @@ -179,17 +190,13 @@ pub async fn handle_upload_part_copy( ) -> Result<Response<Body>, Error> { let copy_precondition = CopyPreconditionHeaders::parse(req)?; - let dest_version_uuid = decode_upload_id(upload_id)?; + let dest_upload_id = multipart::decode_upload_id(upload_id)?; let dest_key = dest_key.to_string(); - let (source_object, dest_object) = futures::try_join!( + let (source_object, (_, _, mut dest_mpu)) = futures::try_join!( get_copy_source(&garage, api_key, req), - garage - .object_table - .get(&dest_bucket_id, &dest_key) - .map_err(Error::from), + multipart::get_upload(&garage, &dest_bucket_id, &dest_key, &dest_upload_id) )?; - let dest_object = dest_object.ok_or(Error::NoSuchKey)?; let (source_object_version, source_version_data, source_version_meta) = extract_source_info(&source_object)?; @@ -217,15 +224,6 @@ pub async fn handle_upload_part_copy( }, }; - // Check destination version is indeed in uploading state - if !dest_object - .versions() - .iter() - .any(|v| v.uuid == dest_version_uuid && v.is_uploading()) - { - return Err(Error::NoSuchUpload); - } - // Check source version is not inlined match source_version_data { ObjectVersionData::DeleteMarker => unreachable!(), @@ -242,23 +240,11 @@ pub async fn handle_upload_part_copy( // Fetch source versin with its block list, // and destination version to check part hasn't yet been uploaded - let (source_version, dest_version) = futures::try_join!( - garage - .version_table - .get(&source_object_version.uuid, &EmptyKey), - garage.version_table.get(&dest_version_uuid, &EmptyKey), - )?; - let source_version = source_version.ok_or(Error::NoSuchKey)?; - - // Check this part number hasn't yet been uploaded - if let Some(dv) = dest_version { - if dv.has_part_number(part_number) { - return Err(Error::bad_request(format!( - "Part number {} has already been uploaded", - part_number - ))); - } - } + let source_version = garage + .version_table + .get(&source_object_version.uuid, &EmptyKey) + .await? + .ok_or(Error::NoSuchKey)?; // We want to reuse blocks from the source version as much as possible. 
// However, we still need to get the data from these blocks @@ -299,6 +285,33 @@ pub async fn handle_upload_part_copy( current_offset = block_end; } + // Calculate the identity of destination part: timestamp, version id + let dest_version_id = gen_uuid(); + let dest_mpu_part_key = MpuPartKey { + part_number, + timestamp: dest_mpu.next_timestamp(part_number), + }; + + // Create the uploaded part + dest_mpu.parts.clear(); + dest_mpu.parts.put( + dest_mpu_part_key, + MpuPart { + version: dest_version_id, + etag: None, + size: None, + }, + ); + garage.mpu_table.insert(&dest_mpu).await?; + + let mut dest_version = Version::new( + dest_version_id, + VersionBacklink::MultipartUpload { + upload_id: dest_upload_id, + }, + false, + ); + // Now, actually copy the blocks let mut md5hasher = Md5::new(); @@ -348,8 +361,8 @@ pub async fn handle_upload_part_copy( let must_upload = existing_block_hash.is_none(); let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..])); - let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false); - version.blocks.put( + dest_version.blocks.clear(); + dest_version.blocks.put( VersionBlockKey { part_number, offset: current_offset, @@ -363,7 +376,7 @@ pub async fn handle_upload_part_copy( let block_ref = BlockRef { block: final_hash, - version: dest_version_uuid, + version: dest_version_id, deleted: false.into(), }; @@ -378,23 +391,33 @@ pub async fn handle_upload_part_copy( Ok(()) } }, - // Thing 2: we need to insert the block in the version - garage.version_table.insert(&version), - // Thing 3: we need to add a block reference - garage.block_ref_table.insert(&block_ref), + async { + // Thing 2: we need to insert the block in the version + garage.version_table.insert(&dest_version).await?; + // Thing 3: we need to add a block reference + garage.block_ref_table.insert(&block_ref).await + }, // Thing 4: we need to prefetch the next block defragmenter.next(), )?; - next_block = res.3; + next_block = res.2; } + assert_eq!(current_offset, source_range.length); + let data_md5sum = md5hasher.finalize(); let etag = hex::encode(data_md5sum); // Put the part's ETag in the Versiontable - let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false); - version.parts_etags.put(part_number, etag.clone()); - garage.version_table.insert(&version).await?; + dest_mpu.parts.put( + dest_mpu_part_key, + MpuPart { + version: dest_version_id, + etag: Some(etag.clone()), + size: Some(current_offset), + }, + ); + garage.mpu_table.insert(&dest_mpu).await?; // LGTM let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult { diff --git a/src/api/s3/cors.rs b/src/api/s3/cors.rs index c7273464..49097ad1 100644 --- a/src/api/s3/cors.rs +++ b/src/api/s3/cors.rs @@ -44,14 +44,11 @@ pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> { pub async fn handle_delete_cors( garage: Arc<Garage>, - bucket_id: Uuid, + mut bucket: Bucket, ) -> Result<Response<Body>, Error> { - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - - let param = bucket.params_mut().unwrap(); + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; param.cors_config.update(None); garage.bucket_table.insert(&bucket).await?; @@ -63,7 +60,7 @@ pub async fn handle_delete_cors( pub async fn handle_put_cors( garage: Arc<Garage>, - bucket_id: Uuid, + mut bucket: Bucket, req: Request<Body>, content_sha256: Option<Hash>, ) -> Result<Response<Body>, 
Error> { @@ -73,12 +70,9 @@ pub async fn handle_put_cors( verify_signed_content(content_sha256, &body[..])?; } - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - - let param = bucket.params_mut().unwrap(); + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; let conf: CorsConfiguration = from_reader(&body as &[u8])?; conf.validate()?; diff --git a/src/api/s3/delete.rs b/src/api/s3/delete.rs index b337155f..1c491eac 100644 --- a/src/api/s3/delete.rs +++ b/src/api/s3/delete.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use hyper::{Body, Request, Response, StatusCode}; use garage_util::data::*; -use garage_util::time::*; use garage_model::garage::Garage; use garage_model::s3::object_table::*; use crate::s3::error::*; +use crate::s3::put::next_timestamp; use crate::s3::xml as s3_xml; use crate::signature::verify_signed_content; @@ -23,40 +23,36 @@ async fn handle_delete_internal( .await? .ok_or(Error::NoSuchKey)?; // No need to delete - let interesting_versions = object.versions().iter().filter(|v| { - !matches!( - v.state, - ObjectVersionState::Aborted - | ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) - ) - }); - - let mut version_to_delete = None; - let mut timestamp = now_msec(); - for v in interesting_versions { - if v.timestamp + 1 > timestamp || version_to_delete.is_none() { - version_to_delete = Some(v.uuid); + let del_timestamp = next_timestamp(Some(&object)); + let del_uuid = gen_uuid(); + + let deleted_version = object + .versions() + .iter() + .rev() + .find(|v| !matches!(&v.state, ObjectVersionState::Aborted)) + .or_else(|| object.versions().iter().rev().next()); + let deleted_version = match deleted_version { + Some(dv) => dv.uuid, + None => { + warn!("Object has no versions: {:?}", object); + Uuid::from([0u8; 32]) } - timestamp = std::cmp::max(timestamp, v.timestamp + 1); - } - - let deleted_version = version_to_delete.ok_or(Error::NoSuchKey)?; - - let version_uuid = gen_uuid(); + }; let object = Object::new( bucket_id, key.into(), vec![ObjectVersion { - uuid: version_uuid, - timestamp, + uuid: del_uuid, + timestamp: del_timestamp, state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), }], ); garage.object_table.insert(&object).await?; - Ok((deleted_version, version_uuid)) + Ok((deleted_version, del_uuid)) } pub async fn handle_delete( diff --git a/src/api/s3/get.rs b/src/api/s3/get.rs index cde7b461..5e682726 100644 --- a/src/api/s3/get.rs +++ b/src/api/s3/get.rs @@ -149,7 +149,6 @@ pub async fn handle_head( let (part_offset, part_end) = calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; - let n_parts = version.parts_etags.items().len(); Ok(object_headers(object_version, version_meta) .header(CONTENT_LENGTH, format!("{}", part_end - part_offset)) @@ -162,7 +161,7 @@ pub async fn handle_head( version_meta.size ), ) - .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts)) + .header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?)) .status(StatusCode::PARTIAL_CONTENT) .body(Body::empty())?) 
} @@ -376,7 +375,6 @@ async fn handle_get_part( let (begin, end) = calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; - let n_parts = version.parts_etags.items().len(); let body = body_from_blocks_range(garage, version.blocks.items(), begin, end); @@ -386,7 +384,7 @@ async fn handle_get_part( CONTENT_RANGE, format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), ) - .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts)) + .header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?)) .body(body)?) } _ => unreachable!(), diff --git a/src/api/s3/lifecycle.rs b/src/api/s3/lifecycle.rs new file mode 100644 index 00000000..1e7d6755 --- /dev/null +++ b/src/api/s3/lifecycle.rs @@ -0,0 +1,401 @@ +use quick_xml::de::from_reader; +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; + +use serde::{Deserialize, Serialize}; + +use crate::s3::error::*; +use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; +use crate::signature::verify_signed_content; + +use garage_model::bucket_table::{ + parse_lifecycle_date, Bucket, LifecycleExpiration as GarageLifecycleExpiration, + LifecycleFilter as GarageLifecycleFilter, LifecycleRule as GarageLifecycleRule, +}; +use garage_model::garage::Garage; +use garage_util::data::*; + +pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<Body>, Error> { + let param = bucket + .params() + .ok_or_internal_error("Bucket should not be deleted at this point")?; + + if let Some(lifecycle) = param.lifecycle_config.get() { + let wc = LifecycleConfiguration::from_garage_lifecycle_config(lifecycle); + let xml = to_xml_with_header(&wc)?; + Ok(Response::builder() + .status(StatusCode::OK) + .header(http::header::CONTENT_TYPE, "application/xml") + .body(Body::from(xml))?) + } else { + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) + } +} + +pub async fn handle_delete_lifecycle( + garage: Arc<Garage>, + mut bucket: Bucket, +) -> Result<Response<Body>, Error> { + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; + + param.lifecycle_config.update(None); + garage.bucket_table.insert(&bucket).await?; + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) +} + +pub async fn handle_put_lifecycle( + garage: Arc<Garage>, + mut bucket: Bucket, + req: Request<Body>, + content_sha256: Option<Hash>, +) -> Result<Response<Body>, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; + + let conf: LifecycleConfiguration = from_reader(&body as &[u8])?; + let config = conf + .validate_into_garage_lifecycle_config() + .ok_or_bad_request("Invalid lifecycle configuration")?; + + param.lifecycle_config.update(Some(config)); + garage.bucket_table.insert(&bucket).await?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?) 
+} + +// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ---- + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct LifecycleConfiguration { + #[serde(serialize_with = "xmlns_tag", skip_deserializing)] + pub xmlns: (), + #[serde(rename = "Rule")] + pub lifecycle_rules: Vec<LifecycleRule>, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct LifecycleRule { + #[serde(rename = "ID")] + pub id: Option<Value>, + #[serde(rename = "Status")] + pub status: Value, + #[serde(rename = "Filter", default)] + pub filter: Option<Filter>, + #[serde(rename = "Expiration", default)] + pub expiration: Option<Expiration>, + #[serde(rename = "AbortIncompleteMultipartUpload", default)] + pub abort_incomplete_mpu: Option<AbortIncompleteMpu>, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Default)] +pub struct Filter { + #[serde(rename = "And")] + pub and: Option<Box<Filter>>, + #[serde(rename = "Prefix")] + pub prefix: Option<Value>, + #[serde(rename = "ObjectSizeGreaterThan")] + pub size_gt: Option<IntValue>, + #[serde(rename = "ObjectSizeLessThan")] + pub size_lt: Option<IntValue>, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Expiration { + #[serde(rename = "Days")] + pub days: Option<IntValue>, + #[serde(rename = "Date")] + pub at_date: Option<Value>, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct AbortIncompleteMpu { + #[serde(rename = "DaysAfterInitiation")] + pub days: IntValue, +} + +impl LifecycleConfiguration { + pub fn validate_into_garage_lifecycle_config( + self, + ) -> Result<Vec<GarageLifecycleRule>, &'static str> { + let mut ret = vec![]; + for rule in self.lifecycle_rules { + ret.push(rule.validate_into_garage_lifecycle_rule()?); + } + Ok(ret) + } + + pub fn from_garage_lifecycle_config(config: &[GarageLifecycleRule]) -> Self { + Self { + xmlns: (), + lifecycle_rules: config + .iter() + .map(LifecycleRule::from_garage_lifecycle_rule) + .collect(), + } + } +} + +impl LifecycleRule { + pub fn validate_into_garage_lifecycle_rule(self) -> Result<GarageLifecycleRule, &'static str> { + let enabled = match self.status.0.as_str() { + "Enabled" => true, + "Disabled" => false, + _ => return Err("invalid value for <Status>"), + }; + + let filter = self + .filter + .map(Filter::validate_into_garage_lifecycle_filter) + .transpose()? 
+ .unwrap_or_default(); + + let abort_incomplete_mpu_days = self.abort_incomplete_mpu.map(|x| x.days.0 as usize); + + let expiration = self + .expiration + .map(Expiration::validate_into_garage_lifecycle_expiration) + .transpose()?; + + Ok(GarageLifecycleRule { + id: self.id.map(|x| x.0), + enabled, + filter, + abort_incomplete_mpu_days, + expiration, + }) + } + + pub fn from_garage_lifecycle_rule(rule: &GarageLifecycleRule) -> Self { + Self { + id: rule.id.as_deref().map(Value::from), + status: if rule.enabled { + Value::from("Enabled") + } else { + Value::from("Disabled") + }, + filter: Filter::from_garage_lifecycle_filter(&rule.filter), + abort_incomplete_mpu: rule + .abort_incomplete_mpu_days + .map(|days| AbortIncompleteMpu { + days: IntValue(days as i64), + }), + expiration: rule + .expiration + .as_ref() + .map(Expiration::from_garage_lifecycle_expiration), + } + } +} + +impl Filter { + pub fn count(&self) -> i32 { + fn count<T>(x: &Option<T>) -> i32 { + x.as_ref().map(|_| 1).unwrap_or(0) + } + count(&self.prefix) + count(&self.size_gt) + count(&self.size_lt) + } + + pub fn validate_into_garage_lifecycle_filter( + self, + ) -> Result<GarageLifecycleFilter, &'static str> { + if self.count() > 0 && self.and.is_some() { + Err("Filter tag cannot contain both <And> and another condition") + } else if let Some(and) = self.and { + if and.and.is_some() { + return Err("Nested <And> tags"); + } + Ok(and.internal_into_garage_lifecycle_filter()) + } else if self.count() > 1 { + Err("Multiple Filter conditions must be wrapped in an <And> tag") + } else { + Ok(self.internal_into_garage_lifecycle_filter()) + } + } + + fn internal_into_garage_lifecycle_filter(self) -> GarageLifecycleFilter { + GarageLifecycleFilter { + prefix: self.prefix.map(|x| x.0), + size_gt: self.size_gt.map(|x| x.0 as u64), + size_lt: self.size_lt.map(|x| x.0 as u64), + } + } + + pub fn from_garage_lifecycle_filter(rule: &GarageLifecycleFilter) -> Option<Self> { + let filter = Filter { + and: None, + prefix: rule.prefix.as_deref().map(Value::from), + size_gt: rule.size_gt.map(|x| IntValue(x as i64)), + size_lt: rule.size_lt.map(|x| IntValue(x as i64)), + }; + match filter.count() { + 0 => None, + 1 => Some(filter), + _ => Some(Filter { + and: Some(Box::new(filter)), + ..Default::default() + }), + } + } +} + +impl Expiration { + pub fn validate_into_garage_lifecycle_expiration( + self, + ) -> Result<GarageLifecycleExpiration, &'static str> { + match (self.days, self.at_date) { + (Some(_), Some(_)) => Err("cannot have both <Days> and <Date> in <Expiration>"), + (None, None) => Err("<Expiration> must contain either <Days> or <Date>"), + (Some(days), None) => Ok(GarageLifecycleExpiration::AfterDays(days.0 as usize)), + (None, Some(date)) => { + parse_lifecycle_date(&date.0)?; + Ok(GarageLifecycleExpiration::AtDate(date.0)) + } + } + } + + pub fn from_garage_lifecycle_expiration(exp: &GarageLifecycleExpiration) -> Self { + match exp { + GarageLifecycleExpiration::AfterDays(days) => Expiration { + days: Some(IntValue(*days as i64)), + at_date: None, + }, + GarageLifecycleExpiration::AtDate(date) => Expiration { + days: None, + at_date: Some(Value(date.to_string())), + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use quick_xml::de::from_str; + + #[test] + fn test_deserialize_lifecycle_config() -> Result<(), Error> { + let message = r#"<?xml version="1.0" encoding="UTF-8"?> +<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <Rule> + <ID>id1</ID> + <Status>Enabled</Status> + <Filter> + 
<Prefix>documents/</Prefix> + </Filter> + <AbortIncompleteMultipartUpload> + <DaysAfterInitiation>7</DaysAfterInitiation> + </AbortIncompleteMultipartUpload> + </Rule> + <Rule> + <ID>id2</ID> + <Status>Enabled</Status> + <Filter> + <And> + <Prefix>logs/</Prefix> + <ObjectSizeGreaterThan>1000000</ObjectSizeGreaterThan> + </And> + </Filter> + <Expiration> + <Days>365</Days> + </Expiration> + </Rule> +</LifecycleConfiguration>"#; + let conf: LifecycleConfiguration = from_str(message).unwrap(); + let ref_value = LifecycleConfiguration { + xmlns: (), + lifecycle_rules: vec![ + LifecycleRule { + id: Some("id1".into()), + status: "Enabled".into(), + filter: Some(Filter { + prefix: Some("documents/".into()), + ..Default::default() + }), + expiration: None, + abort_incomplete_mpu: Some(AbortIncompleteMpu { days: IntValue(7) }), + }, + LifecycleRule { + id: Some("id2".into()), + status: "Enabled".into(), + filter: Some(Filter { + and: Some(Box::new(Filter { + prefix: Some("logs/".into()), + size_gt: Some(IntValue(1000000)), + ..Default::default() + })), + ..Default::default() + }), + expiration: Some(Expiration { + days: Some(IntValue(365)), + at_date: None, + }), + abort_incomplete_mpu: None, + }, + ], + }; + assert_eq! { + ref_value, + conf + }; + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + // Check validation + let validated = ref_value + .validate_into_garage_lifecycle_config() + .ok_or_bad_request("invalid xml config")?; + + let ref_config = vec![ + GarageLifecycleRule { + id: Some("id1".into()), + enabled: true, + filter: GarageLifecycleFilter { + prefix: Some("documents/".into()), + ..Default::default() + }, + expiration: None, + abort_incomplete_mpu_days: Some(7), + }, + GarageLifecycleRule { + id: Some("id2".into()), + enabled: true, + filter: GarageLifecycleFilter { + prefix: Some("logs/".into()), + size_gt: Some(1000000), + ..Default::default() + }, + expiration: Some(GarageLifecycleExpiration::AfterDays(365)), + abort_incomplete_mpu_days: None, + }, + ]; + assert_eq!(validated, ref_config); + + let message3 = to_xml_with_header(&LifecycleConfiguration::from_garage_lifecycle_config( + &validated, + ))?; + assert_eq!(cleanup(message), cleanup(&message3)); + + Ok(()) + } +} diff --git a/src/api/s3/list.rs b/src/api/s3/list.rs index 5cb0d65a..1b9e8cd5 100644 --- a/src/api/s3/list.rs +++ b/src/api/s3/list.rs @@ -1,4 +1,3 @@ -use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet}; use std::iter::{Iterator, Peekable}; use std::sync::Arc; @@ -11,15 +10,15 @@ use garage_util::error::Error as GarageError; use garage_util::time::*; use garage_model::garage::Garage; +use garage_model::s3::mpu_table::*; use garage_model::s3::object_table::*; -use garage_model::s3::version_table::Version; -use garage_table::{EmptyKey, EnumerationOrder}; +use garage_table::EnumerationOrder; use crate::encoding::*; use crate::helpers::key_after_prefix; use crate::s3::error::*; -use crate::s3::put as s3_put; +use crate::s3::multipart as s3_multipart; use crate::s3::xml as s3_xml; const DUMMY_NAME: &str = "Dummy Key"; @@ -176,7 +175,9 @@ pub async fn handle_list_multipart_upload( t.get_range( &bucket, key, - Some(ObjectFilter::IsUploading), + Some(ObjectFilter::IsUploading { + check_multipart: Some(true), + }), count, EnumerationOrder::Forward, ) @@ -272,24 +273,26 @@ pub async fn handle_list_parts( ) -> Result<Response<Body>, Error> { debug!("ListParts {:?}", query); - let upload_id = 
s3_put::decode_upload_id(&query.upload_id)?; + let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?; - let (object, version) = futures::try_join!( - garage.object_table.get(&query.bucket_id, &query.key), - garage.version_table.get(&upload_id, &EmptyKey), - )?; + let (_, _, mpu) = + s3_multipart::get_upload(&garage, &query.bucket_id, &query.key, &upload_id).await?; - let (info, next) = fetch_part_info(query, object, version, upload_id)?; + let (info, next) = fetch_part_info(query, &mpu)?; let result = s3_xml::ListPartsResult { xmlns: (), + + // Query parameters bucket: s3_xml::Value(query.bucket_name.to_string()), key: s3_xml::Value(query.key.to_string()), upload_id: s3_xml::Value(query.upload_id.to_string()), part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)), - next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)), max_parts: s3_xml::IntValue(query.max_parts as i64), - is_truncated: s3_xml::Value(next.map(|_| "true").unwrap_or("false").to_string()), + + // Result values + next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)), + is_truncated: s3_xml::Value(format!("{}", next.is_some())), parts: info .iter() .map(|part| s3_xml::PartItem { @@ -299,6 +302,8 @@ pub async fn handle_list_parts( size: s3_xml::IntValue(part.size as i64), }) .collect(), + + // Dummy result values (unsupported features) initiator: s3_xml::Initiator { display_name: s3_xml::Value(DUMMY_NAME.to_string()), id: s3_xml::Value(DUMMY_KEY.to_string()), @@ -335,8 +340,8 @@ struct UploadInfo { } #[derive(Debug, PartialEq)] -struct PartInfo { - etag: String, +struct PartInfo<'a> { + etag: &'a str, timestamp: u64, part_number: u64, size: u64, @@ -421,8 +426,10 @@ where // Drop the first key if needed // Only AfterKey requires it according to the S3 spec and our implem. 
match (&cursor, iter.peek()) { - (RangeBegin::AfterKey { key }, Some(object)) if &object.key == key => iter.next(), - (_, _) => None, + (RangeBegin::AfterKey { key }, Some(object)) if &object.key == key => { + iter.next(); + } + _ => (), }; while let Some(object) = iter.peek() { @@ -431,16 +438,22 @@ where return Ok(None); } - cursor = match acc.extract(query, &cursor, &mut iter) { - ExtractionResult::Extracted { key } => RangeBegin::AfterKey { key }, + match acc.extract(query, &cursor, &mut iter) { + ExtractionResult::Extracted { key } => { + cursor = RangeBegin::AfterKey { key }; + } ExtractionResult::SkipTo { key, fallback_key } => { - RangeBegin::IncludingKey { key, fallback_key } + cursor = RangeBegin::IncludingKey { key, fallback_key }; } ExtractionResult::FilledAtUpload { key, upload } => { - return Ok(Some(RangeBegin::AfterUpload { key, upload })) + return Ok(Some(RangeBegin::AfterUpload { key, upload })); + } + ExtractionResult::Filled => { + return Ok(Some(cursor)); + } + ExtractionResult::NoMore => { + return Ok(None); } - ExtractionResult::Filled => return Ok(Some(cursor)), - ExtractionResult::NoMore => return Ok(None), }; } @@ -456,107 +469,51 @@ where } } -fn fetch_part_info( +fn fetch_part_info<'a>( query: &ListPartsQuery, - object: Option<Object>, - version: Option<Version>, - upload_id: Uuid, -) -> Result<(Vec<PartInfo>, Option<u64>), Error> { - // Check results - let object = object.ok_or(Error::NoSuchKey)?; - - let obj_version = object - .versions() - .iter() - .find(|v| v.uuid == upload_id && v.is_uploading()) - .ok_or(Error::NoSuchUpload)?; - - let version = version.ok_or(Error::NoSuchKey)?; - - // Cut the beginning of our 2 vectors if required - let (etags, blocks) = match &query.part_number_marker { - Some(marker) => { - let next = marker + 1; - - let part_idx = into_ok_or_err( - version - .parts_etags - .items() - .binary_search_by(|(part_num, _)| part_num.cmp(&next)), - ); - let parts = &version.parts_etags.items()[part_idx..]; - - let block_idx = into_ok_or_err( - version - .blocks - .items() - .binary_search_by(|(vkey, _)| vkey.part_number.cmp(&next)), - ); - let blocks = &version.blocks.items()[block_idx..]; - - (parts, blocks) - } - None => (version.parts_etags.items(), version.blocks.items()), - }; - - // Use the block vector to compute a (part_number, size) vector - let mut size = Vec::<(u64, u64)>::new(); - blocks.iter().for_each(|(key, val)| { - let mut new_size = val.size; - match size.pop() { - Some((part_number, size)) if part_number == key.part_number => new_size += size, - Some(v) => size.push(v), - None => (), - } - size.push((key.part_number, new_size)) - }); - - // Merge the etag vector and size vector to build a PartInfo vector - let max_parts = query.max_parts as usize; - let (mut etag_iter, mut size_iter) = (etags.iter().peekable(), size.iter().peekable()); - - let mut info = Vec::<PartInfo>::with_capacity(max_parts); - - while info.len() < max_parts { - match (etag_iter.peek(), size_iter.peek()) { - (Some((ep, etag)), Some((sp, size))) => match ep.cmp(sp) { - Ordering::Less => { - debug!("ETag information ignored due to missing corresponding block information. 
Query: {:?}", query); - etag_iter.next(); - } - Ordering::Equal => { - info.push(PartInfo { - etag: etag.to_string(), - timestamp: obj_version.timestamp, - part_number: *ep, - size: *size, - }); - etag_iter.next(); - size_iter.next(); + mpu: &'a MultipartUpload, +) -> Result<(Vec<PartInfo<'a>>, Option<u64>), Error> { + assert!((1..=1000).contains(&query.max_parts)); // see s3/api_server.rs + + // Parse multipart upload part list, removing parts not yet finished + // and failed part uploads that were overwritten + let mut parts: Vec<PartInfo<'a>> = Vec::with_capacity(mpu.parts.items().len()); + for (pk, p) in mpu.parts.items().iter() { + if let (Some(etag), Some(size)) = (&p.etag, p.size) { + let part_info = PartInfo { + part_number: pk.part_number, + timestamp: pk.timestamp, + etag, + size, + }; + match parts.last_mut() { + Some(lastpart) if lastpart.part_number == pk.part_number => { + *lastpart = part_info; } - Ordering::Greater => { - debug!("Block information ignored due to missing corresponding ETag information. Query: {:?}", query); - size_iter.next(); + _ => { + parts.push(part_info); } - }, - (None, None) => return Ok((info, None)), - _ => { - debug!( - "Additional block or ETag information ignored. Query: {:?}", - query - ); - return Ok((info, None)); } } } - match info.last() { - Some(part_info) => { - let pagination = Some(part_info.part_number); - Ok((info, pagination)) - } - None => Ok((info, None)), + // Cut the beginning if we have a marker + if let Some(marker) = &query.part_number_marker { + let next = marker + 1; + let part_idx = parts + .binary_search_by(|part| part.part_number.cmp(&next)) + .unwrap_or_else(|x| x); + parts = parts.split_off(part_idx); + } + + // Cut the end if we have too many parts + if parts.len() > query.max_parts as usize { + parts.truncate(query.max_parts as usize); + let pagination = Some(parts.last().unwrap().part_number); + return Ok((parts, pagination)); } + + Ok((parts, None)) } /* @@ -570,8 +527,8 @@ fn fetch_part_info( /// This key can be the prefix in the base case, or intermediate /// points in the dataset if we are continuing a previous listing. impl ListObjectsQuery { - fn build_accumulator(&self) -> Accumulator<String, ObjectInfo> { - Accumulator::<String, ObjectInfo>::new(self.common.page_size) + fn build_accumulator(&self) -> ObjectAccumulator { + ObjectAccumulator::new(self.common.page_size) } fn begin(&self) -> Result<RangeBegin, Error> { @@ -580,9 +537,10 @@ impl ListObjectsQuery { // In V2 mode, the continuation token is defined as an opaque // string in the spec, so we can do whatever we want with it. // In our case, it is defined as either [ or ] (for include + // or exclude), followed by a base64-encoded string // representing the key to start with. 
- (Some(token), _) => match &token[..1] { - "[" => Ok(RangeBegin::IncludingKey { + (Some(token), _) => match &token.get(..1) { + Some("[") => Ok(RangeBegin::IncludingKey { key: String::from_utf8( BASE64_STANDARD .decode(token[1..].as_bytes()) @@ -590,7 +548,7 @@ impl ListObjectsQuery { )?, fallback_key: None, }), - "]" => Ok(RangeBegin::AfterKey { + Some("]") => Ok(RangeBegin::AfterKey { key: String::from_utf8( BASE64_STANDARD .decode(token[1..].as_bytes()) @@ -631,8 +589,8 @@ impl ListObjectsQuery { } impl ListMultipartUploadsQuery { - fn build_accumulator(&self) -> Accumulator<Uuid, UploadInfo> { - Accumulator::<Uuid, UploadInfo>::new(self.common.page_size) + fn build_accumulator(&self) -> UploadAccumulator { + UploadAccumulator::new(self.common.page_size) } fn begin(&self) -> Result<RangeBegin, Error> { @@ -651,7 +609,7 @@ impl ListMultipartUploadsQuery { }), uuid => Ok(RangeBegin::AfterUpload { key: key_marker.to_string(), - upload: s3_put::decode_upload_id(uuid)?, + upload: s3_multipart::decode_upload_id(uuid)?, }), }, @@ -716,6 +674,7 @@ impl<K: std::cmp::Ord, V> Accumulator<K, V> { Some(p) => p, None => return None, }; + assert!(pfx.starts_with(&query.prefix)); // Try to register this prefix // If not possible, we can return early @@ -726,8 +685,11 @@ impl<K: std::cmp::Ord, V> Accumulator<K, V> { // We consume the whole common prefix from the iterator let mut last_pfx_key = &object.key; loop { - last_pfx_key = match objects.peek() { - Some(o) if o.key.starts_with(pfx) => &o.key, + match objects.peek() { + Some(o) if o.key.starts_with(pfx) => { + last_pfx_key = &o.key; + objects.next(); + } Some(_) => { return Some(ExtractionResult::Extracted { key: last_pfx_key.to_owned(), @@ -743,8 +705,6 @@ impl<K: std::cmp::Ord, V> Accumulator<K, V> { } } }; - - objects.next(); } } @@ -759,12 +719,11 @@ impl<K: std::cmp::Ord, V> Accumulator<K, V> { } // Otherwise, we need to check if we can add it - match self.is_full() { - true => false, - false => { - self.common_prefixes.insert(key); - true - } + if self.is_full() { + false + } else { + self.common_prefixes.insert(key); + true } } @@ -772,12 +731,11 @@ impl<K: std::cmp::Ord, V> Accumulator<K, V> { // It is impossible to add twice a key, this is an error assert!(!self.keys.contains_key(&key)); - match self.is_full() { - true => false, - false => { - self.keys.insert(key, value); - true - } + if self.is_full() { + false + } else { + self.keys.insert(key, value); + true } } } @@ -794,6 +752,7 @@ impl ExtractAccumulator for ObjectAccumulator { } let object = objects.next().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it."); + assert!(object.key.starts_with(&query.prefix)); let version = match object.versions().iter().find(|x| x.is_data()) { Some(v) => v, @@ -843,7 +802,7 @@ impl ExtractAccumulator for UploadAccumulator { let mut uploads_for_key = object .versions() .iter() - .filter(|x| x.is_uploading()) + .filter(|x| x.is_uploading(Some(true))) .collect::<Vec<&ObjectVersion>>(); // S3 logic requires lexicographically sorted upload ids. 
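// Sketch (not from this patch): the V2 continuation-token format parsed by the
// continuation-token hunk above. A token is "[" (include key) or "]" (start
// after key) followed by the base64 of the key; token.get(..1), unlike
// &token[..1], returns None instead of panicking on an empty or non-ASCII
// token. Assumes the same base64 prelude (BASE64_STANDARD) already imported here.
use base64::prelude::*;

fn encode_continuation_token(key: &str, including: bool) -> String {
    let prefix = if including { "[" } else { "]" };
    format!("{}{}", prefix, BASE64_STANDARD.encode(key.as_bytes()))
}

fn decode_continuation_token(token: &str) -> Option<(bool, String)> {
    let including = match token.get(..1)? {
        "[" => true,
        "]" => false,
        _ => return None,
    };
    // Safe slice: the first character was matched above as a one-byte ASCII char.
    let key = String::from_utf8(BASE64_STANDARD.decode(token[1..].as_bytes()).ok()?).ok()?;
    Some((including, key))
}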
@@ -918,14 +877,6 @@ impl ExtractAccumulator for UploadAccumulator { * Utility functions */ -/// This is a stub for Result::into_ok_or_err that is not yet in Rust stable -fn into_ok_or_err<T>(r: Result<T, T>) -> T { - match r { - Ok(r) => r, - Err(r) => r, - } -} - /// Returns the common prefix of the object given the query prefix and delimiter fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> { match &query.delimiter { @@ -951,7 +902,6 @@ fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value { #[cfg(test)] mod tests { use super::*; - use garage_model::s3::version_table::*; use garage_util::*; use std::iter::FromIterator; @@ -991,10 +941,13 @@ mod tests { ObjectVersion { uuid: Uuid::from(uuid), timestamp: TS, - state: ObjectVersionState::Uploading(ObjectVersionHeaders { - content_type: "text/plain".to_string(), - other: BTreeMap::<String, String>::new(), - }), + state: ObjectVersionState::Uploading { + multipart: true, + headers: ObjectVersionHeaders { + content_type: "text/plain".to_string(), + other: BTreeMap::<String, String>::new(), + }, + }, } } @@ -1169,83 +1122,77 @@ mod tests { Ok(()) } - fn version() -> Version { + fn mpu() -> MultipartUpload { let uuid = Uuid::from([0x08; 32]); - let blocks = vec![ + let parts = vec![ ( - VersionBlockKey { + MpuPartKey { part_number: 1, - offset: 1, + timestamp: TS, }, - VersionBlock { - hash: uuid, - size: 3, + MpuPart { + version: uuid, + size: Some(3), + etag: Some("etag1".into()), }, ), ( - VersionBlockKey { - part_number: 1, - offset: 2, + MpuPartKey { + part_number: 2, + timestamp: TS, }, - VersionBlock { - hash: uuid, - size: 2, + MpuPart { + version: uuid, + size: None, + etag: None, }, ), ( - VersionBlockKey { - part_number: 2, - offset: 1, + MpuPartKey { + part_number: 3, + timestamp: TS, }, - VersionBlock { - hash: uuid, - size: 8, + MpuPart { + version: uuid, + size: Some(10), + etag: Some("etag2".into()), }, ), ( - VersionBlockKey { + MpuPartKey { part_number: 5, - offset: 1, + timestamp: TS, }, - VersionBlock { - hash: uuid, - size: 7, + MpuPart { + version: uuid, + size: Some(7), + etag: Some("etag3".into()), }, ), ( - VersionBlockKey { + MpuPartKey { part_number: 8, - offset: 1, + timestamp: TS, }, - VersionBlock { - hash: uuid, - size: 5, + MpuPart { + version: uuid, + size: Some(5), + etag: Some("etag4".into()), }, ), ]; - let etags = vec![ - (1, "etag1".to_string()), - (3, "etag2".to_string()), - (5, "etag3".to_string()), - (8, "etag4".to_string()), - (9, "etag5".to_string()), - ]; - Version { - bucket_id: uuid, - key: "a".to_string(), - uuid, + MultipartUpload { + upload_id: uuid, + timestamp: TS, deleted: false.into(), - blocks: crdt::Map::<VersionBlockKey, VersionBlock>::from_iter(blocks), - parts_etags: crdt::Map::<u64, String>::from_iter(etags), + parts: crdt::Map::<MpuPartKey, MpuPart>::from_iter(parts), + bucket_id: uuid, + key: "a".into(), } } - fn obj() -> Object { - Object::new(bucket(), "d".to_string(), vec![objup_version([0x08; 32])]) - } - #[test] fn test_fetch_part_info() -> Result<(), Error> { let uuid = Uuid::from([0x08; 32]); @@ -1258,82 +1205,85 @@ mod tests { max_parts: 2, }; - assert!( - fetch_part_info(&query, None, None, uuid).is_err(), - "No object and version should fail" - ); - assert!( - fetch_part_info(&query, Some(obj()), None, uuid).is_err(), - "No version should faild" - ); - assert!( - fetch_part_info(&query, None, Some(version()), uuid).is_err(), - "No object should fail" - ); + let mpu = mpu(); // Start from the beginning but with limited size to trigger 
pagination - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; - assert_eq!(pagination.unwrap(), 5); + let (info, pagination) = fetch_part_info(&query, &mpu)?; + assert_eq!(pagination.unwrap(), 3); assert_eq!( info, vec![ PartInfo { - etag: "etag1".to_string(), + etag: "etag1", timestamp: TS, part_number: 1, - size: 5 + size: 3 }, PartInfo { - etag: "etag3".to_string(), + etag: "etag2", timestamp: TS, - part_number: 5, - size: 7 + part_number: 3, + size: 10 }, ] ); // Use previous pagination to make a new request query.part_number_marker = Some(pagination.unwrap()); - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert!(pagination.is_none()); assert_eq!( info, - vec![PartInfo { - etag: "etag4".to_string(), - timestamp: TS, - part_number: 8, - size: 5 - },] + vec![ + PartInfo { + etag: "etag3", + timestamp: TS, + part_number: 5, + size: 7 + }, + PartInfo { + etag: "etag4", + timestamp: TS, + part_number: 8, + size: 5 + }, + ] ); // Trying to access a part that is way larger than registered ones query.part_number_marker = Some(9999); - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert!(pagination.is_none()); assert_eq!(info, vec![]); // Try without any limitation query.max_parts = 1000; query.part_number_marker = None; - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + let (info, pagination) = fetch_part_info(&query, &mpu)?; assert!(pagination.is_none()); assert_eq!( info, vec![ PartInfo { - etag: "etag1".to_string(), + etag: "etag1", timestamp: TS, part_number: 1, - size: 5 + size: 3 + }, + PartInfo { + etag: "etag2", + timestamp: TS, + part_number: 3, + size: 10 }, PartInfo { - etag: "etag3".to_string(), + etag: "etag3", timestamp: TS, part_number: 5, size: 7 }, PartInfo { - etag: "etag4".to_string(), + etag: "etag4", timestamp: TS, part_number: 8, size: 5 diff --git a/src/api/s3/mod.rs b/src/api/s3/mod.rs index 7b56d4d8..cbdb94ab 100644 --- a/src/api/s3/mod.rs +++ b/src/api/s3/mod.rs @@ -6,7 +6,9 @@ mod copy; pub mod cors; mod delete; pub mod get; +mod lifecycle; mod list; +mod multipart; mod post_object; mod put; mod website; diff --git a/src/api/s3/multipart.rs b/src/api/s3/multipart.rs new file mode 100644 index 00000000..6b786318 --- /dev/null +++ b/src/api/s3/multipart.rs @@ -0,0 +1,468 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use futures::prelude::*; +use hyper::body::Body; +use hyper::{Request, Response}; +use md5::{Digest as Md5Digest, Md5}; + +use garage_table::*; +use garage_util::async_hash::*; +use garage_util::data::*; + +use garage_model::bucket_table::Bucket; +use garage_model::garage::Garage; +use garage_model::s3::block_ref_table::*; +use garage_model::s3::mpu_table::*; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; + +use crate::s3::error::*; +use crate::s3::put::*; +use crate::s3::xml as s3_xml; +use crate::signature::verify_signed_content; + +// ---- + +pub async fn handle_create_multipart_upload( + garage: Arc<Garage>, + req: &Request<Body>, + bucket_name: &str, + bucket_id: Uuid, + key: &String, +) -> Result<Response<Body>, Error> { + let existing_object = garage.object_table.get(&bucket_id, &key).await?; + + let upload_id = gen_uuid(); + let timestamp = next_timestamp(existing_object.as_ref()); + + let headers = 
get_headers(req.headers())?; + + // Create object in object table + let object_version = ObjectVersion { + uuid: upload_id, + timestamp, + state: ObjectVersionState::Uploading { + multipart: true, + headers, + }, + }; + let object = Object::new(bucket_id, key.to_string(), vec![object_version]); + garage.object_table.insert(&object).await?; + + // Create multipart upload in mpu table + // This multipart upload will hold references to uploaded parts + // (which are entries in the Version table) + let mpu = MultipartUpload::new(upload_id, timestamp, bucket_id, key.into(), false); + garage.mpu_table.insert(&mpu).await?; + + // Send success response + let result = s3_xml::InitiateMultipartUploadResult { + xmlns: (), + bucket: s3_xml::Value(bucket_name.to_string()), + key: s3_xml::Value(key.to_string()), + upload_id: s3_xml::Value(hex::encode(upload_id)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::new(Body::from(xml.into_bytes()))) +} + +pub async fn handle_put_part( + garage: Arc<Garage>, + req: Request<Body>, + bucket_id: Uuid, + key: &str, + part_number: u64, + upload_id: &str, + content_sha256: Option<Hash>, +) -> Result<Response<Body>, Error> { + let upload_id = decode_upload_id(upload_id)?; + + let content_md5 = match req.headers().get("content-md5") { + Some(x) => Some(x.to_str()?.to_string()), + None => None, + }; + + // Read first chuck, and at the same time try to get object to see if it exists + let key = key.to_string(); + + let body = req.into_body().map_err(Error::from); + let mut chunker = StreamChunker::new(body, garage.config.block_size); + + let ((_, _, mut mpu), first_block) = futures::try_join!( + get_upload(&garage, &bucket_id, &key, &upload_id), + chunker.next(), + )?; + + // Check object is valid and part can be accepted + let first_block = first_block.ok_or_bad_request("Empty body")?; + + // Calculate part identity: timestamp, version id + let version_uuid = gen_uuid(); + let mpu_part_key = MpuPartKey { + part_number, + timestamp: mpu.next_timestamp(part_number), + }; + + // The following consists in many steps that can each fail. + // Keep track that some cleanup will be needed if things fail + // before everything is finished (cleanup is done using the Drop trait). + let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner { + garage: garage.clone(), + upload_id, + version_uuid, + })); + + // Create version and link version from MPU + mpu.parts.clear(); + mpu.parts.put( + mpu_part_key, + MpuPart { + version: version_uuid, + etag: None, + size: None, + }, + ); + garage.mpu_table.insert(&mpu).await?; + + let version = Version::new( + version_uuid, + VersionBacklink::MultipartUpload { upload_id }, + false, + ); + garage.version_table.insert(&version).await?; + + // Copy data to version + let first_block_hash = async_blake2sum(first_block.clone()).await; + + let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks( + &garage, + &version, + part_number, + first_block, + first_block_hash, + &mut chunker, + ) + .await?; + + // Verify that checksums map + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + )?; + + // Store part etag in version + let data_md5sum_hex = hex::encode(data_md5sum); + mpu.parts.put( + mpu_part_key, + MpuPart { + version: version_uuid, + etag: Some(data_md5sum_hex.clone()), + size: Some(total_size), + }, + ); + garage.mpu_table.insert(&mpu).await?; + + // We were not interrupted, everything went fine. 
+ // We won't have to clean up on drop. + interrupted_cleanup.cancel(); + + let response = Response::builder() + .header("ETag", format!("\"{}\"", data_md5sum_hex)) + .body(Body::empty()) + .unwrap(); + Ok(response) +} + +struct InterruptedCleanup(Option<InterruptedCleanupInner>); +struct InterruptedCleanupInner { + garage: Arc<Garage>, + upload_id: Uuid, + version_uuid: Uuid, +} + +impl InterruptedCleanup { + fn cancel(&mut self) { + drop(self.0.take()); + } +} +impl Drop for InterruptedCleanup { + fn drop(&mut self) { + if let Some(info) = self.0.take() { + tokio::spawn(async move { + let version = Version::new( + info.version_uuid, + VersionBacklink::MultipartUpload { + upload_id: info.upload_id, + }, + true, + ); + if let Err(e) = info.garage.version_table.insert(&version).await { + warn!("Cannot cleanup after aborted UploadPart: {}", e); + } + }); + } + } +} + +pub async fn handle_complete_multipart_upload( + garage: Arc<Garage>, + req: Request<Body>, + bucket_name: &str, + bucket: &Bucket, + key: &str, + upload_id: &str, + content_sha256: Option<Hash>, +) -> Result<Response<Body>, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; + let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) + .ok_or_bad_request("Invalid CompleteMultipartUpload XML")?; + debug!( + "CompleteMultipartUpload list of parts: {:?}", + body_list_of_parts + ); + + let upload_id = decode_upload_id(upload_id)?; + + // Get object and multipart upload + let key = key.to_string(); + let (object, mut object_version, mpu) = + get_upload(&garage, &bucket.id, &key, &upload_id).await?; + + if mpu.parts.is_empty() { + return Err(Error::bad_request("No data was uploaded")); + } + + let headers = match object_version.state { + ObjectVersionState::Uploading { headers, .. } => headers, + _ => unreachable!(), + }; + + // Check that part numbers are an increasing sequence. + // (it doesn't need to start at 1 nor to be a continuous sequence, + // see discussion in #192) + if body_list_of_parts.is_empty() { + return Err(Error::EntityTooSmall); + } + if !body_list_of_parts + .iter() + .zip(body_list_of_parts.iter().skip(1)) + .all(|(p1, p2)| p1.part_number < p2.part_number) + { + return Err(Error::InvalidPartOrder); + } + + // Check that the list of parts they gave us corresponds to parts we have here + debug!("Parts stored in multipart upload: {:?}", mpu.parts.items()); + let mut have_parts = HashMap::new(); + for (pk, pv) in mpu.parts.items().iter() { + have_parts.insert(pk.part_number, pv); + } + let mut parts = vec![]; + for req_part in body_list_of_parts.iter() { + match have_parts.get(&req_part.part_number) { + Some(part) if part.etag.as_ref() == Some(&req_part.etag) && part.size.is_some() => { + parts.push(*part) + } + _ => return Err(Error::InvalidPart), + } + } + + let grg = &garage; + let parts_versions = futures::future::try_join_all(parts.iter().map(|p| async move { + grg.version_table + .get(&p.version, &EmptyKey) + .await? 
+ .ok_or_internal_error("Part version missing from version table") + })) + .await?; + + // Create final version and block refs + let mut final_version = Version::new( + upload_id, + VersionBacklink::Object { + bucket_id: bucket.id, + key: key.to_string(), + }, + false, + ); + for (part_number, part_version) in parts_versions.iter().enumerate() { + if part_version.deleted.get() { + return Err(Error::InvalidPart); + } + for (vbk, vb) in part_version.blocks.items().iter() { + final_version.blocks.put( + VersionBlockKey { + part_number: (part_number + 1) as u64, + offset: vbk.offset, + }, + *vb, + ); + } + } + garage.version_table.insert(&final_version).await?; + + let block_refs = final_version.blocks.items().iter().map(|(_, b)| BlockRef { + block: b.hash, + version: upload_id, + deleted: false.into(), + }); + garage.block_ref_table.insert_many(block_refs).await?; + + // Calculate etag of final object + // To understand how etags are calculated, read more here: + // https://teppen.io/2018/06/23/aws_s3_etags/ + let mut etag_md5_hasher = Md5::new(); + for part in parts.iter() { + etag_md5_hasher.update(part.etag.as_ref().unwrap().as_bytes()); + } + let etag = format!( + "{}-{}", + hex::encode(etag_md5_hasher.finalize()), + parts.len() + ); + + // Calculate total size of final object + let total_size = parts.iter().map(|x| x.size.unwrap()).sum(); + + if let Err(e) = check_quotas(&garage, bucket, total_size, Some(&object)).await { + object_version.state = ObjectVersionState::Aborted; + let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + return Err(e); + } + + // Write final object version + object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( + ObjectVersionMeta { + headers, + size: total_size, + etag: etag.clone(), + }, + final_version.blocks.items()[0].1.hash, + )); + + let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + // Send response saying ok we're done + let result = s3_xml::CompleteMultipartUploadResult { + xmlns: (), + location: None, + bucket: s3_xml::Value(bucket_name.to_string()), + key: s3_xml::Value(key), + etag: s3_xml::Value(format!("\"{}\"", etag)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::new(Body::from(xml.into_bytes()))) +} + +pub async fn handle_abort_multipart_upload( + garage: Arc<Garage>, + bucket_id: Uuid, + key: &str, + upload_id: &str, +) -> Result<Response<Body>, Error> { + let upload_id = decode_upload_id(upload_id)?; + + let (_, mut object_version, _) = + get_upload(&garage, &bucket_id, &key.to_string(), &upload_id).await?; + + object_version.state = ObjectVersionState::Aborted; + let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + Ok(Response::new(Body::from(vec![]))) +} + +// ======== helpers ============ + +#[allow(clippy::ptr_arg)] +pub(crate) async fn get_upload( + garage: &Garage, + bucket_id: &Uuid, + key: &String, + upload_id: &Uuid, +) -> Result<(Object, ObjectVersion, MultipartUpload), Error> { + let (object, mpu) = futures::try_join!( + garage.object_table.get(bucket_id, key).map_err(Error::from), + garage + .mpu_table + .get(upload_id, &EmptyKey) + .map_err(Error::from), + )?; + + let object = object.ok_or(Error::NoSuchUpload)?; + let mpu = mpu.ok_or(Error::NoSuchUpload)?; + + let object_version = object + .versions() + .iter() + .find(|v| v.uuid == 
*upload_id && v.is_uploading(Some(true))) + .ok_or(Error::NoSuchUpload)? + .clone(); + + Ok((object, object_version, mpu)) +} + +pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> { + let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?; + if id_bin.len() != 32 { + return Err(Error::NoSuchUpload); + } + let mut uuid = [0u8; 32]; + uuid.copy_from_slice(&id_bin[..]); + Ok(Uuid::from(uuid)) +} + +#[derive(Debug)] +struct CompleteMultipartUploadPart { + etag: String, + part_number: u64, +} + +fn parse_complete_multipart_upload_body( + xml: &roxmltree::Document, +) -> Option<Vec<CompleteMultipartUploadPart>> { + let mut parts = vec![]; + + let root = xml.root(); + let cmu = root.first_child()?; + if !cmu.has_tag_name("CompleteMultipartUpload") { + return None; + } + + for item in cmu.children() { + // Only parse <Part> nodes + if !item.is_element() { + continue; + } + + if item.has_tag_name("Part") { + let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?; + let part_number = item + .children() + .find(|e| e.has_tag_name("PartNumber"))? + .text()?; + parts.push(CompleteMultipartUploadPart { + etag: etag.trim_matches('"').to_string(), + part_number: part_number.parse().ok()?, + }); + } else { + return None; + } + } + + Some(parts) +} diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index 350ab884..606facc4 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -1,8 +1,9 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use base64::prelude::*; use futures::prelude::*; +use futures::try_join; use hyper::body::{Body, Bytes}; use hyper::header::{HeaderMap, HeaderValue}; use hyper::{Request, Response}; @@ -30,14 +31,12 @@ use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; use crate::s3::error::*; -use crate::s3::xml as s3_xml; -use crate::signature::verify_signed_content; pub async fn handle_put( garage: Arc<Garage>, req: Request<Body>, bucket: &Bucket, - key: &str, + key: &String, content_sha256: Option<Hash>, ) -> Result<Response<Body>, Error> { // Retrieve interesting headers from request @@ -70,16 +69,24 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( headers: ObjectVersionHeaders, body: S, bucket: &Bucket, - key: &str, + key: &String, content_md5: Option<String>, content_sha256: Option<FixedBytes32>, ) -> Result<(Uuid, String), Error> { + let mut chunker = StreamChunker::new(body, garage.config.block_size); + let (first_block_opt, existing_object) = try_join!( + chunker.next(), + garage + .object_table + .get(&bucket.id, key) + .map_err(Error::from), + )?; + + let first_block = first_block_opt.unwrap_or_default(); + // Generate identity of new version let version_uuid = gen_uuid(); - let version_timestamp = now_msec(); - - let mut chunker = StreamChunker::new(body, garage.config.block_size); - let first_block = chunker.next().await?.unwrap_or_default(); + let version_timestamp = next_timestamp(existing_object.as_ref()); // If body is small enough, store it directly in the object table // as "inline data". We can then return immediately. 
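// Sketch (not from this patch): the behaviour of next_timestamp() as called a
// few lines above, reduced to plain u64 values so it can be checked in
// isolation. A new version must sort strictly after every existing version of
// the object, even if the local clock has gone backwards in the meantime.
fn next_ts(existing_versions: &[u64], now_msec: u64) -> u64 {
    match existing_versions.iter().copied().max() {
        Some(t) => std::cmp::max(t + 1, now_msec),
        None => now_msec,
    }
}

#[test]
fn next_ts_is_monotonic() {
    assert_eq!(next_ts(&[], 1_000), 1_000);
    assert_eq!(next_ts(&[500], 1_000), 1_000);
    assert_eq!(next_ts(&[2_000], 1_000), 2_001); // clock is behind the latest version
}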
@@ -99,7 +106,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( content_sha256, )?; - check_quotas(&garage, bucket, key, size).await?; + check_quotas(&garage, bucket, size, existing_object.as_ref()).await?; let object_version = ObjectVersion { uuid: version_uuid, @@ -123,20 +130,23 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( // The following consists in many steps that can each fail. // Keep track that some cleanup will be needed if things fail // before everything is finished (cleanup is done using the Drop trait). - let mut interrupted_cleanup = InterruptedCleanup(Some(( - garage.clone(), - bucket.id, - key.into(), + let mut interrupted_cleanup = InterruptedCleanup(Some(InterruptedCleanupInner { + garage: garage.clone(), + bucket_id: bucket.id, + key: key.into(), version_uuid, version_timestamp, - ))); + })); // Write version identifier in object table so that we have a trace // that we are uploading something let mut object_version = ObjectVersion { uuid: version_uuid, timestamp: version_timestamp, - state: ObjectVersionState::Uploading(headers.clone()), + state: ObjectVersionState::Uploading { + headers: headers.clone(), + multipart: false, + }, }; let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]); garage.object_table.insert(&object).await?; @@ -145,7 +155,14 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( // Write this entry now, even with empty block list, // to prevent block_ref entries from being deleted (they can be deleted // if the reference a version that isn't found in the version table) - let version = Version::new(version_uuid, bucket.id, key.into(), false); + let version = Version::new( + version_uuid, + VersionBacklink::Object { + bucket_id: bucket.id, + key: key.into(), + }, + false, + ); garage.version_table.insert(&version).await?; // Transfer data and verify checksum @@ -168,7 +185,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( content_sha256, )?; - check_quotas(&garage, bucket, key, total_size).await?; + check_quotas(&garage, bucket, total_size, existing_object.as_ref()).await?; // Save final object state, marked as Complete let md5sum_hex = hex::encode(data_md5sum); @@ -192,7 +209,7 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>( /// Validate MD5 sum against content-md5 header /// and sha256sum against signed content-sha256 -fn ensure_checksum_matches( +pub(crate) fn ensure_checksum_matches( data_md5sum: &[u8], data_sha256sum: garage_util::data::FixedBytes32, content_md5: Option<&str>, @@ -218,22 +235,22 @@ fn ensure_checksum_matches( } /// Check that inserting this object with this size doesn't exceed bucket quotas -async fn check_quotas( +pub(crate) async fn check_quotas( garage: &Arc<Garage>, bucket: &Bucket, - key: &str, size: u64, + prev_object: Option<&Object>, ) -> Result<(), Error> { let quotas = bucket.state.as_option().unwrap().quotas.get(); if quotas.max_objects.is_none() && quotas.max_size.is_none() { return Ok(()); }; - let key = key.to_string(); - let (prev_object, counters) = futures::try_join!( - garage.object_table.get(&bucket.id, &key), - garage.object_counter_table.table.get(&bucket.id, &EmptyKey), - )?; + let counters = garage + .object_counter_table + .table + .get(&bucket.id, &EmptyKey) + .await?; let counters = counters .map(|x| x.filtered_values(&garage.system.ring.borrow())) @@ -267,7 +284,7 @@ async fn check_quotas( if 
cnt_size_diff > 0 && current_size + cnt_size_diff > ms as i64 { return Err(Error::forbidden(format!( "Bucket size quota is reached, maximum total size of objects for this bucket: {}. The bucket is already {} bytes, and this object would add {} bytes.", - ms, current_size, size + ms, current_size, cnt_size_diff ))); } } @@ -275,7 +292,7 @@ async fn check_quotas( Ok(()) } -async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>( +pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> + Unpin>( garage: &Garage, version: &Version, part_number: u64, @@ -381,7 +398,7 @@ async fn put_block_meta( Ok(()) } -struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> { +pub(crate) struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> { stream: S, read_all: bool, block_size: usize, @@ -389,7 +406,7 @@ struct StreamChunker<S: Stream<Item = Result<Bytes, Error>>> { } impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> { - fn new(stream: S, block_size: usize) -> Self { + pub(crate) fn new(stream: S, block_size: usize) -> Self { Self { stream, read_all: false, @@ -398,7 +415,7 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> { } } - async fn next(&mut self) -> Result<Option<Bytes>, Error> { + pub(crate) async fn next(&mut self) -> Result<Option<Bytes>, Error> { while !self.read_all && self.buf.len() < self.block_size { if let Some(block) = self.stream.next().await { let bytes = block?; @@ -425,7 +442,14 @@ pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> { .unwrap() } -struct InterruptedCleanup(Option<(Arc<Garage>, Uuid, String, Uuid, u64)>); +struct InterruptedCleanup(Option<InterruptedCleanupInner>); +struct InterruptedCleanupInner { + garage: Arc<Garage>, + bucket_id: Uuid, + key: String, + version_uuid: Uuid, + version_timestamp: u64, +} impl InterruptedCleanup { fn cancel(&mut self) { @@ -434,15 +458,15 @@ impl InterruptedCleanup { } impl Drop for InterruptedCleanup { fn drop(&mut self) { - if let Some((garage, bucket_id, key, version_uuid, version_ts)) = self.0.take() { + if let Some(info) = self.0.take() { tokio::spawn(async move { let object_version = ObjectVersion { - uuid: version_uuid, - timestamp: version_ts, + uuid: info.version_uuid, + timestamp: info.version_timestamp, state: ObjectVersionState::Aborted, }; - let object = Object::new(bucket_id, key, vec![object_version]); - if let Err(e) = garage.object_table.insert(&object).await { + let object = Object::new(info.bucket_id, info.key, vec![object_version]); + if let Err(e) = info.garage.object_table.insert(&object).await { warn!("Cannot cleanup after aborted PutObject: {}", e); } }); @@ -450,326 +474,9 @@ impl Drop for InterruptedCleanup { } } -// ---- - -pub async fn handle_create_multipart_upload( - garage: Arc<Garage>, - req: &Request<Body>, - bucket_name: &str, - bucket_id: Uuid, - key: &str, -) -> Result<Response<Body>, Error> { - let version_uuid = gen_uuid(); - let headers = get_headers(req.headers())?; - - // Create object in object table - let object_version = ObjectVersion { - uuid: version_uuid, - timestamp: now_msec(), - state: ObjectVersionState::Uploading(headers), - }; - let object = Object::new(bucket_id, key.to_string(), vec![object_version]); - garage.object_table.insert(&object).await?; - - // Insert empty version so that block_ref entries refer to something - // (they are inserted concurrently with blocks in the version table, so - // there is the possibility that they are inserted before the 
version table - // is created, in which case it is allowed to delete them, e.g. in repair_*) - let version = Version::new(version_uuid, bucket_id, key.into(), false); - garage.version_table.insert(&version).await?; - - // Send success response - let result = s3_xml::InitiateMultipartUploadResult { - xmlns: (), - bucket: s3_xml::Value(bucket_name.to_string()), - key: s3_xml::Value(key.to_string()), - upload_id: s3_xml::Value(hex::encode(version_uuid)), - }; - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::new(Body::from(xml.into_bytes()))) -} - -pub async fn handle_put_part( - garage: Arc<Garage>, - req: Request<Body>, - bucket_id: Uuid, - key: &str, - part_number: u64, - upload_id: &str, - content_sha256: Option<Hash>, -) -> Result<Response<Body>, Error> { - let version_uuid = decode_upload_id(upload_id)?; - - let content_md5 = match req.headers().get("content-md5") { - Some(x) => Some(x.to_str()?.to_string()), - None => None, - }; - - // Read first chuck, and at the same time try to get object to see if it exists - let key = key.to_string(); - - let body = req.into_body().map_err(Error::from); - let mut chunker = StreamChunker::new(body, garage.config.block_size); - - let (object, version, first_block) = futures::try_join!( - garage - .object_table - .get(&bucket_id, &key) - .map_err(Error::from), - garage - .version_table - .get(&version_uuid, &EmptyKey) - .map_err(Error::from), - chunker.next(), - )?; - - // Check object is valid and multipart block can be accepted - let first_block = first_block.ok_or_bad_request("Empty body")?; - let object = object.ok_or_bad_request("Object not found")?; - - if !object - .versions() - .iter() - .any(|v| v.uuid == version_uuid && v.is_uploading()) - { - return Err(Error::NoSuchUpload); - } - - // Check part hasn't already been uploaded - if let Some(v) = version { - if v.has_part_number(part_number) { - return Err(Error::bad_request(format!( - "Part number {} has already been uploaded", - part_number - ))); - } - } - - // Copy block to store - let version = Version::new(version_uuid, bucket_id, key, false); - - let first_block_hash = async_blake2sum(first_block.clone()).await; - - let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( - &garage, - &version, - part_number, - first_block, - first_block_hash, - &mut chunker, - ) - .await?; - - // Verify that checksums map - ensure_checksum_matches( - data_md5sum.as_slice(), - data_sha256sum, - content_md5.as_deref(), - content_sha256, - )?; - - // Store part etag in version - let data_md5sum_hex = hex::encode(data_md5sum); - let mut version = version; - version - .parts_etags - .put(part_number, data_md5sum_hex.clone()); - garage.version_table.insert(&version).await?; - - let response = Response::builder() - .header("ETag", format!("\"{}\"", data_md5sum_hex)) - .body(Body::empty()) - .unwrap(); - Ok(response) -} - -pub async fn handle_complete_multipart_upload( - garage: Arc<Garage>, - req: Request<Body>, - bucket_name: &str, - bucket: &Bucket, - key: &str, - upload_id: &str, - content_sha256: Option<Hash>, -) -> Result<Response<Body>, Error> { - let body = hyper::body::to_bytes(req.into_body()).await?; - - if let Some(content_sha256) = content_sha256 { - verify_signed_content(content_sha256, &body[..])?; - } - - let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; - let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) - .ok_or_bad_request("Invalid CompleteMultipartUpload XML")?; - debug!( - "CompleteMultipartUpload list of parts: {:?}", - 
body_list_of_parts - ); - - let version_uuid = decode_upload_id(upload_id)?; - - // Get object and version - let key = key.to_string(); - let (object, version) = futures::try_join!( - garage.object_table.get(&bucket.id, &key), - garage.version_table.get(&version_uuid, &EmptyKey), - )?; - - let object = object.ok_or(Error::NoSuchKey)?; - let mut object_version = object - .versions() - .iter() - .find(|v| v.uuid == version_uuid && v.is_uploading()) - .cloned() - .ok_or(Error::NoSuchUpload)?; - - let version = version.ok_or(Error::NoSuchKey)?; - if version.blocks.is_empty() { - return Err(Error::bad_request("No data was uploaded")); - } - - let headers = match object_version.state { - ObjectVersionState::Uploading(headers) => headers, - _ => unreachable!(), - }; - - // Check that part numbers are an increasing sequence. - // (it doesn't need to start at 1 nor to be a continuous sequence, - // see discussion in #192) - if body_list_of_parts.is_empty() { - return Err(Error::EntityTooSmall); - } - if !body_list_of_parts - .iter() - .zip(body_list_of_parts.iter().skip(1)) - .all(|(p1, p2)| p1.part_number < p2.part_number) - { - return Err(Error::InvalidPartOrder); - } - - // Garage-specific restriction, see #204: part numbers must be - // consecutive starting at 1 - if body_list_of_parts[0].part_number != 1 - || !body_list_of_parts - .iter() - .zip(body_list_of_parts.iter().skip(1)) - .all(|(p1, p2)| p1.part_number + 1 == p2.part_number) - { - return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. See issue #204 for more information on this topic.".into())); - } - - // Check that the list of parts they gave us corresponds to the parts we have here - debug!("Expected parts from request: {:?}", body_list_of_parts); - debug!("Parts stored in version: {:?}", version.parts_etags.items()); - let parts = version - .parts_etags - .items() - .iter() - .map(|pair| (&pair.0, &pair.1)); - let same_parts = body_list_of_parts - .iter() - .map(|x| (&x.part_number, &x.etag)) - .eq(parts); - if !same_parts { - return Err(Error::InvalidPart); - } - - // Check that all blocks belong to one of the parts - let block_parts = version - .blocks - .items() - .iter() - .map(|(bk, _)| bk.part_number) - .collect::<BTreeSet<_>>(); - let same_parts = body_list_of_parts - .iter() - .map(|x| x.part_number) - .eq(block_parts.into_iter()); - if !same_parts { - return Err(Error::bad_request( - "Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again." 
- )); - } - - // Calculate etag of final object - // To understand how etags are calculated, read more here: - // https://teppen.io/2018/06/23/aws_s3_etags/ - let num_parts = body_list_of_parts.len(); - let mut etag_md5_hasher = Md5::new(); - for (_, etag) in version.parts_etags.items().iter() { - etag_md5_hasher.update(etag.as_bytes()); - } - let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts); - - // Calculate total size of final object - let total_size = version.blocks.items().iter().map(|x| x.1.size).sum(); - - if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await { - object_version.state = ObjectVersionState::Aborted; - let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); - garage.object_table.insert(&final_object).await?; - - return Err(e); - } - - // Write final object version - object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( - ObjectVersionMeta { - headers, - size: total_size, - etag: etag.clone(), - }, - version.blocks.items()[0].1.hash, - )); - - let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); - garage.object_table.insert(&final_object).await?; - - // Send response saying ok we're done - let result = s3_xml::CompleteMultipartUploadResult { - xmlns: (), - location: None, - bucket: s3_xml::Value(bucket_name.to_string()), - key: s3_xml::Value(key), - etag: s3_xml::Value(format!("\"{}\"", etag)), - }; - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::new(Body::from(xml.into_bytes()))) -} - -pub async fn handle_abort_multipart_upload( - garage: Arc<Garage>, - bucket_id: Uuid, - key: &str, - upload_id: &str, -) -> Result<Response<Body>, Error> { - let version_uuid = decode_upload_id(upload_id)?; - - let object = garage - .object_table - .get(&bucket_id, &key.to_string()) - .await?; - let object = object.ok_or(Error::NoSuchKey)?; - - let object_version = object - .versions() - .iter() - .find(|v| v.uuid == version_uuid && v.is_uploading()); - let mut object_version = match object_version { - None => return Err(Error::NoSuchUpload), - Some(x) => x.clone(), - }; +// ============ helpers ============ - object_version.state = ObjectVersionState::Aborted; - let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); - garage.object_table.insert(&final_object).await?; - - Ok(Response::new(Body::from(vec![]))) -} - -fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> { +pub(crate) fn get_mime_type(headers: &HeaderMap<HeaderValue>) -> Result<String, Error> { Ok(headers .get(hyper::header::CONTENT_TYPE) .map(|x| x.to_str()) @@ -822,53 +529,10 @@ pub(crate) fn get_headers(headers: &HeaderMap<HeaderValue>) -> Result<ObjectVers }) } -pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> { - let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?; - if id_bin.len() != 32 { - return Err(Error::NoSuchUpload); - } - let mut uuid = [0u8; 32]; - uuid.copy_from_slice(&id_bin[..]); - Ok(Uuid::from(uuid)) -} - -#[derive(Debug)] -struct CompleteMultipartUploadPart { - etag: String, - part_number: u64, -} - -fn parse_complete_multipart_upload_body( - xml: &roxmltree::Document, -) -> Option<Vec<CompleteMultipartUploadPart>> { - let mut parts = vec![]; - - let root = xml.root(); - let cmu = root.first_child()?; - if !cmu.has_tag_name("CompleteMultipartUpload") { - return None; - } - - for item in cmu.children() { - // Only parse <Part> nodes - if !item.is_element() { - continue; - } - - if 
item.has_tag_name("Part") { - let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?; - let part_number = item - .children() - .find(|e| e.has_tag_name("PartNumber"))? - .text()?; - parts.push(CompleteMultipartUploadPart { - etag: etag.trim_matches('"').to_string(), - part_number: part_number.parse().ok()?, - }); - } else { - return None; - } - } - - Some(parts) +pub(crate) fn next_timestamp(existing_object: Option<&Object>) -> u64 { + existing_object + .as_ref() + .and_then(|obj| obj.versions().iter().map(|v| v.timestamp).max()) + .map(|t| std::cmp::max(t + 1, now_msec())) + .unwrap_or_else(now_msec) } diff --git a/src/api/s3/website.rs b/src/api/s3/website.rs index 77738971..7f2ab925 100644 --- a/src/api/s3/website.rs +++ b/src/api/s3/website.rs @@ -43,14 +43,11 @@ pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error pub async fn handle_delete_website( garage: Arc<Garage>, - bucket_id: Uuid, + mut bucket: Bucket, ) -> Result<Response<Body>, Error> { - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - - let param = bucket.params_mut().unwrap(); + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; param.website_config.update(None); garage.bucket_table.insert(&bucket).await?; @@ -62,7 +59,7 @@ pub async fn handle_delete_website( pub async fn handle_put_website( garage: Arc<Garage>, - bucket_id: Uuid, + mut bucket: Bucket, req: Request<Body>, content_sha256: Option<Hash>, ) -> Result<Response<Body>, Error> { @@ -72,12 +69,9 @@ pub async fn handle_put_website( verify_signed_content(content_sha256, &body[..])?; } - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - - let param = bucket.params_mut().unwrap(); + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; conf.validate()?; diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml index df16959b..f6aa5f64 100644 --- a/src/block/Cargo.toml +++ b/src/block/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_block" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" @@ -24,6 +24,7 @@ opentelemetry = "0.17" arc-swap = "1.5" async-trait = "0.1.7" bytes = "1.0" +bytesize = "1.2" hex = "0.4" tracing = "0.1" rand = "0.8" diff --git a/src/block/block.rs b/src/block/block.rs index 935aa900..20f57aa5 100644 --- a/src/block/block.rs +++ b/src/block/block.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use bytes::Bytes; use serde::{Deserialize, Serialize}; use zstd::stream::{decode_all as zstd_decode, Encoder}; @@ -19,6 +21,14 @@ pub enum DataBlock { Compressed(Bytes), } +#[derive(Debug)] +pub enum DataBlockPath { + /// Uncompressed data fail + Plain(PathBuf), + /// Compressed data fail + Compressed(PathBuf), +} + impl DataBlock { /// Query whether this block is compressed pub fn is_compressed(&self) -> bool { diff --git a/src/block/layout.rs b/src/block/layout.rs new file mode 100644 index 00000000..e8339405 --- /dev/null +++ b/src/block/layout.rs @@ -0,0 +1,337 @@ +use std::path::PathBuf; + +use serde::{Deserialize, Serialize}; + +use garage_util::config::DataDirEnum; +use garage_util::data::Hash; +use garage_util::error::{Error, OkOrMessage}; +use garage_util::migrate::*; + +type Idx = u16; + +const DRIVE_NPART: usize = 1024; + +const HASH_DRIVE_BYTES: (usize, usize) = (2, 3); + 
+#[derive(Serialize, Deserialize, Debug, Clone)] +pub(crate) struct DataLayout { + pub(crate) data_dirs: Vec<DataDir>, + + /// Primary storage location (index in data_dirs) for each partition + /// = the location where the data is supposed to be, blocks are always + /// written there (copies in other dirs may be deleted if they exist) + pub(crate) part_prim: Vec<Idx>, + /// Secondary storage locations for each partition = locations + /// where data blocks might be, we check from these dirs when reading + pub(crate) part_sec: Vec<Vec<Idx>>, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)] +pub(crate) struct DataDir { + pub(crate) path: PathBuf, + pub(crate) state: DataDirState, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)] +pub(crate) enum DataDirState { + Active { capacity: u64 }, + ReadOnly, +} + +impl DataLayout { + pub(crate) fn initialize(dirs: &DataDirEnum) -> Result<Self, Error> { + let data_dirs = make_data_dirs(dirs)?; + + // Split partitions proportionnally to capacity for all drives + // to affect primary storage location + let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>(); + assert!(total_cap > 0); + + let mut part_prim = Vec::with_capacity(DRIVE_NPART); + let mut cum_cap = 0; + for (i, dd) in data_dirs.iter().enumerate() { + if let DataDirState::Active { capacity } = dd.state { + cum_cap += capacity; + let n_total = (cum_cap * DRIVE_NPART as u64) / total_cap; + part_prim.resize(n_total as usize, i as Idx); + } + } + assert_eq!(cum_cap, total_cap); + assert_eq!(part_prim.len(), DRIVE_NPART); + + // If any of the storage locations is non-empty, it probably existed before + // this algorithm was added, so add it as a secondary storage location for all partitions + // to make sure existing files are not lost + let mut part_sec = vec![vec![]; DRIVE_NPART]; + for (i, dd) in data_dirs.iter().enumerate() { + if dir_not_empty(&dd.path)? 
{ + for (sec, prim) in part_sec.iter_mut().zip(part_prim.iter()) { + if *prim != i as Idx { + sec.push(i as Idx); + } + } + } + } + + Ok(Self { + data_dirs, + part_prim, + part_sec, + }) + } + + pub(crate) fn update(&mut self, dirs: &DataDirEnum) -> Result<(), Error> { + // Make list of new data directories, exit if nothing changed + let data_dirs = make_data_dirs(dirs)?; + if data_dirs == self.data_dirs { + return Ok(()); + } + + let total_cap = data_dirs.iter().filter_map(|x| x.capacity()).sum::<u64>(); + assert!(total_cap > 0); + + // Compute mapping of old indices to new indices + let old2new = self + .data_dirs + .iter() + .map(|x| { + data_dirs + .iter() + .position(|y| y.path == x.path) + .map(|x| x as Idx) + }) + .collect::<Vec<_>>(); + + // Compute secondary location list for partitions based on existing + // folders, translating indices from old to new + let mut part_sec = self + .part_sec + .iter() + .map(|dl| { + dl.iter() + .filter_map(|old| old2new.get(*old as usize).copied().flatten()) + .collect::<Vec<_>>() + }) + .collect::<Vec<_>>(); + + // Compute a vector that, for each data dir, + // contains the list of partitions primarily stored on that drive + let mut dir_prim = vec![vec![]; data_dirs.len()]; + for (ipart, prim) in self.part_prim.iter().enumerate() { + if let Some(new) = old2new.get(*prim as usize).copied().flatten() { + dir_prim[new as usize].push(ipart); + } + } + + // Compute the target number of partitions per data directory + let mut cum_cap = 0; + let mut npart_per_dir = vec![0; data_dirs.len()]; + for (idir, dd) in data_dirs.iter().enumerate() { + if let DataDirState::Active { capacity } = dd.state { + let begin = (cum_cap * DRIVE_NPART as u64) / total_cap; + cum_cap += capacity; + let end = (cum_cap * DRIVE_NPART as u64) / total_cap; + npart_per_dir[idir] = (end - begin) as usize; + } + } + assert_eq!(cum_cap, total_cap); + assert_eq!(npart_per_dir.iter().sum::<usize>(), DRIVE_NPART); + + // For all directories that have too many primary partitions, + // move that partition to secondary + for (idir, (parts, tgt_npart)) in dir_prim.iter_mut().zip(npart_per_dir.iter()).enumerate() + { + while parts.len() > *tgt_npart { + let part = parts.pop().unwrap(); + if !part_sec[part].contains(&(idir as Idx)) { + part_sec[part].push(idir as Idx); + } + } + } + + // Calculate the vector of primary partition dir index + let mut part_prim = vec![None; DRIVE_NPART]; + for (idir, parts) in dir_prim.iter().enumerate() { + for part in parts.iter() { + assert!(part_prim[*part].is_none()); + part_prim[*part] = Some(idir as Idx) + } + } + + // Calculate a vector of unassigned partitions + let mut unassigned = part_prim + .iter() + .enumerate() + .filter(|(_, dir)| dir.is_none()) + .map(|(ipart, _)| ipart) + .collect::<Vec<_>>(); + + // For all directories that don't have enough primary partitions, + // add partitions from unassigned + for (idir, (parts, tgt_npart)) in dir_prim.iter_mut().zip(npart_per_dir.iter()).enumerate() + { + if parts.len() < *tgt_npart { + let required = *tgt_npart - parts.len(); + assert!(unassigned.len() >= required); + for _ in 0..required { + let new_part = unassigned.pop().unwrap(); + part_prim[new_part] = Some(idir as Idx); + part_sec[new_part].retain(|x| *x != idir as Idx); + } + } + } + + // Sanity checks + assert!(part_prim.iter().all(|x| x.is_some())); + assert!(unassigned.is_empty()); + + // Transform part_prim from vec of Option<Idx> to vec of Idx + let part_prim = part_prim + .into_iter() + .map(|x| x.unwrap()) + .collect::<Vec<_>>(); + 
assert!(part_prim.iter().all(|p| data_dirs + .get(*p as usize) + .and_then(|x| x.capacity()) + .unwrap_or(0) + > 0)); + + // If any of the newly added storage locations is non-empty, + // it might have been removed and added again and might contain data, + // so add it as a secondary storage location for all partitions + // to make sure existing files are not lost + for (i, dd) in data_dirs.iter().enumerate() { + if self.data_dirs.iter().any(|ed| ed.path == dd.path) { + continue; + } + if dir_not_empty(&dd.path)? { + for (sec, prim) in part_sec.iter_mut().zip(part_prim.iter()) { + if *prim != i as Idx && !sec.contains(&(i as Idx)) { + sec.push(i as Idx); + } + } + } + } + + // Apply newly generated config + *self = Self { + data_dirs, + part_prim, + part_sec, + }; + Ok(()) + } + + pub(crate) fn primary_block_dir(&self, hash: &Hash) -> PathBuf { + let ipart = self.partition_from(hash); + let idir = self.part_prim[ipart] as usize; + self.block_dir_from(hash, &self.data_dirs[idir].path) + } + + pub(crate) fn secondary_block_dirs<'a>( + &'a self, + hash: &'a Hash, + ) -> impl Iterator<Item = PathBuf> + 'a { + let ipart = self.partition_from(hash); + self.part_sec[ipart] + .iter() + .map(move |idir| self.block_dir_from(hash, &self.data_dirs[*idir as usize].path)) + } + + fn partition_from(&self, hash: &Hash) -> usize { + u16::from_be_bytes([ + hash.as_slice()[HASH_DRIVE_BYTES.0], + hash.as_slice()[HASH_DRIVE_BYTES.1], + ]) as usize % DRIVE_NPART + } + + fn block_dir_from(&self, hash: &Hash, dir: &PathBuf) -> PathBuf { + let mut path = dir.clone(); + path.push(hex::encode(&hash.as_slice()[0..1])); + path.push(hex::encode(&hash.as_slice()[1..2])); + path + } + + pub(crate) fn without_secondary_locations(&self) -> Self { + Self { + data_dirs: self.data_dirs.clone(), + part_prim: self.part_prim.clone(), + part_sec: self.part_sec.iter().map(|_| vec![]).collect::<Vec<_>>(), + } + } +} + +impl InitialFormat for DataLayout { + const VERSION_MARKER: &'static [u8] = b"G09bmdl"; +} + +impl DataDir { + pub fn capacity(&self) -> Option<u64> { + match self.state { + DataDirState::Active { capacity } => Some(capacity), + _ => None, + } + } +} + +fn make_data_dirs(dirs: &DataDirEnum) -> Result<Vec<DataDir>, Error> { + let mut data_dirs = vec![]; + match dirs { + DataDirEnum::Single(path) => data_dirs.push(DataDir { + path: path.clone(), + state: DataDirState::Active { + capacity: 1_000_000_000, // whatever, doesn't matter + }, + }), + DataDirEnum::Multiple(dirs) => { + let mut ok = false; + for dir in dirs.iter() { + let state = match &dir.capacity { + Some(cap) if dir.read_only == false => { + let capacity = cap.parse::<bytesize::ByteSize>() + .ok_or_message("invalid capacity value")?.as_u64(); + if capacity == 0 { + return Err(Error::Message(format!("data directory {} should have non-zero capacity", dir.path.to_string_lossy()))); + } + ok = true; + DataDirState::Active { + capacity, + } + } + None if dir.read_only == true => { + DataDirState::ReadOnly + } + _ => return Err(Error::Message(format!("data directories in data_dir should have a capacity value or be marked read_only, not the case for {}", dir.path.to_string_lossy()))), + }; + data_dirs.push(DataDir { + path: dir.path.clone(), + state, + }); + } + if !ok { + return Err(Error::Message( + "incorrect data_dir configuration, no primary writable directory specified" + .into(), + )); + } + } + } + Ok(data_dirs) +} + +fn dir_not_empty(path: &PathBuf) -> Result<bool, Error> { + for entry in std::fs::read_dir(&path)? 
{ + let dir = entry?; + if dir.file_type()?.is_dir() + && dir + .file_name() + .into_string() + .ok() + .and_then(|hex| hex::decode(&hex).ok()) + .is_some() + { + return Ok(true); + } + } + Ok(false) +} diff --git a/src/block/lib.rs b/src/block/lib.rs index d2814f77..c9ff2845 100644 --- a/src/block/lib.rs +++ b/src/block/lib.rs @@ -6,5 +6,6 @@ pub mod repair; pub mod resync; mod block; +mod layout; mod metrics; mod rc; diff --git a/src/block/manager.rs b/src/block/manager.rs index 3ece9a8a..2d1b5c67 100644 --- a/src/block/manager.rs +++ b/src/block/manager.rs @@ -3,7 +3,7 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use arc_swap::ArcSwapOption; +use arc_swap::{ArcSwap, ArcSwapOption}; use async_trait::async_trait; use bytes::Bytes; use rand::prelude::*; @@ -25,10 +25,11 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream}; use garage_db as db; use garage_util::background::{vars, BackgroundRunner}; +use garage_util::config::DataDirEnum; use garage_util::data::*; use garage_util::error::*; use garage_util::metrics::RecordDuration; -use garage_util::persister::PersisterShared; +use garage_util::persister::{Persister, PersisterShared}; use garage_util::time::msec_to_rfc3339; use garage_rpc::rpc_helper::OrderTag; @@ -38,6 +39,7 @@ use garage_rpc::*; use garage_table::replication::{TableReplication, TableShardedReplication}; use crate::block::*; +use crate::layout::*; use crate::metrics::*; use crate::rc::*; use crate::repair::*; @@ -77,12 +79,16 @@ impl Rpc for BlockRpc { pub struct BlockManager { /// Replication strategy, allowing to find on which node blocks should be located pub replication: TableShardedReplication, - /// Directory in which block are stored - pub data_dir: PathBuf, + /// Data layout + pub(crate) data_layout: ArcSwap<DataLayout>, + /// Data layout persister + pub(crate) data_layout_persister: Persister<DataLayout>, + + data_fsync: bool, compression_level: Option<i32>, - mutation_lock: [Mutex<BlockManagerLocked>; 256], + mutation_lock: Vec<Mutex<BlockManagerLocked>>, pub(crate) rc: BlockRc, pub resync: BlockResyncManager, @@ -105,6 +111,9 @@ pub struct BlockResyncErrorInfo { pub next_try: u64, } +// The number of different mutexes used to parallelize write access to data blocks +const MUTEX_COUNT: usize = 256; + // This custom struct contains functions that must only be ran // when the lock is held. We ensure that it is the case by storing // it INSIDE a Mutex. @@ -113,11 +122,29 @@ struct BlockManagerLocked(); impl BlockManager { pub fn new( db: &db::Db, - data_dir: PathBuf, + data_dir: DataDirEnum, + data_fsync: bool, compression_level: Option<i32>, replication: TableShardedReplication, system: Arc<System>, - ) -> Arc<Self> { + ) -> Result<Arc<Self>, Error> { + // Load or compute layout, i.e. 
assignment of data blocks to the different data directories + let data_layout_persister: Persister<DataLayout> = + Persister::new(&system.metadata_dir, "data_layout"); + let data_layout = match data_layout_persister.load() { + Ok(mut layout) => { + layout + .update(&data_dir) + .ok_or_message("invalid data_dir config")?; + layout + } + Err(_) => DataLayout::initialize(&data_dir).ok_or_message("invalid data_dir config")?, + }; + data_layout_persister + .save(&data_layout) + .expect("cannot save data_layout"); + + // Open metadata tables let rc = db .open_tree("block_local_rc") .expect("Unable to open block_local_rc tree"); @@ -140,9 +167,14 @@ impl BlockManager { let block_manager = Arc::new(Self { replication, - data_dir, + data_layout: ArcSwap::new(Arc::new(data_layout)), + data_layout_persister, + data_fsync, compression_level, - mutation_lock: [(); 256].map(|_| Mutex::new(BlockManagerLocked())), + mutation_lock: vec![(); MUTEX_COUNT] + .iter() + .map(|_| Mutex::new(BlockManagerLocked())) + .collect::<Vec<_>>(), rc, resync, system, @@ -154,7 +186,7 @@ impl BlockManager { block_manager.endpoint.set_handler(block_manager.clone()); block_manager.scrub_persister.set_with(|_| ()).unwrap(); - block_manager + Ok(block_manager) } pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) { @@ -201,44 +233,10 @@ impl BlockManager { hash: &Hash, order_tag: Option<OrderTag>, ) -> Result<(DataBlockHeader, ByteStream), Error> { - let who = self.replication.read_nodes(hash); - let who = self.system.rpc.request_order(&who); - - for node in who.iter() { - let node_id = NodeID::from(*node); - let rpc = self.endpoint.call_streaming( - &node_id, - BlockRpc::GetBlock(*hash, order_tag), - PRIO_NORMAL | PRIO_SECONDARY, - ); - tokio::select! { - res = rpc => { - let res = match res { - Ok(res) => res, - Err(e) => { - debug!("Node {:?} returned error: {}", node, e); - continue; - } - }; - let (header, stream) = match res.into_parts() { - (Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream), - _ => { - debug!("Node {:?} returned a malformed response", node); - continue; - } - }; - return Ok((header, stream)); - } - _ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => { - debug!("Node {:?} didn't return block in time, trying next.", node); - } - }; - } - - Err(Error::Message(format!( - "Unable to read block {:?}: no node returned a valid block", - hash - ))) + self.rpc_get_raw_block_internal(hash, order_tag, |header, stream| async move { + Ok((header, stream)) + }) + .await } /// Ask nodes that might have a (possibly compressed) block for it @@ -248,6 +246,24 @@ impl BlockManager { hash: &Hash, order_tag: Option<OrderTag>, ) -> Result<DataBlock, Error> { + self.rpc_get_raw_block_internal(hash, order_tag, |header, stream| async move { + read_stream_to_end(stream) + .await + .map(|data| DataBlock::from_parts(header, data)) + }) + .await + } + + async fn rpc_get_raw_block_internal<F, Fut, T>( + &self, + hash: &Hash, + order_tag: Option<OrderTag>, + f: F, + ) -> Result<T, Error> + where + F: Fn(DataBlockHeader, ByteStream) -> Fut, + Fut: futures::Future<Output = Result<T, Error>>, + { let who = self.replication.read_nodes(hash); let who = self.system.rpc.request_order(&who); @@ -263,34 +279,41 @@ impl BlockManager { let res = match res { Ok(res) => res, Err(e) => { - debug!("Node {:?} returned error: {}", node, e); + debug!("Get block {:?}: node {:?} could not be contacted: {}", hash, node, e); continue; } }; let (header, stream) = match res.into_parts() { (Ok(BlockRpc::PutBlock { 
hash: _, header }), Some(stream)) => (header, stream), - _ => { - debug!("Node {:?} returned a malformed response", node); + (Ok(_), _) => { + debug!("Get block {:?}: node {:?} returned a malformed response", hash, node); + continue; + } + (Err(e), _) => { + debug!("Get block {:?}: node {:?} returned error: {}", hash, node, e); continue; } }; - match read_stream_to_end(stream).await { - Ok(bytes) => return Ok(DataBlock::from_parts(header, bytes)), + match f(header, stream).await { + Ok(ret) => return Ok(ret), Err(e) => { - debug!("Error reading stream from node {:?}: {}", node, e); + debug!("Get block {:?}: error reading stream from node {:?}: {}", hash, node, e); } } } + // TODO: sleep less long (fail early), initiate a second request earlier + // if the first one doesn't succeed rapidly + // TODO: keep first request running when initiating a new one and take the + // one that finishes earlier _ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => { - debug!("Node {:?} didn't return block in time, trying next.", node); + debug!("Get block {:?}: node {:?} didn't return block in time, trying next.", hash, node); } }; } - Err(Error::Message(format!( - "Unable to read block {:?}: no node returned a valid block", - hash - ))) + let msg = format!("Get block {:?}: no node returned a valid block", hash); + debug!("{}", msg); + Err(Error::Message(msg)) } // ---- Public interface ---- @@ -468,8 +491,6 @@ impl BlockManager { pub(crate) async fn write_block(&self, hash: &Hash, data: &DataBlock) -> Result<(), Error> { let tracer = opentelemetry::global::tracer("garage"); - let write_size = data.inner_buffer().len() as u64; - self.lock_mutate(hash) .await .write_block(hash, data, self) @@ -479,8 +500,6 @@ impl BlockManager { )) .await?; - self.metrics.bytes_written.add(write_size); - Ok(()) } @@ -507,36 +526,42 @@ impl BlockManager { /// Read block from disk, verifying it's integrity pub(crate) async fn read_block(&self, hash: &Hash) -> Result<DataBlock, Error> { - let data = self - .read_block_internal(hash) - .bound_record_duration(&self.metrics.block_read_duration) - .await?; - - self.metrics - .bytes_read - .add(data.inner_buffer().len() as u64); - - Ok(data) + let tracer = opentelemetry::global::tracer("garage"); + async { + match self.find_block(hash).await { + Some(p) => self.read_block_from(hash, &p).await, + None => { + // Not found but maybe we should have had it ?? + self.resync + .put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?; + return Err(Error::Message(format!( + "block {:?} not found on node", + hash + ))); + } + } + } + .bound_record_duration(&self.metrics.block_read_duration) + .with_context(Context::current_with_span( + tracer.start("BlockManager::read_block"), + )) + .await } - async fn read_block_internal(&self, hash: &Hash) -> Result<DataBlock, Error> { - let mut path = self.block_path(hash); - let compressed = match self.is_block_compressed(hash).await { - Ok(c) => c, - Err(e) => { - // Not found but maybe we should have had it ?? 
- self.resync - .put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?; - return Err(Into::into(e)); - } + pub(crate) async fn read_block_from( + &self, + hash: &Hash, + block_path: &DataBlockPath, + ) -> Result<DataBlock, Error> { + let (path, compressed) = match block_path { + DataBlockPath::Plain(p) => (p, false), + DataBlockPath::Compressed(p) => (p, true), }; - if compressed { - path.set_extension("zst"); - } - let mut f = fs::File::open(&path).await?; + let mut f = fs::File::open(&path).await?; let mut data = vec![]; f.read_to_end(&mut data).await?; + self.metrics.bytes_read.add(data.len() as u64); drop(f); let data = if compressed { @@ -548,29 +573,27 @@ impl BlockManager { if data.verify(*hash).is_err() { self.metrics.corruption_counter.add(1); + warn!( + "Block {:?} is corrupted. Renaming to .corrupted and resyncing.", + hash + ); self.lock_mutate(hash) .await - .move_block_to_corrupted(hash, self) + .move_block_to_corrupted(block_path) .await?; self.resync.put_to_resync(hash, Duration::from_millis(0))?; + return Err(Error::CorruptData(*hash)); } Ok(data) } - /// Check if this node has a block and whether it needs it - pub(crate) async fn check_block_status(&self, hash: &Hash) -> Result<BlockStatus, Error> { - self.lock_mutate(hash) - .await - .check_block_status(hash, self) - .await - } - /// Check if this node should have a block, but don't actually have it async fn need_block(&self, hash: &Hash) -> Result<bool, Error> { - let BlockStatus { exists, needed } = self.check_block_status(hash).await?; - Ok(needed.is_nonzero() && !exists) + let rc = self.rc.get_block_rc(hash)?; + let exists = self.find_block(hash).await.is_some(); + Ok(rc.is_nonzero() && !exists) } /// Delete block if it is not needed anymore @@ -581,59 +604,65 @@ impl BlockManager { .await } - /// Utility: gives the path of the directory in which a block should be found - fn block_dir(&self, hash: &Hash) -> PathBuf { - let mut path = self.data_dir.clone(); - path.push(hex::encode(&hash.as_slice()[0..1])); - path.push(hex::encode(&hash.as_slice()[1..2])); - path - } - - /// Utility: give the full path where a block should be found, minus extension if block is - /// compressed - fn block_path(&self, hash: &Hash) -> PathBuf { - let mut path = self.block_dir(hash); - path.push(hex::encode(hash.as_ref())); - path - } + /// Find the path where a block is currently stored + pub(crate) async fn find_block(&self, hash: &Hash) -> Option<DataBlockPath> { + let data_layout = self.data_layout.load_full(); + let dirs = Some(data_layout.primary_block_dir(hash)) + .into_iter() + .chain(data_layout.secondary_block_dirs(hash)); + let filename = hex::encode(hash.as_ref()); - /// Utility: check if block is stored compressed. Error if block is not stored - async fn is_block_compressed(&self, hash: &Hash) -> Result<bool, Error> { - let mut path = self.block_path(hash); + for dir in dirs { + let mut path = dir; + path.push(&filename); - // If compression is disabled on node - check for the raw block - // first and then a compressed one (as compression may have been - // previously enabled). - match self.compression_level { - None => { + if self.compression_level.is_none() { + // If compression is disabled on node - check for the raw block + // first and then a compressed one (as compression may have been + // previously enabled). 
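
A compact way to read the probe order implemented in this loop of find_block(); this is a sketch only, probe_order is an illustrative helper that is not part of the patch, and compression_enabled stands for self.compression_level.is_some():

    use std::path::PathBuf;

    // Candidate file paths for one data directory, in the order find_block() checks them.
    fn probe_order(plain: PathBuf, compression_enabled: bool) -> Vec<PathBuf> {
        let mut zst = plain.clone();
        zst.set_extension("zst");
        if compression_enabled {
            vec![zst, plain] // prefer an existing compressed copy
        } else {
            vec![plain, zst] // prefer the plain copy, fall back to a leftover .zst
        }
    }
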
if fs::metadata(&path).await.is_ok() { - return Ok(false); + return Some(DataBlockPath::Plain(path)); } - path.set_extension("zst"); - - fs::metadata(&path).await.map(|_| true).map_err(Into::into) - } - _ => { + if fs::metadata(&path).await.is_ok() { + return Some(DataBlockPath::Compressed(path)); + } + } else { path.set_extension("zst"); - if fs::metadata(&path).await.is_ok() { - return Ok(true); + return Some(DataBlockPath::Compressed(path)); } - path.set_extension(""); - - fs::metadata(&path).await.map(|_| false).map_err(Into::into) + if fs::metadata(&path).await.is_ok() { + return Some(DataBlockPath::Plain(path)); + } } } + + None + } + + /// Rewrite a block at the primary location for its path and delete the old path. + /// Returns the number of bytes read/written + pub(crate) async fn fix_block_location( + &self, + hash: &Hash, + wrong_path: DataBlockPath, + ) -> Result<usize, Error> { + self.lock_mutate(hash) + .await + .fix_block_location(hash, wrong_path, self) + .await } async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> { let tracer = opentelemetry::global::tracer("garage"); - self.mutation_lock[hash.as_slice()[0] as usize] + let ilock = u16::from_be_bytes([hash.as_slice()[0], hash.as_slice()[1]]) as usize + % self.mutation_lock.len(); + self.mutation_lock[ilock] .lock() .with_context(Context::current_with_span( - tracer.start("Acquire mutation_lock"), + tracer.start(format!("Acquire mutation_lock #{}", ilock)), )) .await } @@ -646,7 +675,7 @@ impl StreamingEndpointHandler<BlockRpc> for BlockManager { BlockRpc::PutBlock { hash, header } => Resp::new( self.handle_put_block(*hash, *header, message.take_stream()) .await - .map(|_| BlockRpc::Ok), + .map(|()| BlockRpc::Ok), ), BlockRpc::GetBlock(h, order_tag) => self.handle_get_block(h, *order_tag).await, BlockRpc::NeedBlockQuery(h) => { @@ -657,66 +686,78 @@ impl StreamingEndpointHandler<BlockRpc> for BlockManager { } } -pub(crate) struct BlockStatus { - pub(crate) exists: bool, - pub(crate) needed: RcEntry, -} - impl BlockManagerLocked { - async fn check_block_status( + async fn write_block( &self, hash: &Hash, + data: &DataBlock, mgr: &BlockManager, - ) -> Result<BlockStatus, Error> { - let exists = mgr.is_block_compressed(hash).await.is_ok(); - let needed = mgr.rc.get_block_rc(hash)?; - - Ok(BlockStatus { exists, needed }) + ) -> Result<(), Error> { + let existing_path = mgr.find_block(hash).await; + self.write_block_inner(hash, data, mgr, existing_path).await } - async fn write_block( + async fn write_block_inner( &self, hash: &Hash, data: &DataBlock, mgr: &BlockManager, + existing_path: Option<DataBlockPath>, ) -> Result<(), Error> { let compressed = data.is_compressed(); let data = data.inner_buffer(); - let mut path = mgr.block_dir(hash); - let directory = path.clone(); - path.push(hex::encode(hash)); + let directory = mgr.data_layout.load().primary_block_dir(hash); - fs::create_dir_all(&directory).await?; + let mut tgt_path = directory.clone(); + tgt_path.push(hex::encode(hash)); + if compressed { + tgt_path.set_extension("zst"); + } - let to_delete = match (mgr.is_block_compressed(hash).await, compressed) { - (Ok(true), _) => return Ok(()), - (Ok(false), false) => return Ok(()), - (Ok(false), true) => { - let path_to_delete = path.clone(); - path.set_extension("zst"); - Some(path_to_delete) - } - (Err(_), compressed) => { - if compressed { - path.set_extension("zst"); - } - None - } + let to_delete = match (existing_path, compressed) { + // If the block is stored in the wrong directory, + // 
write it again at the correct path and delete the old path + (Some(DataBlockPath::Plain(p)), false) if p != tgt_path => Some(p), + (Some(DataBlockPath::Compressed(p)), true) if p != tgt_path => Some(p), + + // If the block is already stored not compressed but we have a compressed + // copy, write the compressed copy and delete the uncompressed one + (Some(DataBlockPath::Plain(plain_path)), true) => Some(plain_path), + + // If the block is already stored compressed, + // keep the stored copy, we have nothing to do + (Some(DataBlockPath::Compressed(_)), _) => return Ok(()), + + // If the block is already stored not compressed, + // and we don't have a compressed copy either, + // keep the stored copy, we have nothing to do + (Some(DataBlockPath::Plain(_)), false) => return Ok(()), + + // If the block isn't stored already, just store what is given to us + (None, _) => None, }; + assert!(to_delete.as_ref() != Some(&tgt_path)); - let mut path_tmp = path.clone(); + let mut path_tmp = tgt_path.clone(); let tmp_extension = format!("tmp{}", hex::encode(thread_rng().gen::<[u8; 4]>())); path_tmp.set_extension(tmp_extension); + fs::create_dir_all(&directory).await?; + let mut delete_on_drop = DeleteOnDrop(Some(path_tmp.clone())); let mut f = fs::File::create(&path_tmp).await?; f.write_all(data).await?; - f.sync_all().await?; + mgr.metrics.bytes_written.add(data.len() as u64); + + if mgr.data_fsync { + f.sync_all().await?; + } + drop(f); - fs::rename(path_tmp, path).await?; + fs::rename(path_tmp, tgt_path).await?; delete_on_drop.cancel(); @@ -724,52 +765,67 @@ impl BlockManagerLocked { fs::remove_file(to_delete).await?; } - // We want to ensure that when this function returns, data is properly persisted - // to disk. The first step is the sync_all above that does an fsync on the data file. - // Now, we do an fsync on the containing directory, to ensure that the rename - // is persisted properly. See: - // http://thedjbway.b0llix.net/qmail/syncdir.html - let dir = fs::OpenOptions::new() - .read(true) - .mode(0) - .open(directory) - .await?; - dir.sync_all().await?; - drop(dir); + if mgr.data_fsync { + // We want to ensure that when this function returns, data is properly persisted + // to disk. The first step is the sync_all above that does an fsync on the data file. + // Now, we do an fsync on the containing directory, to ensure that the rename + // is persisted properly. See: + // http://thedjbway.b0llix.net/qmail/syncdir.html + let dir = fs::OpenOptions::new() + .read(true) + .mode(0) + .open(directory) + .await?; + dir.sync_all().await?; + drop(dir); + } Ok(()) } - async fn move_block_to_corrupted(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> { - warn!( - "Block {:?} is corrupted. Renaming to .corrupted and resyncing.", - hash - ); - let mut path = mgr.block_path(hash); - let mut path2 = path.clone(); - if mgr.is_block_compressed(hash).await? 
{ - path.set_extension("zst"); - path2.set_extension("zst.corrupted"); - } else { - path2.set_extension("corrupted"); - } + async fn move_block_to_corrupted(&self, block_path: &DataBlockPath) -> Result<(), Error> { + let (path, path2) = match block_path { + DataBlockPath::Plain(p) => { + let mut p2 = p.clone(); + p2.set_extension("corrupted"); + (p, p2) + } + DataBlockPath::Compressed(p) => { + let mut p2 = p.clone(); + p2.set_extension("zst.corrupted"); + (p, p2) + } + }; + fs::rename(path, path2).await?; Ok(()) } async fn delete_if_unneeded(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> { - let BlockStatus { exists, needed } = self.check_block_status(hash, mgr).await?; - - if exists && needed.is_deletable() { - let mut path = mgr.block_path(hash); - if mgr.is_block_compressed(hash).await? { - path.set_extension("zst"); + let rc = mgr.rc.get_block_rc(hash)?; + if rc.is_deletable() { + while let Some(path) = mgr.find_block(hash).await { + let path = match path { + DataBlockPath::Plain(p) | DataBlockPath::Compressed(p) => p, + }; + fs::remove_file(path).await?; + mgr.metrics.delete_counter.add(1); } - fs::remove_file(path).await?; - mgr.metrics.delete_counter.add(1); } Ok(()) } + + async fn fix_block_location( + &self, + hash: &Hash, + wrong_path: DataBlockPath, + mgr: &BlockManager, + ) -> Result<usize, Error> { + let data = mgr.read_block_from(hash, &wrong_path).await?; + self.write_block_inner(hash, &data, mgr, Some(wrong_path)) + .await?; + Ok(data.inner_buffer().len()) + } } async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> { diff --git a/src/block/rc.rs b/src/block/rc.rs index 94cb5eea..b6afb277 100644 --- a/src/block/rc.rs +++ b/src/block/rc.rs @@ -56,7 +56,7 @@ impl BlockRc { /// deletion time has passed pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> { let now = now_msec(); - self.rc.db().transaction(|mut tx| { + self.rc.db().transaction(|tx| { let rcval = RcEntry::parse_opt(tx.get(&self.rc, hash)?); match rcval { RcEntry::Deletable { at_time } if now > at_time => { @@ -64,7 +64,7 @@ impl BlockRc { } _ => (), }; - tx.commit(()) + Ok(()) })?; Ok(()) } diff --git a/src/block/repair.rs b/src/block/repair.rs index 71093d69..77ee0d14 100644 --- a/src/block/repair.rs +++ b/src/block/repair.rs @@ -17,6 +17,7 @@ use garage_util::persister::PersisterShared; use garage_util::time::*; use garage_util::tranquilizer::Tranquilizer; +use crate::block::*; use crate::manager::*; // Full scrub every 25 days with a random element of 10 days mixed in below @@ -136,7 +137,7 @@ impl Worker for RepairWorker { // Lists all blocks on disk and adds them to the resync queue. // This allows us to find blocks we are storing but don't actually need, // so that we can offload them if necessary and then delete them locally. - if let Some(hash) = bi.next().await? { + if let Some((_path, hash)) = bi.next().await? 
{ self.manager .resync .put_to_resync(&hash, Duration::from_secs(0))?; @@ -175,7 +176,9 @@ mod v081 { } mod v082 { + use garage_util::data::Hash; use serde::{Deserialize, Serialize}; + use std::path::PathBuf; use super::v081; @@ -185,6 +188,27 @@ mod v082 { pub(crate) time_last_complete_scrub: u64, pub(crate) time_next_run_scrub: u64, pub(crate) corruptions_detected: u64, + #[serde(default)] + pub(crate) checkpoint: Option<BlockStoreIterator>, + } + + #[derive(Serialize, Deserialize, Clone)] + pub struct BlockStoreIterator { + pub todo: Vec<BsiTodo>, + } + + #[derive(Serialize, Deserialize, Clone)] + pub enum BsiTodo { + Directory { + path: PathBuf, + progress_min: u64, + progress_max: u64, + }, + File { + path: PathBuf, + hash: Hash, + progress: u64, + }, } impl garage_util::migrate::Migrate for ScrubWorkerPersisted { @@ -199,6 +223,7 @@ mod v082 { time_last_complete_scrub: old.time_last_complete_scrub, time_next_run_scrub: randomize_next_scrub_run_time(old.time_last_complete_scrub), corruptions_detected: old.corruptions_detected, + checkpoint: None, } } } @@ -235,14 +260,23 @@ impl Default for ScrubWorkerPersisted { time_next_run_scrub: randomize_next_scrub_run_time(now_msec()), tranquility: INITIAL_SCRUB_TRANQUILITY, corruptions_detected: 0, + checkpoint: None, } } } #[derive(Default)] enum ScrubWorkerState { - Running(BlockStoreIterator), - Paused(BlockStoreIterator, u64), // u64 = time when to resume scrub + Running { + iterator: BlockStoreIterator, + // time of the last checkpoint + t_cp: u64, + }, + Paused { + iterator: BlockStoreIterator, + // time at which the scrub should be resumed + t_resume: u64, + }, #[default] Finished, } @@ -261,10 +295,17 @@ impl ScrubWorker { rx_cmd: mpsc::Receiver<ScrubWorkerCommand>, persister: PersisterShared<ScrubWorkerPersisted>, ) -> Self { + let work = match persister.get_with(|x| x.checkpoint.clone()) { + None => ScrubWorkerState::Finished, + Some(iterator) => ScrubWorkerState::Running { + iterator, + t_cp: now_msec(), + }, + }; Self { manager, rx_cmd, - work: ScrubWorkerState::Finished, + work, tranquilizer: Tranquilizer::new(30), persister, } @@ -277,7 +318,16 @@ impl ScrubWorker { ScrubWorkerState::Finished => { info!("Scrub worker initializing, now performing datastore scrub"); let iterator = BlockStoreIterator::new(&self.manager); - ScrubWorkerState::Running(iterator) + if let Err(e) = self + .persister + .set_with(|x| x.checkpoint = Some(iterator.clone())) + { + error!("Could not save scrub checkpoint: {}", e); + } + ScrubWorkerState::Running { + iterator, + t_cp: now_msec(), + } } work => { error!("Cannot start scrub worker: already running!"); @@ -287,8 +337,18 @@ impl ScrubWorker { } ScrubWorkerCommand::Pause(dur) => { self.work = match std::mem::take(&mut self.work) { - ScrubWorkerState::Running(it) | ScrubWorkerState::Paused(it, _) => { - ScrubWorkerState::Paused(it, now_msec() + dur.as_millis() as u64) + ScrubWorkerState::Running { iterator, .. } + | ScrubWorkerState::Paused { iterator, .. } => { + if let Err(e) = self + .persister + .set_with(|x| x.checkpoint = Some(iterator.clone())) + { + error!("Could not save scrub checkpoint: {}", e); + } + ScrubWorkerState::Paused { + iterator, + t_resume: now_msec() + dur.as_millis() as u64, + } } work => { error!("Cannot pause scrub worker: not running!"); @@ -298,7 +358,10 @@ impl ScrubWorker { } ScrubWorkerCommand::Resume => { self.work = match std::mem::take(&mut self.work) { - ScrubWorkerState::Paused(it, _) => ScrubWorkerState::Running(it), + ScrubWorkerState::Paused { iterator, .. 
} => ScrubWorkerState::Running { + iterator, + t_cp: now_msec(), + }, work => { error!("Cannot resume scrub worker: not paused!"); work @@ -307,7 +370,10 @@ impl ScrubWorker { } ScrubWorkerCommand::Cancel => { self.work = match std::mem::take(&mut self.work) { - ScrubWorkerState::Running(_) | ScrubWorkerState::Paused(_, _) => { + ScrubWorkerState::Running { .. } | ScrubWorkerState::Paused { .. } => { + if let Err(e) = self.persister.set_with(|x| x.checkpoint = None) { + error!("Could not save scrub checkpoint: {}", e); + } ScrubWorkerState::Finished } work => { @@ -343,12 +409,15 @@ impl Worker for ScrubWorker { ..Default::default() }; match &self.work { - ScrubWorkerState::Running(bsi) => { - s.progress = Some(format!("{:.2}%", bsi.progress() * 100.)); + ScrubWorkerState::Running { iterator, .. } => { + s.progress = Some(format!("{:.2}%", iterator.progress() * 100.)); } - ScrubWorkerState::Paused(bsi, rt) => { - s.progress = Some(format!("{:.2}%", bsi.progress() * 100.)); - s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))]; + ScrubWorkerState::Paused { iterator, t_resume } => { + s.progress = Some(format!("{:.2}%", iterator.progress() * 100.)); + s.freeform = vec![format!( + "Scrub paused, resumes at {}", + msec_to_rfc3339(*t_resume) + )]; } ScrubWorkerState::Finished => { s.freeform = vec![ @@ -374,9 +443,11 @@ impl Worker for ScrubWorker { }; match &mut self.work { - ScrubWorkerState::Running(bsi) => { + ScrubWorkerState::Running { iterator, t_cp } => { self.tranquilizer.reset(); - if let Some(hash) = bsi.next().await? { + let now = now_msec(); + + if let Some((_path, hash)) = iterator.next().await? { match self.manager.read_block(&hash).await { Err(Error::CorruptData(_)) => { error!("Found corrupt data block during scrub: {:?}", hash); @@ -385,16 +456,23 @@ impl Worker for ScrubWorker { Err(e) => return Err(e), _ => (), }; + + if now - *t_cp > 60 * 1000 { + self.persister + .set_with(|p| p.checkpoint = Some(iterator.clone()))?; + *t_cp = now; + } + Ok(self .tranquilizer .tranquilize_worker(self.persister.get_with(|p| p.tranquility))) } else { - let now = now_msec(); let next_scrub_timestamp = randomize_next_scrub_run_time(now); self.persister.set_with(|p| { p.time_last_complete_scrub = now; p.time_next_run_scrub = next_scrub_timestamp; + p.checkpoint = None; })?; self.work = ScrubWorkerState::Finished; self.tranquilizer.clear(); @@ -413,8 +491,8 @@ impl Worker for ScrubWorker { async fn wait_for_work(&mut self) -> WorkerState { let (wait_until, command) = match &self.work { - ScrubWorkerState::Running(_) => return WorkerState::Busy, - ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume), + ScrubWorkerState::Running { .. } => return WorkerState::Busy, + ScrubWorkerState::Paused { t_resume, .. } => (*t_resume, ScrubWorkerCommand::Resume), ScrubWorkerState::Finished => ( self.persister.get_with(|p| p.time_next_run_scrub), ScrubWorkerCommand::Start, @@ -437,110 +515,250 @@ impl Worker for ScrubWorker { } match &self.work { - ScrubWorkerState::Running(_) => WorkerState::Busy, + ScrubWorkerState::Running { .. } => WorkerState::Busy, _ => WorkerState::Idle, } } } // ---- ---- ---- -// UTILITY FOR ENUMERATING THE BLOCK STORE +// THIRD KIND OF REPAIR: REBALANCING DATA BLOCKS +// between multiple storage locations. +// This is a one-shot repair operation that can be launched, +// checks everything, and then exits. 
// ---- ---- ---- -struct BlockStoreIterator { - path: Vec<ReadingDir>, +pub struct RebalanceWorker { + manager: Arc<BlockManager>, + block_iter: BlockStoreIterator, + t_started: u64, + t_finished: Option<u64>, + moved: usize, + moved_bytes: u64, } -enum ReadingDir { - Pending(PathBuf), - Read { - subpaths: Vec<fs::DirEntry>, - pos: usize, - }, +impl RebalanceWorker { + pub fn new(manager: Arc<BlockManager>) -> Self { + let block_iter = BlockStoreIterator::new(&manager); + Self { + manager, + block_iter, + t_started: now_msec(), + t_finished: None, + moved: 0, + moved_bytes: 0, + } + } +} + +#[async_trait] +impl Worker for RebalanceWorker { + fn name(&self) -> String { + "Block rebalance worker".into() + } + + fn status(&self) -> WorkerStatus { + let t_cur = self.t_finished.unwrap_or_else(|| now_msec()); + let rate = self.moved_bytes / std::cmp::max(1, (t_cur - self.t_started) / 1000); + let mut freeform = vec![ + format!("Blocks moved: {}", self.moved), + format!( + "Bytes moved: {} ({}/s)", + bytesize::ByteSize::b(self.moved_bytes), + bytesize::ByteSize::b(rate) + ), + format!("Started: {}", msec_to_rfc3339(self.t_started)), + ]; + if let Some(t_fin) = self.t_finished { + freeform.push(format!("Finished: {}", msec_to_rfc3339(t_fin))) + } + WorkerStatus { + progress: Some(format!("{:.2}%", self.block_iter.progress() * 100.)), + freeform, + ..Default::default() + } + } + + async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { + if let Some((path, hash)) = self.block_iter.next().await? { + let prim_loc = self.manager.data_layout.load().primary_block_dir(&hash); + if path.ancestors().all(|x| x != prim_loc) { + let block_path = match path.extension() { + None => DataBlockPath::Plain(path.clone()), + Some(x) if x.to_str() == Some("zst") => DataBlockPath::Compressed(path.clone()), + _ => { + warn!("not rebalancing file: {}", path.to_string_lossy()); + return Ok(WorkerState::Busy); + } + }; + // block is not in its primary location, + // move it there (reading and re-writing does the trick) + debug!("rebalance: moving block {:?} => {:?}", block_path, prim_loc); + let block_len = self.manager.fix_block_location(&hash, block_path).await?; + self.moved += 1; + self.moved_bytes += block_len as u64; + } + Ok(WorkerState::Busy) + } else { + // all blocks are in their primary location: + // - the ones we moved now are + // - the ones written in the meantime always were, because we only + // write to primary locations + // so we can safely remove all secondary locations from the data layout + let new_layout = self + .manager + .data_layout + .load_full() + .without_secondary_locations(); + self.manager + .data_layout_persister + .save_async(&new_layout) + .await?; + self.manager.data_layout.store(Arc::new(new_layout)); + self.t_finished = Some(now_msec()); + Ok(WorkerState::Done) + } + } + + async fn wait_for_work(&mut self) -> WorkerState { + unreachable!() + } } +// ---- ---- ---- +// UTILITY FOR ENUMERATING THE BLOCK STORE +// ---- ---- ---- + +const PROGRESS_FP: u64 = 1_000_000_000; + impl BlockStoreIterator { fn new(manager: &BlockManager) -> Self { - let root_dir = manager.data_dir.clone(); - Self { - path: vec![ReadingDir::Pending(root_dir)], + let data_layout = manager.data_layout.load_full(); + + let mut dir_cap = vec![0; data_layout.data_dirs.len()]; + for prim in data_layout.part_prim.iter() { + dir_cap[*prim as usize] += 1; + } + for sec_vec in data_layout.part_sec.iter() { + for sec in sec_vec.iter() { + dir_cap[*sec as usize] += 1; + } } + let 
sum_cap = dir_cap.iter().sum::<usize>() as u64; + + let mut cum_cap = 0; + let mut todo = vec![]; + for (dir, cap) in data_layout.data_dirs.iter().zip(dir_cap.into_iter()) { + let progress_min = (cum_cap * PROGRESS_FP) / sum_cap; + let progress_max = ((cum_cap + cap as u64) * PROGRESS_FP) / sum_cap; + cum_cap += cap as u64; + + todo.push(BsiTodo::Directory { + path: dir.path.clone(), + progress_min, + progress_max, + }); + } + // entries are processed back-to-front (because of .pop()), + // so reverse entries to process them in increasing progress bounds + todo.reverse(); + + let ret = Self { todo }; + debug_assert!(ret.progress_invariant()); + + ret } /// Returns progress done, between 0 and 1 fn progress(&self) -> f32 { - if self.path.is_empty() { - 1.0 - } else { - let mut ret = 0.0; - let mut next_div = 1; - for p in self.path.iter() { - match p { - ReadingDir::Pending(_) => break, - ReadingDir::Read { subpaths, pos } => { - next_div *= subpaths.len(); - ret += ((*pos - 1) as f32) / (next_div as f32); - } - } - } - ret - } + self.todo + .last() + .map(|x| match x { + BsiTodo::Directory { progress_min, .. } => *progress_min, + BsiTodo::File { progress, .. } => *progress, + }) + .map(|x| x as f32 / PROGRESS_FP as f32) + .unwrap_or(1.0) } - async fn next(&mut self) -> Result<Option<Hash>, Error> { + async fn next(&mut self) -> Result<Option<(PathBuf, Hash)>, Error> { loop { - let last_path = match self.path.last_mut() { + match self.todo.pop() { None => return Ok(None), - Some(lp) => lp, - }; - - if let ReadingDir::Pending(path) = last_path { - let mut reader = fs::read_dir(&path).await?; - let mut subpaths = vec![]; - while let Some(ent) = reader.next_entry().await? { - subpaths.push(ent); - } - *last_path = ReadingDir::Read { subpaths, pos: 0 }; - } + Some(BsiTodo::Directory { + path, + progress_min, + progress_max, + }) => { + let istart = self.todo.len(); + + let mut reader = fs::read_dir(&path).await?; + while let Some(ent) = reader.next_entry().await? { + let name = if let Ok(n) = ent.file_name().into_string() { + n + } else { + continue; + }; + let ft = ent.file_type().await?; + if ft.is_dir() && hex::decode(&name).is_ok() { + self.todo.push(BsiTodo::Directory { + path: ent.path(), + progress_min: 0, + progress_max: 0, + }); + } else if ft.is_file() { + let filename = name.split_once('.').map(|(f, _)| f).unwrap_or(&name); + if filename.len() == 64 { + if let Ok(h) = hex::decode(filename) { + let mut hash = [0u8; 32]; + hash.copy_from_slice(&h); + self.todo.push(BsiTodo::File { + path: ent.path(), + hash: hash.into(), + progress: 0, + }); + } + } + } + } - let (subpaths, pos) = match *last_path { - ReadingDir::Read { - ref subpaths, - ref mut pos, - } => (subpaths, pos), - ReadingDir::Pending(_) => unreachable!(), - }; - - let data_dir_ent = match subpaths.get(*pos) { - None => { - self.path.pop(); - continue; - } - Some(ent) => { - *pos += 1; - ent + let count = self.todo.len() - istart; + for (i, ent) in self.todo[istart..].iter_mut().enumerate() { + let p1 = progress_min + + ((progress_max - progress_min) * i as u64) / count as u64; + let p2 = progress_min + + ((progress_max - progress_min) * (i + 1) as u64) / count as u64; + match ent { + BsiTodo::Directory { + progress_min, + progress_max, + .. + } => { + *progress_min = p1; + *progress_max = p2; + } + BsiTodo::File { progress, .. 
} => { + *progress = p1; + } + } + } + self.todo[istart..].reverse(); + debug_assert!(self.progress_invariant()); } - }; - - let name = data_dir_ent.file_name(); - let name = if let Ok(n) = name.into_string() { - n - } else { - continue; - }; - let ent_type = data_dir_ent.file_type().await?; - - let name = name.strip_suffix(".zst").unwrap_or(&name); - if name.len() == 2 && hex::decode(name).is_ok() && ent_type.is_dir() { - let path = data_dir_ent.path(); - self.path.push(ReadingDir::Pending(path)); - } else if name.len() == 64 { - if let Ok(h) = hex::decode(name) { - let mut hash = [0u8; 32]; - hash.copy_from_slice(&h); - return Ok(Some(hash.into())); + Some(BsiTodo::File { path, hash, .. }) => { + return Ok(Some((path, hash))); } } } } + + // for debug_assert! + fn progress_invariant(&self) -> bool { + let iter = self.todo.iter().map(|x| match x { + BsiTodo::Directory { progress_min, .. } => progress_min, + BsiTodo::File { progress, .. } => progress, + }); + let iter_1 = iter.clone().skip(1); + iter.zip(iter_1).all(|(prev, next)| prev >= next) + } } diff --git a/src/block/resync.rs b/src/block/resync.rs index ea280ad4..9c1da4a7 100644 --- a/src/block/resync.rs +++ b/src/block/resync.rs @@ -41,7 +41,7 @@ pub(crate) const RESYNC_RETRY_DELAY: Duration = Duration::from_secs(60); pub(crate) const RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER: u64 = 6; // No more than 4 resync workers can be running in the system -pub(crate) const MAX_RESYNC_WORKERS: usize = 4; +pub(crate) const MAX_RESYNC_WORKERS: usize = 8; // Resync tranquility is initially set to 2, but can be changed in the CLI // and the updated version is persisted over Garage restarts const INITIAL_RESYNC_TRANQUILITY: u32 = 2; @@ -359,20 +359,23 @@ impl BlockResyncManager { } async fn resync_block(&self, manager: &BlockManager, hash: &Hash) -> Result<(), Error> { - let BlockStatus { exists, needed } = manager.check_block_status(hash).await?; + let existing_path = manager.find_block(hash).await; + let exists = existing_path.is_some(); + let rc = manager.rc.get_block_rc(hash)?; - if exists != needed.is_needed() || exists != needed.is_nonzero() { + if exists != rc.is_needed() || exists != rc.is_nonzero() { debug!( "Resync block {:?}: exists {}, nonzero rc {}, deletable {}", hash, exists, - needed.is_nonzero(), - needed.is_deletable(), + rc.is_nonzero(), + rc.is_deletable(), ); } - if exists && needed.is_deletable() { + if exists && rc.is_deletable() { info!("Resync block {:?}: offloading and deleting", hash); + let existing_path = existing_path.unwrap(); let mut who = manager.replication.write_nodes(hash); if who.len() < manager.replication.write_quorum() { @@ -419,7 +422,7 @@ impl BlockResyncManager { .add(1, &[KeyValue::new("to", format!("{:?}", node))]); } - let block = manager.read_block(hash).await?; + let block = manager.read_block_from(hash, &existing_path).await?; let (header, bytes) = block.into_parts(); let put_block_message = Req::new(BlockRpc::PutBlock { hash: *hash, @@ -451,7 +454,7 @@ impl BlockResyncManager { manager.rc.clear_deleted_block_rc(hash)?; } - if needed.is_nonzero() && !exists { + if rc.is_nonzero() && !exists { info!( "Resync block {:?}: fetching absent but needed block (refcount > 0)", hash diff --git a/src/db/Cargo.toml b/src/db/Cargo.toml index 67a4fd5e..67af4a7c 100644 --- a/src/db/Cargo.toml +++ b/src/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_db" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" @@ -11,29 +11,24 @@ readme = 
"../../README.md" [lib] path = "lib.rs" -[[bin]] -name = "convert" -path = "bin/convert.rs" -required-features = ["cli"] - [dependencies] err-derive = "0.3" hexdump = "0.1" tracing = "0.1" heed = { version = "0.11", default-features = false, features = ["lmdb"], optional = true } -rusqlite = { version = "0.28", optional = true } +rusqlite = { version = "0.29", optional = true } sled = { version = "0.34", optional = true } # cli deps clap = { version = "4.1", optional = true, features = ["derive", "env"] } -pretty_env_logger = { version = "0.4", optional = true } +pretty_env_logger = { version = "0.5", optional = true } [dev-dependencies] mktemp = "0.5" [features] -default = [ "sled" ] +default = [ "sled", "lmdb", "sqlite" ] bundled-libs = [ "rusqlite?/bundled" ] cli = ["clap", "pretty_env_logger"] lmdb = [ "heed" ] diff --git a/src/db/bin/convert.rs b/src/db/bin/convert.rs deleted file mode 100644 index bbde2048..00000000 --- a/src/db/bin/convert.rs +++ /dev/null @@ -1,69 +0,0 @@ -use std::path::PathBuf; - -use garage_db::*; - -use clap::Parser; - -/// K2V command line interface -#[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] -struct Args { - /// Input DB path - #[clap(short = 'i')] - input_path: PathBuf, - /// Input DB engine - #[clap(short = 'a')] - input_engine: String, - - /// Output DB path - #[clap(short = 'o')] - output_path: PathBuf, - /// Output DB engine - #[clap(short = 'b')] - output_engine: String, -} - -fn main() { - let args = Args::parse(); - pretty_env_logger::init(); - - match do_conversion(args) { - Ok(()) => println!("Success!"), - Err(e) => eprintln!("Error: {}", e), - } -} - -fn do_conversion(args: Args) -> Result<()> { - let input = open_db(args.input_path, args.input_engine)?; - let output = open_db(args.output_path, args.output_engine)?; - output.import(&input)?; - Ok(()) -} - -fn open_db(path: PathBuf, engine: String) -> Result<Db> { - match engine.as_str() { - "sled" => { - let db = sled_adapter::sled::Config::default().path(&path).open()?; - Ok(sled_adapter::SledDb::init(db)) - } - "sqlite" | "sqlite3" | "rusqlite" => { - let db = sqlite_adapter::rusqlite::Connection::open(&path)?; - Ok(sqlite_adapter::SqliteDb::init(db)) - } - "lmdb" | "heed" => { - std::fs::create_dir_all(&path).map_err(|e| { - Error(format!("Unable to create LMDB data directory: {}", e).into()) - })?; - - let map_size = lmdb_adapter::recommended_map_size(); - - let db = lmdb_adapter::heed::EnvOpenOptions::new() - .max_dbs(100) - .map_size(map_size) - .open(&path) - .unwrap(); - Ok(lmdb_adapter::LmdbDb::init(db)) - } - e => Err(Error(format!("Invalid DB engine: {}", e).into())), - } -} diff --git a/src/db/counted_tree_hack.rs b/src/db/counted_tree_hack.rs index bbe943a2..a4ce12e0 100644 --- a/src/db/counted_tree_hack.rs +++ b/src/db/counted_tree_hack.rs @@ -85,7 +85,7 @@ impl CountedTree { let old_some = expected_old.is_some(); let new_some = new.is_some(); - let tx_res = self.0.tree.db().transaction(|mut tx| { + let tx_res = self.0.tree.db().transaction(|tx| { let old_val = tx.get(&self.0.tree, &key)?; let is_same = match (&old_val, &expected_old) { (None, None) => true, @@ -101,9 +101,9 @@ impl CountedTree { tx.remove(&self.0.tree, &key)?; } } - tx.commit(()) + Ok(()) } else { - tx.abort(()) + Err(TxError::Abort(())) } }); diff --git a/src/db/lib.rs b/src/db/lib.rs index 11cae4e3..fe44b01e 100644 --- a/src/db/lib.rs +++ b/src/db/lib.rs @@ -2,9 +2,6 @@ #[cfg(feature = "sqlite")] extern crate tracing; -#[cfg(not(any(feature = "lmdb", feature = "sled", feature = 
"sqlite")))] -compile_error!("Must activate the Cargo feature for at least one DB engine: lmdb, sled or sqlite."); - #[cfg(feature = "lmdb")] pub mod lmdb_adapter; #[cfg(feature = "sled")] @@ -25,10 +22,15 @@ use std::sync::Arc; use err_derive::Error; +pub(crate) type OnCommit = Vec<Box<dyn FnOnce()>>; + #[derive(Clone)] pub struct Db(pub(crate) Arc<dyn IDb>); -pub struct Transaction<'a>(&'a mut dyn ITx); +pub struct Transaction<'a> { + tx: &'a mut dyn ITx, + on_commit: OnCommit, +} #[derive(Clone)] pub struct Tree(Arc<dyn IDb>, usize); @@ -88,7 +90,7 @@ impl Db { pub fn transaction<R, E, F>(&self, fun: F) -> TxResult<R, E> where - F: Fn(Transaction<'_>) -> TxResult<R, E>, + F: Fn(&mut Transaction<'_>) -> TxResult<R, E>, { let f = TxFn { function: fun, @@ -101,14 +103,17 @@ impl Db { .expect("Transaction did not store result"); match tx_res { - Ok(()) => { - assert!(matches!(ret, Ok(_))); - ret - } - Err(TxError::Abort(())) => { - assert!(matches!(ret, Err(TxError::Abort(_)))); - ret - } + Ok(on_commit) => match ret { + Ok(value) => { + on_commit.into_iter().for_each(|f| f()); + Ok(value) + } + _ => unreachable!(), + }, + Err(TxError::Abort(())) => match ret { + Err(TxError::Abort(e)) => Err(TxError::Abort(e)), + _ => unreachable!(), + }, Err(TxError::Db(e2)) => match ret { // Ok was stored -> the error occured when finalizing // transaction @@ -142,7 +147,7 @@ impl Db { let ex_tree = other.open_tree(&name)?; - let tx_res = self.transaction(|mut tx| { + let tx_res = self.transaction(|tx| { let mut i = 0; for item in ex_tree.iter().map_err(TxError::Abort)? { let (k, v) = item.map_err(TxError::Abort)?; @@ -152,7 +157,7 @@ impl Db { println!("{}: imported {}", name, i); } } - tx.commit(i) + Ok(i) }); let total = match tx_res { Err(TxError::Db(e)) => return Err(e), @@ -252,11 +257,11 @@ impl Tree { impl<'a> Transaction<'a> { #[inline] pub fn get<T: AsRef<[u8]>>(&self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> { - self.0.get(tree.1, key.as_ref()) + self.tx.get(tree.1, key.as_ref()) } #[inline] pub fn len(&self, tree: &Tree) -> TxOpResult<usize> { - self.0.len(tree.1) + self.tx.len(tree.1) } /// Returns the old value if there was one @@ -267,21 +272,21 @@ impl<'a> Transaction<'a> { key: T, value: U, ) -> TxOpResult<Option<Value>> { - self.0.insert(tree.1, key.as_ref(), value.as_ref()) + self.tx.insert(tree.1, key.as_ref(), value.as_ref()) } /// Returns the old value if there was one #[inline] pub fn remove<T: AsRef<[u8]>>(&mut self, tree: &Tree, key: T) -> TxOpResult<Option<Value>> { - self.0.remove(tree.1, key.as_ref()) + self.tx.remove(tree.1, key.as_ref()) } #[inline] pub fn iter(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> { - self.0.iter(tree.1) + self.tx.iter(tree.1) } #[inline] pub fn iter_rev(&self, tree: &Tree) -> TxOpResult<TxValueIter<'_>> { - self.0.iter_rev(tree.1) + self.tx.iter_rev(tree.1) } #[inline] @@ -292,7 +297,7 @@ impl<'a> Transaction<'a> { { let sb = range.start_bound(); let eb = range.end_bound(); - self.0.range(tree.1, get_bound(sb), get_bound(eb)) + self.tx.range(tree.1, get_bound(sb), get_bound(eb)) } #[inline] pub fn range_rev<K, R>(&self, tree: &Tree, range: R) -> TxOpResult<TxValueIter<'_>> @@ -302,19 +307,12 @@ impl<'a> Transaction<'a> { { let sb = range.start_bound(); let eb = range.end_bound(); - self.0.range_rev(tree.1, get_bound(sb), get_bound(eb)) + self.tx.range_rev(tree.1, get_bound(sb), get_bound(eb)) } - // ---- - #[inline] - pub fn abort<R, E>(self, e: E) -> TxResult<R, E> { - Err(TxError::Abort(e)) - } - - #[inline] - pub fn commit<R, 
E>(self, r: R) -> TxResult<R, E> { - Ok(r) + pub fn on_commit<F: FnOnce() + 'static>(&mut self, f: F) { + self.on_commit.push(Box::new(f)); } } @@ -351,7 +349,7 @@ pub(crate) trait IDb: Send + Sync { high: Bound<&'r [u8]>, ) -> Result<ValueIter<'_>>; - fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()>; + fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()>; } pub(crate) trait ITx { @@ -383,14 +381,14 @@ pub(crate) trait ITxFn { } pub(crate) enum TxFnResult { - Ok, + Ok(OnCommit), Abort, DbErr, } struct TxFn<F, R, E> where - F: Fn(Transaction<'_>) -> TxResult<R, E>, + F: Fn(&mut Transaction<'_>) -> TxResult<R, E>, { function: F, result: Cell<Option<TxResult<R, E>>>, @@ -398,12 +396,16 @@ where impl<F, R, E> ITxFn for TxFn<F, R, E> where - F: Fn(Transaction<'_>) -> TxResult<R, E>, + F: Fn(&mut Transaction<'_>) -> TxResult<R, E>, { fn try_on(&self, tx: &mut dyn ITx) -> TxFnResult { - let res = (self.function)(Transaction(tx)); + let mut tx = Transaction { + tx, + on_commit: vec![], + }; + let res = (self.function)(&mut tx); let res2 = match &res { - Ok(_) => TxFnResult::Ok, + Ok(_) => TxFnResult::Ok(tx.on_commit), Err(TxError::Abort(_)) => TxFnResult::Abort, Err(TxError::Db(_)) => TxFnResult::DbErr, }; diff --git a/src/db/lmdb_adapter.rs b/src/db/lmdb_adapter.rs index ecbc3b81..59fa132d 100644 --- a/src/db/lmdb_adapter.rs +++ b/src/db/lmdb_adapter.rs @@ -9,8 +9,8 @@ use heed::types::ByteSlice; use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database}; use crate::{ - Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult, - TxValueIter, Value, ValueIter, + Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, + TxResult, TxValueIter, Value, ValueIter, }; pub use heed; @@ -186,7 +186,7 @@ impl IDb for LmdbDb { // ---- - fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> { + fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> { let trees = self.trees.read().unwrap(); let mut tx = LmdbTx { trees: &trees.0[..], @@ -199,9 +199,9 @@ impl IDb for LmdbDb { let res = f.try_on(&mut tx); match res { - TxFnResult::Ok => { + TxFnResult::Ok(on_commit) => { tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?; - Ok(()) + Ok(on_commit) } TxFnResult::Abort => { tx.tx.abort().map_err(Error::from).map_err(TxError::Db)?; diff --git a/src/db/sled_adapter.rs b/src/db/sled_adapter.rs index cf61867d..84f2001b 100644 --- a/src/db/sled_adapter.rs +++ b/src/db/sled_adapter.rs @@ -10,8 +10,8 @@ use sled::transaction::{ }; use crate::{ - Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult, - TxValueIter, Value, ValueIter, + Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, + TxResult, TxValueIter, Value, ValueIter, }; pub use sled; @@ -38,7 +38,15 @@ pub struct SledDb { } impl SledDb { + #[deprecated( + since = "0.9.0", + note = "The Sled database is now deprecated and will be removed in Garage v1.0. Please migrate to LMDB or Sqlite as soon as possible." + )] pub fn init(db: sled::Db) -> Db { + tracing::warn!("-------------------- IMPORTANT WARNING !!! 
----------------------"); + tracing::warn!("The Sled database is now deprecated and will be removed in Garage v1.0."); + tracing::warn!("Please migrate to LMDB or Sqlite as soon as possible."); + tracing::warn!("-----------------------------------------------------------------------"); let s = Self { db, trees: RwLock::new((Vec::new(), HashMap::new())), @@ -158,7 +166,7 @@ impl IDb for SledDb { // ---- - fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> { + fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> { let trees = self.trees.read().unwrap(); let res = trees.0.transaction(|txtrees| { let mut tx = SledTx { @@ -166,9 +174,9 @@ impl IDb for SledDb { err: Cell::new(None), }; match f.try_on(&mut tx) { - TxFnResult::Ok => { + TxFnResult::Ok(on_commit) => { assert!(tx.err.into_inner().is_none()); - Ok(()) + Ok(on_commit) } TxFnResult::Abort => { assert!(tx.err.into_inner().is_none()); @@ -181,7 +189,7 @@ impl IDb for SledDb { } }); match res { - Ok(()) => Ok(()), + Ok(on_commit) => Ok(on_commit), Err(TransactionError::Abort(())) => Err(TxError::Abort(())), Err(TransactionError::Storage(s)) => Err(TxError::Db(s.into())), } diff --git a/src/db/sqlite_adapter.rs b/src/db/sqlite_adapter.rs index 63b4506e..9f967c66 100644 --- a/src/db/sqlite_adapter.rs +++ b/src/db/sqlite_adapter.rs @@ -9,8 +9,8 @@ use std::sync::{Arc, Mutex, MutexGuard}; use rusqlite::{params, Connection, Rows, Statement, Transaction}; use crate::{ - Db, Error, IDb, ITx, ITxFn, Result, TxError, TxFnResult, TxOpError, TxOpResult, TxResult, - TxValueIter, Value, ValueIter, + Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult, + TxResult, TxValueIter, Value, ValueIter, }; pub use rusqlite; @@ -261,7 +261,7 @@ impl IDb for SqliteDb { // ---- - fn transaction(&self, f: &dyn ITxFn) -> TxResult<(), ()> { + fn transaction(&self, f: &dyn ITxFn) -> TxResult<OnCommit, ()> { trace!("transaction: lock db"); let mut this = self.0.lock().unwrap(); trace!("transaction: lock acquired"); @@ -277,9 +277,9 @@ impl IDb for SqliteDb { trees: &this_mut_ref.trees, }; let res = match f.try_on(&mut tx) { - TxFnResult::Ok => { + TxFnResult::Ok(on_commit) => { tx.tx.commit().map_err(Error::from).map_err(TxError::Db)?; - Ok(()) + Ok(on_commit) } TxFnResult::Abort => { tx.tx.rollback().map_err(Error::from).map_err(TxError::Db)?; diff --git a/src/db/test.rs b/src/db/test.rs index 40e6c41e..cd99eafa 100644 --- a/src/db/test.rs +++ b/src/db/test.rs @@ -13,26 +13,26 @@ fn test_suite(db: Db) { assert!(tree.insert(ka, va).unwrap().is_none()); assert_eq!(tree.get(ka).unwrap().unwrap(), va); - let res = db.transaction::<_, (), _>(|mut tx| { + let res = db.transaction::<_, (), _>(|tx| { assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), va); assert_eq!(tx.insert(&tree, ka, vb).unwrap().unwrap(), va); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb); - tx.commit(12) + Ok(12) }); assert!(matches!(res, Ok(12))); assert_eq!(tree.get(ka).unwrap().unwrap(), vb); - let res = db.transaction::<(), _, _>(|mut tx| { + let res = db.transaction::<(), _, _>(|tx| { assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vb); assert_eq!(tx.insert(&tree, ka, vc).unwrap().unwrap(), vb); assert_eq!(tx.get(&tree, ka).unwrap().unwrap(), vc); - tx.abort(42) + Err(TxError::Abort(42)) }); assert!(matches!(res, Err(TxError::Abort(42)))); assert_eq!(tree.get(ka).unwrap().unwrap(), vb); diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml index a41faaad..7c3a79cb 100644 --- a/src/garage/Cargo.toml +++ b/src/garage/Cargo.toml @@ -1,6 +1,6 
@@ [package] name = "garage" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" @@ -33,7 +33,7 @@ garage_web.workspace = true backtrace = "0.3" bytes = "1.0" -bytesize = "1.1" +bytesize = "1.2" timeago = { version = "0.4", default-features = false } parse_duration = "2.1" hex = "0.4" @@ -53,7 +53,7 @@ futures = "0.3" futures-util = "0.3" tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] } -netapp = "0.5" +netapp = "0.10" opentelemetry = { version = "0.17", features = [ "rt-tokio" ] } opentelemetry-prometheus = { version = "0.10", optional = true } @@ -78,7 +78,7 @@ k2v-client.workspace = true [features] -default = [ "bundled-libs", "metrics", "sled", "k2v" ] +default = [ "bundled-libs", "metrics", "sled", "lmdb", "sqlite", "k2v" ] k2v = [ "garage_util/k2v", "garage_api/k2v" ] diff --git a/src/garage/admin/block.rs b/src/garage/admin/block.rs index e9e3ff96..c4a45738 100644 --- a/src/garage/admin/block.rs +++ b/src/garage/admin/block.rs @@ -34,6 +34,7 @@ impl AdminRpcHandler { .get_range(&hash, None, None, 10000, Default::default()) .await?; let mut versions = vec![]; + let mut uploads = vec![]; for br in block_refs { if let Some(v) = self .garage @@ -41,6 +42,11 @@ impl AdminRpcHandler { .get(&br.version, &EmptyKey) .await? { + if let VersionBacklink::MultipartUpload { upload_id } = &v.backlink { + if let Some(u) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? { + uploads.push(u); + } + } versions.push(Ok(v)); } else { versions.push(Err(br.version)); @@ -50,6 +56,7 @@ impl AdminRpcHandler { hash, refcount, versions, + uploads, }) } @@ -93,6 +100,7 @@ impl AdminRpcHandler { } let mut obj_dels = 0; + let mut mpu_dels = 0; let mut ver_dels = 0; for hash in blocks { @@ -105,56 +113,80 @@ impl AdminRpcHandler { .await?; for br in block_refs { - let version = match self + if let Some(version) = self .garage .version_table .get(&br.version, &EmptyKey) .await? { - Some(v) => v, - None => continue, - }; + self.handle_block_purge_version_backlink( + &version, + &mut obj_dels, + &mut mpu_dels, + ) + .await?; - if let Some(object) = self - .garage - .object_table - .get(&version.bucket_id, &version.key) - .await? 
- { - let ov = object.versions().iter().rev().find(|v| v.is_complete()); - if let Some(ov) = ov { - if ov.uuid == br.version { - let del_uuid = gen_uuid(); - let deleted_object = Object::new( - version.bucket_id, - version.key.clone(), - vec![ObjectVersion { - uuid: del_uuid, - timestamp: ov.timestamp + 1, - state: ObjectVersionState::Complete( - ObjectVersionData::DeleteMarker, - ), - }], - ); - self.garage.object_table.insert(&deleted_object).await?; - obj_dels += 1; - } + if !version.deleted.get() { + let deleted_version = Version::new(version.uuid, version.backlink, true); + self.garage.version_table.insert(&deleted_version).await?; + ver_dels += 1; } } - - if !version.deleted.get() { - let deleted_version = - Version::new(version.uuid, version.bucket_id, version.key.clone(), true); - self.garage.version_table.insert(&deleted_version).await?; - ver_dels += 1; - } } } + Ok(AdminRpc::Ok(format!( - "{} blocks were purged: {} object deletion markers added, {} versions marked deleted", + "Purged {} blocks, {} versions, {} objects, {} multipart uploads", blocks.len(), + ver_dels, obj_dels, - ver_dels + mpu_dels, ))) } + + async fn handle_block_purge_version_backlink( + &self, + version: &Version, + obj_dels: &mut usize, + mpu_dels: &mut usize, + ) -> Result<(), Error> { + let (bucket_id, key, ov_id) = match &version.backlink { + VersionBacklink::Object { bucket_id, key } => (*bucket_id, key.clone(), version.uuid), + VersionBacklink::MultipartUpload { upload_id } => { + if let Some(mut mpu) = self.garage.mpu_table.get(upload_id, &EmptyKey).await? { + if !mpu.deleted.get() { + mpu.parts.clear(); + mpu.deleted.set(); + self.garage.mpu_table.insert(&mpu).await?; + *mpu_dels += 1; + } + (mpu.bucket_id, mpu.key.clone(), *upload_id) + } else { + return Ok(()); + } + } + }; + + if let Some(object) = self.garage.object_table.get(&bucket_id, &key).await? { + let ov = object.versions().iter().rev().find(|v| v.is_complete()); + if let Some(ov) = ov { + if ov.uuid == ov_id { + let del_uuid = gen_uuid(); + let deleted_object = Object::new( + bucket_id, + key, + vec![ObjectVersion { + uuid: del_uuid, + timestamp: ov.timestamp + 1, + state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), + }], + ); + self.garage.object_table.insert(&deleted_object).await?; + *obj_dels += 1; + } + } + } + + Ok(()) + } } diff --git a/src/garage/admin/bucket.rs b/src/garage/admin/bucket.rs index 11bb8730..0781cb8b 100644 --- a/src/garage/admin/bucket.rs +++ b/src/garage/admin/bucket.rs @@ -73,6 +73,15 @@ impl AdminRpcHandler { .map(|x| x.filtered_values(&self.garage.system.ring.borrow())) .unwrap_or_default(); + let mpu_counters = self + .garage + .mpu_counter_table + .table + .get(&bucket_id, &EmptyKey) + .await? 
+ .map(|x| x.filtered_values(&self.garage.system.ring.borrow())) + .unwrap_or_default(); + let mut relevant_keys = HashMap::new(); for (k, _) in bucket .state @@ -112,6 +121,7 @@ impl AdminRpcHandler { bucket, relevant_keys, counters, + mpu_counters, }) } diff --git a/src/garage/admin/key.rs b/src/garage/admin/key.rs index cab13bcf..1c92670c 100644 --- a/src/garage/admin/key.rs +++ b/src/garage/admin/key.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use garage_table::*; -use garage_model::helper::error::Error; +use garage_model::helper::error::*; use garage_model::key_table::*; use crate::cli::*; @@ -14,7 +14,7 @@ impl AdminRpcHandler { match cmd { KeyOperation::List => self.handle_list_keys().await, KeyOperation::Info(query) => self.handle_key_info(query).await, - KeyOperation::New(query) => self.handle_create_key(query).await, + KeyOperation::Create(query) => self.handle_create_key(query).await, KeyOperation::Rename(query) => self.handle_rename_key(query).await, KeyOperation::Delete(query) => self.handle_delete_key(query).await, KeyOperation::Allow(query) => self.handle_allow_key(query).await, @@ -41,12 +41,17 @@ impl AdminRpcHandler { Ok(AdminRpc::KeyList(key_ids)) } - async fn handle_key_info(&self, query: &KeyOpt) -> Result<AdminRpc, Error> { - let key = self + async fn handle_key_info(&self, query: &KeyInfoOpt) -> Result<AdminRpc, Error> { + let mut key = self .garage .key_helper() .get_existing_matching_key(&query.key_pattern) .await?; + + if !query.show_secret { + key.state.as_option_mut().unwrap().secret_key = "(redacted)".into(); + } + self.key_info_result(key).await } @@ -118,11 +123,17 @@ impl AdminRpcHandler { } async fn handle_import_key(&self, query: &KeyImportOpt) -> Result<AdminRpc, Error> { + if !query.yes { + return Err(Error::BadRequest("This command is intended to re-import keys that were previously generated by Garage. If you want to create a new key, use `garage key new` instead. Add the --yes flag if you really want to re-import a key.".to_string())); + } + let prev_key = self.garage.key_table.get(&EmptyKey, &query.key_id).await?; if prev_key.is_some() { return Err(Error::BadRequest(format!("Key {} already exists in data store. Even if it is deleted, we can't let you create a new key with the same ID. 
Sorry.", query.key_id))); } - let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name); + + let imported_key = Key::import(&query.key_id, &query.secret_key, &query.name) + .ok_or_bad_request("Invalid key format")?; self.garage.key_table.insert(&imported_key).await?; self.key_info_result(imported_key).await diff --git a/src/garage/admin/mod.rs b/src/garage/admin/mod.rs index 2709f08a..b6f9c426 100644 --- a/src/garage/admin/mod.rs +++ b/src/garage/admin/mod.rs @@ -28,6 +28,7 @@ use garage_model::garage::Garage; use garage_model::helper::error::{Error, OkOrBadRequest}; use garage_model::key_table::*; use garage_model::migrate::Migrate; +use garage_model::s3::mpu_table::MultipartUpload; use garage_model::s3::version_table::Version; use crate::cli::*; @@ -53,6 +54,7 @@ pub enum AdminRpc { bucket: Bucket, relevant_keys: HashMap<String, Key>, counters: HashMap<String, i64>, + mpu_counters: HashMap<String, i64>, }, KeyList(Vec<(String, String)>), KeyInfo(Key, HashMap<Uuid, Bucket>), @@ -67,6 +69,7 @@ pub enum AdminRpc { hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>, + uploads: Vec<MultipartUpload>, }, } @@ -274,7 +277,7 @@ impl AdminRpcHandler { // Gather storage node and free space statistics let layout = &self.garage.system.ring.borrow().layout; let mut node_partition_count = HashMap::<Uuid, u64>::new(); - for short_id in layout.ring_assignation_data.iter() { + for short_id in layout.ring_assignment_data.iter() { let id = layout.node_id_vec[*short_id as usize]; *node_partition_count.entry(id).or_default() += 1; } diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index cb7a898c..48359614 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -85,7 +85,7 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) -> )); } _ => { - let new_role = match layout.staging.get(&adv.id) { + let new_role = match layout.staging_roles.get(&adv.id) { Some(NodeRoleV(Some(_))) => "(pending)", _ => "NO ROLE ASSIGNED", }; @@ -190,8 +190,9 @@ pub async fn cmd_admin( bucket, relevant_keys, counters, + mpu_counters, } => { - print_bucket_info(&bucket, &relevant_keys, &counters); + print_bucket_info(&bucket, &relevant_keys, &counters, &mpu_counters); } AdminRpc::KeyList(kl) => { print_key_list(kl); @@ -215,8 +216,9 @@ pub async fn cmd_admin( hash, refcount, versions, + uploads, } => { - print_block_info(hash, refcount, versions); + print_block_info(hash, refcount, versions, uploads); } r => { error!("Unexpected response: {:?}", r); diff --git a/src/garage/cli/convert_db.rs b/src/garage/cli/convert_db.rs new file mode 100644 index 00000000..3c6ce69c --- /dev/null +++ b/src/garage/cli/convert_db.rs @@ -0,0 +1,69 @@ +use std::path::PathBuf; + +use structopt::StructOpt; + +use garage_db::*; + +/// K2V command line interface +#[derive(StructOpt, Debug)] +pub struct ConvertDbOpt { + /// Input database path (not the same as metadata_dir, see + /// https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0) + #[structopt(short = "i")] + input_path: PathBuf, + /// Input database engine (sled, lmdb or sqlite; limited by db engines + /// enabled in this build) + #[structopt(short = "a")] + input_engine: String, + + /// Output database path + #[structopt(short = "o")] + output_path: PathBuf, + /// Output database engine + #[structopt(short = "b")] + output_engine: String, +} + +pub(crate) fn do_conversion(args: ConvertDbOpt) -> Result<()> { + let input = open_db(args.input_path, args.input_engine)?; + let 
output = open_db(args.output_path, args.output_engine)?; + output.import(&input)?; + Ok(()) +} + +fn open_db(path: PathBuf, engine: String) -> Result<Db> { + match engine.as_str() { + #[cfg(feature = "sled")] + "sled" => { + let db = sled_adapter::sled::Config::default().path(&path).open()?; + Ok(sled_adapter::SledDb::init(db)) + } + #[cfg(feature = "sqlite")] + "sqlite" | "sqlite3" | "rusqlite" => { + let db = sqlite_adapter::rusqlite::Connection::open(&path)?; + db.pragma_update(None, "journal_mode", &"WAL")?; + db.pragma_update(None, "synchronous", &"NORMAL")?; + Ok(sqlite_adapter::SqliteDb::init(db)) + } + #[cfg(feature = "lmdb")] + "lmdb" | "heed" => { + std::fs::create_dir_all(&path).map_err(|e| { + Error(format!("Unable to create LMDB data directory: {}", e).into()) + })?; + + let map_size = lmdb_adapter::recommended_map_size(); + + let mut env_builder = lmdb_adapter::heed::EnvOpenOptions::new(); + env_builder.max_dbs(100); + env_builder.map_size(map_size); + unsafe { + env_builder.flag(lmdb_adapter::heed::flags::Flags::MdbNoMetaSync); + } + let db = env_builder.open(&path)?; + Ok(lmdb_adapter::LmdbDb::init(db)) + } + e => Err(Error( + format!("Invalid or unsupported DB engine: {}", e).into(), + )), + } +} diff --git a/src/garage/cli/layout.rs b/src/garage/cli/layout.rs index dc5315a1..ce2b11e0 100644 --- a/src/garage/cli/layout.rs +++ b/src/garage/cli/layout.rs @@ -1,3 +1,5 @@ +use bytesize::ByteSize; + use format_table::format_table; use garage_util::crdt::Crdt; use garage_util::error::*; @@ -14,8 +16,8 @@ pub async fn cli_layout_command_dispatch( rpc_host: NodeID, ) -> Result<(), Error> { match cmd { - LayoutOperation::Assign(configure_opt) => { - cmd_assign_role(system_rpc_endpoint, rpc_host, configure_opt).await + LayoutOperation::Assign(assign_opt) => { + cmd_assign_role(system_rpc_endpoint, rpc_host, assign_opt).await } LayoutOperation::Remove(remove_opt) => { cmd_remove_role(system_rpc_endpoint, rpc_host, remove_opt).await @@ -27,6 +29,9 @@ pub async fn cli_layout_command_dispatch( LayoutOperation::Revert(revert_opt) => { cmd_revert_layout(system_rpc_endpoint, rpc_host, revert_opt).await } + LayoutOperation::Config(config_opt) => { + cmd_config_layout(system_rpc_endpoint, rpc_host, config_opt).await + } } } @@ -60,14 +65,14 @@ pub async fn cmd_assign_role( .collect::<Result<Vec<_>, _>>()?; let mut roles = layout.roles.clone(); - roles.merge(&layout.staging); + roles.merge(&layout.staging_roles); for replaced in args.replace.iter() { let replaced_node = find_matching_node(layout.node_ids().iter().cloned(), replaced)?; match roles.get(&replaced_node) { Some(NodeRoleV(Some(_))) => { layout - .staging + .staging_roles .merge(&roles.update_mutator(replaced_node, NodeRoleV(None))); } _ => { @@ -83,7 +88,7 @@ pub async fn cmd_assign_role( return Err(Error::Message( "-c and -g are mutually exclusive, please configure node either with c>0 to act as a storage node or with -g to act as a gateway node".into())); } - if args.capacity == Some(0) { + if args.capacity == Some(ByteSize::b(0)) { return Err(Error::Message("Invalid capacity value: 0".into())); } @@ -91,7 +96,7 @@ pub async fn cmd_assign_role( let new_entry = match roles.get(&added_node) { Some(NodeRoleV(Some(old))) => { let capacity = match args.capacity { - Some(c) => Some(c), + Some(c) => Some(c.as_u64()), None if args.gateway => None, None => old.capacity, }; @@ -108,7 +113,7 @@ pub async fn cmd_assign_role( } _ => { let capacity = match args.capacity { - Some(c) => Some(c), + Some(c) => Some(c.as_u64()), None if 
args.gateway => None, None => return Err(Error::Message( "Please specify a capacity with the -c flag, or set node explicitly as gateway with -g".into())), @@ -125,7 +130,7 @@ pub async fn cmd_assign_role( }; layout - .staging + .staging_roles .merge(&roles.update_mutator(added_node, NodeRoleV(Some(new_entry)))); } @@ -145,13 +150,13 @@ pub async fn cmd_remove_role( let mut layout = fetch_layout(rpc_cli, rpc_host).await?; let mut roles = layout.roles.clone(); - roles.merge(&layout.staging); + roles.merge(&layout.staging_roles); let deleted_node = find_matching_node(roles.items().iter().map(|(id, _, _)| *id), &args.node_id)?; layout - .staging + .staging_roles .merge(&roles.update_mutator(deleted_node, NodeRoleV(None))); send_layout(rpc_cli, rpc_host, layout).await?; @@ -166,40 +171,45 @@ pub async fn cmd_show_layout( rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID, ) -> Result<(), Error> { - let mut layout = fetch_layout(rpc_cli, rpc_host).await?; + let layout = fetch_layout(rpc_cli, rpc_host).await?; println!("==== CURRENT CLUSTER LAYOUT ===="); - if !print_cluster_layout(&layout) { - println!("No nodes currently have a role in the cluster."); - println!("See `garage status` to view available nodes."); - } + print_cluster_layout(&layout, "No nodes currently have a role in the cluster.\nSee `garage status` to view available nodes."); println!(); println!("Current cluster layout version: {}", layout.version); - if print_staging_role_changes(&layout) { - layout.roles.merge(&layout.staging); - - println!(); - println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ===="); - if !print_cluster_layout(&layout) { - println!("No nodes have a role in the new layout."); - } - println!(); + let has_role_changes = print_staging_role_changes(&layout); + if has_role_changes { + let v = layout.version; + let res_apply = layout.apply_staged_changes(Some(v + 1)); // this will print the stats of what partitions // will move around when we apply - if layout.calculate_partition_assignation() { - println!("To enact the staged role changes, type:"); - println!(); - println!(" garage layout apply --version {}", layout.version + 1); - println!(); - println!( - "You can also revert all proposed changes with: garage layout revert --version {}", - layout.version + 1 - ); - } else { - println!("Not enough nodes have an assigned role to maintain enough copies of data."); - println!("This new layout cannot yet be applied."); + match res_apply { + Ok((layout, msg)) => { + println!(); + println!("==== NEW CLUSTER LAYOUT AFTER APPLYING CHANGES ===="); + print_cluster_layout(&layout, "No nodes have a role in the new layout."); + println!(); + + for line in msg.iter() { + println!("{}", line); + } + println!("To enact the staged role changes, type:"); + println!(); + println!(" garage layout apply --version {}", v + 1); + println!(); + println!( + "You can also revert all proposed changes with: garage layout revert --version {}", + v + 1) + } + Err(e) => { + println!("Error while trying to compute the assignment: {}", e); + println!("This new layout cannot yet be applied."); + println!( + "You can also revert all proposed changes with: garage layout revert --version {}", + v + 1) + } } } @@ -213,11 +223,14 @@ pub async fn cmd_apply_layout( ) -> Result<(), Error> { let layout = fetch_layout(rpc_cli, rpc_host).await?; - let layout = layout.apply_staged_changes(apply_opt.version)?; + let (layout, msg) = layout.apply_staged_changes(apply_opt.version)?; + for line in msg.iter() { + println!("{}", line); + } 
send_layout(rpc_cli, rpc_host, layout).await?; - println!("New cluster layout with updated role assignation has been applied in cluster."); + println!("New cluster layout with updated role assignment has been applied in cluster."); println!("Data will now be moved around between nodes accordingly."); Ok(()) @@ -238,6 +251,52 @@ pub async fn cmd_revert_layout( Ok(()) } +pub async fn cmd_config_layout( + rpc_cli: &Endpoint<SystemRpc, ()>, + rpc_host: NodeID, + config_opt: ConfigLayoutOpt, +) -> Result<(), Error> { + let mut layout = fetch_layout(rpc_cli, rpc_host).await?; + + let mut did_something = false; + match config_opt.redundancy { + None => (), + Some(r_str) => { + let r = r_str + .parse::<ZoneRedundancy>() + .ok_or_message("invalid zone redundancy value")?; + if let ZoneRedundancy::AtLeast(r_int) = r { + if r_int > layout.replication_factor { + return Err(Error::Message(format!( + "The zone redundancy must be smaller or equal to the \ + replication factor ({}).", + layout.replication_factor + ))); + } else if r_int < 1 { + return Err(Error::Message( + "The zone redundancy must be at least 1.".into(), + )); + } + } + + layout + .staging_parameters + .update(LayoutParameters { zone_redundancy: r }); + println!("The zone redundancy parameter has been set to '{}'.", r); + did_something = true; + } + } + + if !did_something { + return Err(Error::Message( + "Please specify an action for `garage layout config`".into(), + )); + } + + send_layout(rpc_cli, rpc_host, layout).await?; + Ok(()) +} + // --- utility --- pub async fn fetch_layout( @@ -268,59 +327,84 @@ pub async fn send_layout( Ok(()) } -pub fn print_cluster_layout(layout: &ClusterLayout) -> bool { - let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; +pub fn print_cluster_layout(layout: &ClusterLayout, empty_msg: &str) { + let mut table = vec!["ID\tTags\tZone\tCapacity\tUsable capacity".to_string()]; for (id, _, role) in layout.roles.items().iter() { let role = match &role.0 { Some(r) => r, _ => continue, }; let tags = role.tags.join(","); - table.push(format!( - "{:?}\t{}\t{}\t{}", - id, - tags, - role.zone, - role.capacity_string() - )); + let usage = layout.get_node_usage(id).unwrap_or(0); + let capacity = layout.get_node_capacity(id).unwrap_or(0); + if capacity > 0 { + table.push(format!( + "{:?}\t{}\t{}\t{}\t{} ({:.1}%)", + id, + tags, + role.zone, + role.capacity_string(), + ByteSize::b(usage as u64 * layout.partition_size).to_string_as(false), + (100.0 * usage as f32 * layout.partition_size as f32) / (capacity as f32) + )); + } else { + table.push(format!( + "{:?}\t{}\t{}\t{}", + id, + tags, + role.zone, + role.capacity_string() + )); + }; } - if table.len() == 1 { - false - } else { + if table.len() > 1 { format_table(table); - true + println!(); + println!("Zone redundancy: {}", layout.parameters.zone_redundancy); + } else { + println!("{}", empty_msg); } } pub fn print_staging_role_changes(layout: &ClusterLayout) -> bool { - let has_changes = layout - .staging + let has_role_changes = layout + .staging_roles .items() .iter() .any(|(k, _, v)| layout.roles.get(k) != Some(v)); + let has_layout_changes = *layout.staging_parameters.get() != layout.parameters; - if has_changes { + if has_role_changes || has_layout_changes { println!(); println!("==== STAGED ROLE CHANGES ===="); - let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; - for (id, _, role) in layout.staging.items().iter() { - if layout.roles.get(id) == Some(role) { - continue; - } - if let Some(role) = &role.0 { - let tags = role.tags.join(","); - 
table.push(format!( - "{:?}\t{}\t{}\t{}", - id, - tags, - role.zone, - role.capacity_string() - )); - } else { - table.push(format!("{:?}\tREMOVED", id)); + if has_role_changes { + let mut table = vec!["ID\tTags\tZone\tCapacity".to_string()]; + for (id, _, role) in layout.staging_roles.items().iter() { + if layout.roles.get(id) == Some(role) { + continue; + } + if let Some(role) = &role.0 { + let tags = role.tags.join(","); + table.push(format!( + "{:?}\t{}\t{}\t{}", + id, + tags, + role.zone, + role.capacity_string() + )); + } else { + table.push(format!("{:?}\tREMOVED", id)); + } } + format_table(table); + println!(); + } + if has_layout_changes { + println!( + "Zone redundancy: {}", + layout.staging_parameters.get().zone_redundancy + ); } - format_table(table); true } else { false diff --git a/src/garage/cli/mod.rs b/src/garage/cli/mod.rs index 17a2d8ce..e131f62c 100644 --- a/src/garage/cli/mod.rs +++ b/src/garage/cli/mod.rs @@ -4,6 +4,8 @@ pub(crate) mod layout; pub(crate) mod structs; pub(crate) mod util; +pub(crate) mod convert_db; + pub(crate) use cmd::*; pub(crate) use init::*; pub(crate) use layout::*; diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index 6e585e53..aba57551 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -3,6 +3,8 @@ use structopt::StructOpt; use garage_util::version::garage_version; +use crate::cli::convert_db; + #[derive(StructOpt, Debug)] pub enum Command { /// Run Garage server @@ -17,7 +19,7 @@ pub enum Command { #[structopt(name = "node", version = garage_version())] Node(NodeOperation), - /// Operations on the assignation of node roles in the cluster layout + /// Operations on the assignment of node roles in the cluster layout #[structopt(name = "layout", version = garage_version())] Layout(LayoutOperation), @@ -54,6 +56,10 @@ pub enum Command { /// Low-level debug operations on data blocks #[structopt(name = "block", version = garage_version())] Block(BlockOperation), + + /// Convert metadata db between database engine formats + #[structopt(name = "convert-db", version = garage_version())] + ConvertDb(convert_db::ConvertDbOpt), } #[derive(StructOpt, Debug)] @@ -91,6 +97,10 @@ pub enum LayoutOperation { #[structopt(name = "remove", version = garage_version())] Remove(RemoveRoleOpt), + /// Configure parameters value for the layout computation + #[structopt(name = "config", version = garage_version())] + Config(ConfigLayoutOpt), + /// Show roles currently assigned to nodes and changes staged for commit #[structopt(name = "show", version = garage_version())] Show, @@ -114,9 +124,9 @@ pub struct AssignRoleOpt { #[structopt(short = "z", long = "zone")] pub(crate) zone: Option<String>, - /// Capacity (in relative terms, use 1 to represent your smallest server) + /// Storage capacity, in bytes (supported suffixes: B, KB, MB, GB, TB, PB) #[structopt(short = "c", long = "capacity")] - pub(crate) capacity: Option<u32>, + pub(crate) capacity: Option<bytesize::ByteSize>, /// Gateway-only node #[structopt(short = "g", long = "gateway")] @@ -138,6 +148,13 @@ pub struct RemoveRoleOpt { } #[derive(StructOpt, Debug)] +pub struct ConfigLayoutOpt { + /// Zone redundancy parameter ('none'/'max' or integer) + #[structopt(short = "r", long = "redundancy")] + pub(crate) redundancy: Option<String>, +} + +#[derive(StructOpt, Debug)] pub struct ApplyLayoutOpt { /// Version number of new configuration: this command will fail if /// it is not exactly 1 + the previous configuration's version @@ -317,11 +334,11 @@ pub enum KeyOperation { /// 
Get key info #[structopt(name = "info", version = garage_version())] - Info(KeyOpt), + Info(KeyInfoOpt), /// Create new key - #[structopt(name = "new", version = garage_version())] - New(KeyNewOpt), + #[structopt(name = "create", version = garage_version())] + Create(KeyNewOpt), /// Rename key #[structopt(name = "rename", version = garage_version())] @@ -345,15 +362,18 @@ pub enum KeyOperation { } #[derive(Serialize, Deserialize, StructOpt, Debug)] -pub struct KeyOpt { +pub struct KeyInfoOpt { /// ID or name of the key pub key_pattern: String, + /// Whether to display the secret key + #[structopt(long = "show-secret")] + pub show_secret: bool, } #[derive(Serialize, Deserialize, StructOpt, Debug)] pub struct KeyNewOpt { /// Name of the key - #[structopt(long = "name", default_value = "Unnamed key")] + #[structopt(default_value = "Unnamed key")] pub name: String, } @@ -397,6 +417,10 @@ pub struct KeyImportOpt { /// Key name #[structopt(short = "n", default_value = "Imported key")] pub name: String, + + /// Confirm key import + #[structopt(long = "yes")] + pub yes: bool, } #[derive(Serialize, Deserialize, StructOpt, Debug, Clone)] @@ -432,24 +456,30 @@ pub struct RepairOpt { #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] pub enum RepairWhat { - /// Only do a full sync of metadata tables + /// Do a full sync of metadata tables #[structopt(name = "tables", version = garage_version())] Tables, - /// Only repair (resync/rebalance) the set of stored blocks + /// Repair (resync/rebalance) the set of stored blocks in the cluster #[structopt(name = "blocks", version = garage_version())] Blocks, - /// Only redo the propagation of object deletions to the version table (slow) + /// Repropagate object deletions to the version table #[structopt(name = "versions", version = garage_version())] Versions, - /// Only redo the propagation of version deletions to the block ref table (extremely slow) + /// Repropagate object deletions to the multipart upload table + #[structopt(name = "mpu", version = garage_version())] + MultipartUploads, + /// Repropagate version deletions to the block ref table #[structopt(name = "block_refs", version = garage_version())] BlockRefs, - /// Verify integrity of all blocks on disc (extremely slow, i/o intensive) + /// Verify integrity of all blocks on disc #[structopt(name = "scrub", version = garage_version())] Scrub { #[structopt(subcommand)] cmd: ScrubCmd, }, + /// Rebalance data blocks among HDDs on individual nodes + #[structopt(name = "rebalance", version = garage_version())] + Rebalance, } #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)] diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 1140cf22..2232d395 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -12,8 +12,9 @@ use garage_block::manager::BlockResyncErrorInfo; use garage_model::bucket_table::*; use garage_model::key_table::*; -use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS}; -use garage_model::s3::version_table::Version; +use garage_model::s3::mpu_table::{self, MultipartUpload}; +use garage_model::s3::object_table; +use garage_model::s3::version_table::*; use crate::cli::structs::WorkerListOpt; @@ -135,6 +136,7 @@ pub fn print_bucket_info( bucket: &Bucket, relevant_keys: &HashMap<String, Key>, counters: &HashMap<String, i64>, + mpu_counters: &HashMap<String, i64>, ) { let key_name = |k| { relevant_keys @@ -148,7 +150,7 @@ pub fn print_bucket_info( Deletable::Deleted => println!("Bucket is deleted."), 
Deletable::Present(p) => { let size = - bytesize::ByteSize::b(counters.get(BYTES).cloned().unwrap_or_default() as u64); + bytesize::ByteSize::b(*counters.get(object_table::BYTES).unwrap_or(&0) as u64); println!( "\nSize: {} ({})", size.to_string_as(true), @@ -156,14 +158,22 @@ pub fn print_bucket_info( ); println!( "Objects: {}", - counters.get(OBJECTS).cloned().unwrap_or_default() + *counters.get(object_table::OBJECTS).unwrap_or(&0) + ); + println!( + "Unfinished uploads (multipart and non-multipart): {}", + *counters.get(object_table::UNFINISHED_UPLOADS).unwrap_or(&0) ); println!( "Unfinished multipart uploads: {}", - counters - .get(UNFINISHED_UPLOADS) - .cloned() - .unwrap_or_default() + *mpu_counters.get(mpu_table::UPLOADS).unwrap_or(&0) + ); + let mpu_size = + bytesize::ByteSize::b(*mpu_counters.get(mpu_table::BYTES).unwrap_or(&0) as u64); + println!( + "Size of unfinished multipart uploads: {} ({})", + mpu_size.to_string_as(true), + mpu_size.to_string_as(false), ); println!("\nWebsite access: {}", p.website_config.get().is_some()); @@ -390,29 +400,49 @@ pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) { format_table(table); } -pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) { +pub fn print_block_info( + hash: Hash, + refcount: u64, + versions: Vec<Result<Version, Uuid>>, + uploads: Vec<MultipartUpload>, +) { println!("Block hash: {}", hex::encode(hash.as_slice())); println!("Refcount: {}", refcount); println!(); - let mut table = vec!["Version\tBucket\tKey\tDeleted".into()]; + let mut table = vec!["Version\tBucket\tKey\tMPU\tDeleted".into()]; let mut nondeleted_count = 0; for v in versions.iter() { match v { Ok(ver) => { - table.push(format!( - "{:?}\t{:?}\t{}\t{:?}", - ver.uuid, - ver.bucket_id, - ver.key, - ver.deleted.get() - )); + match &ver.backlink { + VersionBacklink::Object { bucket_id, key } => { + table.push(format!( + "{:?}\t{:?}\t{}\t\t{:?}", + ver.uuid, + bucket_id, + key, + ver.deleted.get() + )); + } + VersionBacklink::MultipartUpload { upload_id } => { + let upload = uploads.iter().find(|x| x.upload_id == *upload_id); + table.push(format!( + "{:?}\t{:?}\t{}\t{:?}\t{:?}", + ver.uuid, + upload.map(|u| u.bucket_id).unwrap_or_default(), + upload.map(|u| u.key.as_str()).unwrap_or_default(), + upload_id, + ver.deleted.get() + )); + } + } if !ver.deleted.get() { nondeleted_count += 1; } } Err(vh) => { - table.push(format!("{:?}\t\t\tyes", vh)); + table.push(format!("{:?}\t\t\t\tyes", vh)); } } } diff --git a/src/garage/main.rs b/src/garage/main.rs index e8aee892..66403d05 100644 --- a/src/garage/main.rs +++ b/src/garage/main.rs @@ -17,6 +17,9 @@ compile_error!("Either bundled-libs or system-libs Cargo feature must be enabled #[cfg(all(feature = "bundled-libs", feature = "system-libs"))] compile_error!("Only one of bundled-libs and system-libs Cargo features must be enabled"); +#[cfg(not(any(feature = "lmdb", feature = "sled", feature = "sqlite")))] +compile_error!("Must activate the Cargo feature for at least one DB engine: lmdb, sled or sqlite."); + use std::net::SocketAddr; use std::path::PathBuf; @@ -173,6 +176,9 @@ async fn main() { Command::OfflineRepair(repair_opt) => { repair::offline::offline_repair(opt.config_file, opt.secrets, repair_opt).await } + Command::ConvertDb(conv_opt) => { + cli::convert_db::do_conversion(conv_opt).map_err(From::from) + } Command::Node(NodeOperation::NodeId(node_id_opt)) => { node_id_command(opt.config_file, node_id_opt.quiet) } diff --git a/src/garage/repair/online.rs 
b/src/garage/repair/online.rs index 0e14ed51..9e4de873 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -5,11 +5,16 @@ use async_trait::async_trait; use tokio::sync::watch; use garage_block::repair::ScrubWorkerCommand; + use garage_model::garage::Garage; use garage_model::s3::block_ref_table::*; +use garage_model::s3::mpu_table::*; use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; + +use garage_table::replication::*; use garage_table::*; + use garage_util::background::*; use garage_util::error::Error; use garage_util::migrate::Migrate; @@ -32,11 +37,15 @@ pub async fn launch_online_repair( } RepairWhat::Versions => { info!("Repairing the versions table"); - bg.spawn_worker(RepairVersionsWorker::new(garage.clone())); + bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairVersions)); + } + RepairWhat::MultipartUploads => { + info!("Repairing the multipart uploads table"); + bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairMpu)); } RepairWhat::BlockRefs => { info!("Repairing the block refs table"); - bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone())); + bg.spawn_worker(TableRepairWorker::new(garage.clone(), RepairBlockRefs)); } RepairWhat::Blocks => { info!("Repairing the stored blocks"); @@ -61,76 +70,82 @@ pub async fn launch_online_repair( info!("Sending command to scrub worker: {:?}", cmd); garage.block_manager.send_scrub_command(cmd).await?; } + RepairWhat::Rebalance => { + info!("Rebalancing the stored blocks among storage locations"); + bg.spawn_worker(garage_block::repair::RebalanceWorker::new( + garage.block_manager.clone(), + )); + } } Ok(()) } // ---- -struct RepairVersionsWorker { +#[async_trait] +trait TableRepair: Send + Sync + 'static { + type T: TableSchema; + + fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication>; + + async fn process( + &mut self, + garage: &Garage, + entry: <<Self as TableRepair>::T as TableSchema>::E, + ) -> Result<bool, Error>; +} + +struct TableRepairWorker<T: TableRepair> { garage: Arc<Garage>, pos: Vec<u8>, counter: usize, + repairs: usize, + inner: T, } -impl RepairVersionsWorker { - fn new(garage: Arc<Garage>) -> Self { +impl<R: TableRepair> TableRepairWorker<R> { + fn new(garage: Arc<Garage>, inner: R) -> Self { Self { garage, + inner, pos: vec![], counter: 0, + repairs: 0, } } } #[async_trait] -impl Worker for RepairVersionsWorker { +impl<R: TableRepair> Worker for TableRepairWorker<R> { fn name(&self) -> String { - "Version repair worker".into() + format!("{} repair worker", R::T::TABLE_NAME) } fn status(&self) -> WorkerStatus { WorkerStatus { - progress: Some(self.counter.to_string()), + progress: Some(format!("{} ({})", self.counter, self.repairs)), ..Default::default() } } async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { - let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? { + let (item_bytes, next_pos) = match R::table(&self.garage).data.store.get_gt(&self.pos)? 
{ Some((k, v)) => (v, k), None => { - info!("repair_versions: finished, done {}", self.counter); + info!( + "{}: finished, done {}, fixed {}", + self.name(), + self.counter, + self.repairs + ); return Ok(WorkerState::Done); } }; - let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?; - if !version.deleted.get() { - let object = self - .garage - .object_table - .get(&version.bucket_id, &version.key) - .await?; - let version_exists = match object { - Some(o) => o - .versions() - .iter() - .any(|x| x.uuid == version.uuid && x.state != ObjectVersionState::Aborted), - None => false, - }; - if !version_exists { - info!("Repair versions: marking version as deleted: {:?}", version); - self.garage - .version_table - .insert(&Version::new( - version.uuid, - version.bucket_id, - version.key, - true, - )) - .await?; - } + let entry = <R::T as TableSchema>::E::decode(&item_bytes) + .ok_or_message("Cannot decode table entry")?; + if self.inner.process(&self.garage, entry).await? { + self.repairs += 1; } self.counter += 1; @@ -146,77 +161,124 @@ impl Worker for RepairVersionsWorker { // ---- -struct RepairBlockrefsWorker { - garage: Arc<Garage>, - pos: Vec<u8>, - counter: usize, -} +struct RepairVersions; -impl RepairBlockrefsWorker { - fn new(garage: Arc<Garage>) -> Self { - Self { - garage, - pos: vec![], - counter: 0, +#[async_trait] +impl TableRepair for RepairVersions { + type T = VersionTable; + + fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> { + &garage.version_table + } + + async fn process(&mut self, garage: &Garage, version: Version) -> Result<bool, Error> { + if !version.deleted.get() { + let ref_exists = match &version.backlink { + VersionBacklink::Object { bucket_id, key } => garage + .object_table + .get(bucket_id, key) + .await? + .map(|o| { + o.versions().iter().any(|x| { + x.uuid == version.uuid && x.state != ObjectVersionState::Aborted + }) + }) + .unwrap_or(false), + VersionBacklink::MultipartUpload { upload_id } => garage + .mpu_table + .get(upload_id, &EmptyKey) + .await? + .map(|u| !u.deleted.get()) + .unwrap_or(false), + }; + + if !ref_exists { + info!("Repair versions: marking version as deleted: {:?}", version); + garage + .version_table + .insert(&Version::new(version.uuid, version.backlink, true)) + .await?; + return Ok(true); + } } + + Ok(false) } } +// ---- + +struct RepairBlockRefs; + #[async_trait] -impl Worker for RepairBlockrefsWorker { - fn name(&self) -> String { - "Block refs repair worker".into() - } +impl TableRepair for RepairBlockRefs { + type T = BlockRefTable; - fn status(&self) -> WorkerStatus { - WorkerStatus { - progress: Some(self.counter.to_string()), - ..Default::default() - } + fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> { + &garage.block_ref_table } - async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { - let (item_bytes, next_pos) = - match self.garage.block_ref_table.data.store.get_gt(&self.pos)? 
{ - Some((k, v)) => (v, k), - None => { - info!("repair_block_ref: finished, done {}", self.counter); - return Ok(WorkerState::Done); - } - }; - - let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?; + async fn process(&mut self, garage: &Garage, mut block_ref: BlockRef) -> Result<bool, Error> { if !block_ref.deleted.get() { - let version = self - .garage + let ref_exists = garage .version_table .get(&block_ref.version, &EmptyKey) - .await?; - // The version might not exist if it has been GC'ed - let ref_exists = version.map(|v| !v.deleted.get()).unwrap_or(false); + .await? + .map(|v| !v.deleted.get()) + .unwrap_or(false); + if !ref_exists { info!( "Repair block ref: marking block_ref as deleted: {:?}", block_ref ); - self.garage - .block_ref_table - .insert(&BlockRef { - block: block_ref.block, - version: block_ref.version, - deleted: true.into(), - }) - .await?; + block_ref.deleted.set(); + garage.block_ref_table.insert(&block_ref).await?; + return Ok(true); } } - self.counter += 1; - self.pos = next_pos; + Ok(false) + } +} - Ok(WorkerState::Busy) +// ---- + +struct RepairMpu; + +#[async_trait] +impl TableRepair for RepairMpu { + type T = MultipartUploadTable; + + fn table(garage: &Garage) -> &Table<Self::T, TableShardedReplication> { + &garage.mpu_table } - async fn wait_for_work(&mut self) -> WorkerState { - unreachable!() + async fn process(&mut self, garage: &Garage, mut mpu: MultipartUpload) -> Result<bool, Error> { + if !mpu.deleted.get() { + let ref_exists = garage + .object_table + .get(&mpu.bucket_id, &mpu.key) + .await? + .map(|o| { + o.versions() + .iter() + .any(|x| x.uuid == mpu.upload_id && x.is_uploading(Some(true))) + }) + .unwrap_or(false); + + if !ref_exists { + info!( + "Repair multipart uploads: marking mpu as deleted: {:?}", + mpu + ); + mpu.parts.clear(); + mpu.deleted.set(); + garage.mpu_table.insert(&mpu).await?; + return Ok(true); + } + } + + Ok(false) } } diff --git a/src/garage/server.rs b/src/garage/server.rs index 958089c6..3ad10b72 100644 --- a/src/garage/server.rs +++ b/src/garage/server.rs @@ -79,7 +79,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er "S3 API", tokio::spawn(S3ApiServer::run( garage.clone(), - *s3_bind_addr, + s3_bind_addr.clone(), config.s3_api.s3_region.clone(), wait_from(watch_cancel.clone()), )), @@ -94,7 +94,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er "K2V API", tokio::spawn(K2VApiServer::run( garage.clone(), - config.k2v_api.as_ref().unwrap().api_bind_addr, + config.k2v_api.as_ref().unwrap().api_bind_addr.clone(), config.s3_api.s3_region.clone(), wait_from(watch_cancel.clone()), )), @@ -110,7 +110,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er "Web", tokio::spawn(WebServer::run( garage.clone(), - web_config.bind_addr, + web_config.bind_addr.clone(), web_config.root_domain.clone(), wait_from(watch_cancel.clone()), )), @@ -121,7 +121,9 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er info!("Launching Admin API server..."); servers.push(( "Admin", - tokio::spawn(admin_server.run(*admin_bind_addr, wait_from(watch_cancel.clone()))), + tokio::spawn( + admin_server.run(admin_bind_addr.clone(), wait_from(watch_cancel.clone())), + ), )); } @@ -130,20 +132,27 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er warn!("This Garage version is built without the metrics feature"); } - // Stuff runs - - // When a cancel signal is sent, 
stuff stops - - // Collect stuff - for (desc, join_handle) in servers { - if let Err(e) = join_handle.await? { - error!("{} server exited with error: {}", desc, e); - } else { - info!("{} server exited without error.", desc); + if servers.is_empty() { + // Nothing runs except netapp (not in servers) + // Await shutdown signal before proceeding to shutting down netapp + wait_from(watch_cancel).await; + } else { + // Stuff runs + + // When a cancel signal is sent, stuff stops + + // Collect stuff + for (desc, join_handle) in servers { + if let Err(e) = join_handle.await? { + error!("{} server exited with error: {}", desc, e); + } else { + info!("{} server exited without error.", desc); + } } } // Remove RPC handlers for system to break reference cycles + info!("Deregistering RPC handlers for shutdown..."); garage.system.netapp.drop_all_handlers(); opentelemetry::global::shutdown_tracer_provider(); diff --git a/src/garage/tests/common/garage.rs b/src/garage/tests/common/garage.rs index 8aaf6f5b..d1f0867a 100644 --- a/src/garage/tests/common/garage.rs +++ b/src/garage/tests/common/garage.rs @@ -52,6 +52,7 @@ impl Instance { r#" metadata_dir = "{path}/meta" data_dir = "{path}/data" +db_engine = "lmdb" replication_mode = "1" @@ -141,7 +142,7 @@ api_bind_addr = "127.0.0.1:{admin_port}" self.command() .args(["layout", "assign"]) .arg(node_short_id) - .args(["-c", "1", "-z", "unzonned"]) + .args(["-c", "1G", "-z", "unzonned"]) .quiet() .expect_success_status("Could not assign garage node layout"); self.command() @@ -186,9 +187,9 @@ api_bind_addr = "127.0.0.1:{admin_port}" let mut key = Key::default(); let mut cmd = self.command(); - let base = cmd.args(["key", "new"]); + let base = cmd.args(["key", "create"]); let with_name = match maybe_name { - Some(name) => base.args(["--name", name]), + Some(name) => base.args([name]), None => base, }; diff --git a/src/garage/tests/common/mod.rs b/src/garage/tests/common/mod.rs index 54efd1ea..1273bad1 100644 --- a/src/garage/tests/common/mod.rs +++ b/src/garage/tests/common/mod.rs @@ -1,6 +1,7 @@ use aws_sdk_s3::config::Region; use aws_sdk_s3::Client; use ext::*; +#[cfg(feature = "k2v")] use k2v_client::K2vClient; #[macro_use] @@ -21,6 +22,7 @@ pub struct Context { pub key: garage::Key, pub client: Client, pub custom_request: CustomRequester, + #[cfg(feature = "k2v")] pub k2v: K2VContext, } @@ -35,6 +37,7 @@ impl Context { let key = garage.key(None); let client = client::build_client(&key); let custom_request = CustomRequester::new_s3(garage, &key); + #[cfg(feature = "k2v")] let k2v_request = CustomRequester::new_k2v(garage, &key); Context { @@ -42,6 +45,7 @@ impl Context { client, key, custom_request, + #[cfg(feature = "k2v")] k2v: K2VContext { request: k2v_request, }, @@ -72,6 +76,7 @@ impl Context { } /// Build a K2vClient for a given bucket + #[cfg(feature = "k2v")] pub fn k2v_client(&self, bucket: &str) -> K2vClient { let config = k2v_client::K2vClientConfig { region: REGION.to_string(), diff --git a/src/garage/tests/k2v/batch.rs b/src/garage/tests/k2v/batch.rs index 595d0ba8..71de91bf 100644 --- a/src/garage/tests/k2v/batch.rs +++ b/src/garage/tests/k2v/batch.rs @@ -6,7 +6,7 @@ use assert_json_diff::assert_json_eq; use base64::prelude::*; use serde_json::json; -use super::json_body; +use crate::json_body; use hyper::{Method, StatusCode}; #[tokio::test] diff --git a/src/garage/tests/k2v/item.rs b/src/garage/tests/k2v/item.rs index 588836c7..20add889 100644 --- a/src/garage/tests/k2v/item.rs +++ b/src/garage/tests/k2v/item.rs @@ -6,7 +6,7 @@ use 
assert_json_diff::assert_json_eq; use base64::prelude::*; use serde_json::json; -use super::json_body; +use crate::json_body; use hyper::{Method, StatusCode}; #[tokio::test] @@ -44,6 +44,7 @@ async fn test_items_and_indices() { let content = format!("{}: hello world", sk).into_bytes(); let content2 = format!("{}: hello universe", sk).into_bytes(); let content3 = format!("{}: concurrent value", sk).into_bytes(); + eprintln!("test iteration {}: {}", i, sk); // Put initially, no causality token let res = ctx @@ -89,7 +90,7 @@ async fn test_items_and_indices() { assert_eq!(res_body, content); // ReadIndex -- now there should be some stuff - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let res = ctx .k2v .request @@ -158,7 +159,7 @@ async fn test_items_and_indices() { assert_eq!(res_body, content2); // ReadIndex -- now there should be some stuff - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let res = ctx .k2v .request @@ -230,7 +231,7 @@ async fn test_items_and_indices() { ); // ReadIndex -- now there should be some stuff - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let res = ctx .k2v .request @@ -299,7 +300,7 @@ async fn test_items_and_indices() { assert_eq!(res.status(), StatusCode::NO_CONTENT); // ReadIndex -- now there should be some stuff - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let res = ctx .k2v .request diff --git a/src/garage/tests/k2v/mod.rs b/src/garage/tests/k2v/mod.rs index a009460e..241e3dc2 100644 --- a/src/garage/tests/k2v/mod.rs +++ b/src/garage/tests/k2v/mod.rs @@ -3,16 +3,3 @@ pub mod errorcodes; pub mod item; pub mod poll; pub mod simple; - -use hyper::{Body, Response}; - -pub async fn json_body(res: Response<Body>) -> serde_json::Value { - let res_body: serde_json::Value = serde_json::from_slice( - &hyper::body::to_bytes(res.into_body()) - .await - .unwrap() - .to_vec()[..], - ) - .unwrap(); - res_body -} diff --git a/src/garage/tests/k2v/poll.rs b/src/garage/tests/k2v/poll.rs index dd44aed9..452317c2 100644 --- a/src/garage/tests/k2v/poll.rs +++ b/src/garage/tests/k2v/poll.rs @@ -5,8 +5,8 @@ use std::time::Duration; use assert_json_diff::assert_json_eq; use serde_json::json; -use super::json_body; use crate::common; +use crate::json_body; #[tokio::test] async fn test_poll_item() { diff --git a/src/garage/tests/lib.rs b/src/garage/tests/lib.rs index e450baac..ab92bc0a 100644 --- a/src/garage/tests/lib.rs +++ b/src/garage/tests/lib.rs @@ -10,3 +10,16 @@ mod s3; mod k2v; #[cfg(feature = "k2v")] mod k2v_client; + +use hyper::{Body, Response}; + +pub async fn json_body(res: Response<Body>) -> serde_json::Value { + let res_body: serde_json::Value = serde_json::from_slice( + &hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec()[..], + ) + .unwrap(); + res_body +} diff --git a/src/garage/tests/s3/multipart.rs b/src/garage/tests/s3/multipart.rs index aeff94b4..09ae5e5b 100644 --- a/src/garage/tests/s3/multipart.rs +++ b/src/garage/tests/s3/multipart.rs @@ -6,6 +6,190 @@ const SZ_5MB: usize = 5 * 1024 * 1024; const SZ_10MB: usize = 10 * 1024 * 1024; #[tokio::test] +async fn test_multipart_upload() { + let ctx = common::context(); + let bucket = ctx.create_bucket("testmpu"); + + let u1 = vec![0x11; SZ_5MB]; + let u2 = vec![0x22; SZ_5MB]; + let u3 = vec![0x33; SZ_5MB]; + let u4 = vec![0x44; SZ_5MB]; + let u5 = 
vec![0x55; SZ_5MB]; + + let up = ctx + .client + .create_multipart_upload() + .bucket(&bucket) + .key("a") + .send() + .await + .unwrap(); + assert!(up.upload_id.is_some()); + + let uid = up.upload_id.as_ref().unwrap(); + + let p3 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(3) + .body(ByteStream::from(u3.clone())) + .send() + .await + .unwrap(); + + let _p1 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(1) + .body(ByteStream::from(u1)) + .send() + .await + .unwrap(); + + let _p4 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(4) + .body(ByteStream::from(u4)) + .send() + .await + .unwrap(); + + let p1bis = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(1) + .body(ByteStream::from(u2.clone())) + .send() + .await + .unwrap(); + + let p6 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(6) + .body(ByteStream::from(u5.clone())) + .send() + .await + .unwrap(); + + { + let r = ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .send() + .await + .unwrap(); + assert_eq!(r.parts.unwrap().len(), 4); + } + + let cmp = CompletedMultipartUpload::builder() + .parts( + CompletedPart::builder() + .part_number(1) + .e_tag(p1bis.e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(3) + .e_tag(p3.e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(6) + .e_tag(p6.e_tag.unwrap()) + .build(), + ) + .build(); + + ctx.client + .complete_multipart_upload() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .multipart_upload(cmp) + .send() + .await + .unwrap(); + + // The multipart upload must not appear anymore + assert!(ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .send() + .await + .is_err()); + + { + // The object must appear as a regular object + let r = ctx + .client + .head_object() + .bucket(&bucket) + .key("a") + .send() + .await + .unwrap(); + + assert_eq!(r.content_length, (SZ_5MB * 3) as i64); + } + + { + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key("a") + .send() + .await + .unwrap(); + + assert_bytes_eq!(o.body, &[&u2[..], &u3[..], &u5[..]].concat()); + } + + { + for (part_number, data) in [(1, &u2), (2, &u3), (3, &u5)] { + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key("a") + .part_number(part_number) + .send() + .await + .unwrap(); + + eprintln!("get_object with part_number = {}", part_number); + assert_eq!(o.content_length, SZ_5MB as i64); + assert_bytes_eq!(o.body, data); + } + } +} + +#[tokio::test] async fn test_uploadlistpart() { let ctx = common::context(); let bucket = ctx.create_bucket("uploadpart"); @@ -65,7 +249,8 @@ async fn test_uploadlistpart() { let ps = r.parts.unwrap(); assert_eq!(ps.len(), 1); - let fp = ps.iter().find(|x| x.part_number == 2).unwrap(); + assert_eq!(ps[0].part_number, 2); + let fp = &ps[0]; assert!(fp.last_modified.is_some()); assert_eq!( fp.e_tag.as_ref().unwrap(), @@ -100,13 +285,24 @@ async fn test_uploadlistpart() { let ps = r.parts.unwrap(); assert_eq!(ps.len(), 2); - let fp = ps.iter().find(|x| x.part_number == 1).unwrap(); + + assert_eq!(ps[0].part_number, 1); + let fp = &ps[0]; assert!(fp.last_modified.is_some()); assert_eq!( fp.e_tag.as_ref().unwrap(), "\"3c484266f9315485694556e6c693bfa2\"" ); assert_eq!(fp.size, SZ_5MB as i64); + + 
assert_eq!(ps[1].part_number, 2); + let sp = &ps[1]; + assert!(sp.last_modified.is_some()); + assert_eq!( + sp.e_tag.as_ref().unwrap(), + "\"3366bb9dcf710d6801b5926467d02e19\"" + ); + assert_eq!(sp.size, SZ_5MB as i64); } { @@ -123,12 +319,19 @@ async fn test_uploadlistpart() { .unwrap(); assert!(r.part_number_marker.is_none()); - assert!(r.next_part_number_marker.is_some()); + assert_eq!(r.next_part_number_marker.as_deref(), Some("1")); assert_eq!(r.max_parts, 1_i32); assert!(r.is_truncated); assert_eq!(r.key.unwrap(), "a"); assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str()); - assert_eq!(r.parts.unwrap().len(), 1); + let parts = r.parts.unwrap(); + assert_eq!(parts.len(), 1); + let fp = &parts[0]; + assert_eq!(fp.part_number, 1); + assert_eq!( + fp.e_tag.as_ref().unwrap(), + "\"3c484266f9315485694556e6c693bfa2\"" + ); let r2 = ctx .client @@ -147,10 +350,18 @@ async fn test_uploadlistpart() { r.next_part_number_marker.as_ref().unwrap() ); assert_eq!(r2.max_parts, 1_i32); - assert!(r2.is_truncated); assert_eq!(r2.key.unwrap(), "a"); assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str()); - assert_eq!(r2.parts.unwrap().len(), 1); + let parts = r2.parts.unwrap(); + assert_eq!(parts.len(), 1); + let fp = &parts[0]; + assert_eq!(fp.part_number, 2); + assert_eq!( + fp.e_tag.as_ref().unwrap(), + "\"3366bb9dcf710d6801b5926467d02e19\"" + ); + //assert!(r2.is_truncated); // WHY? (this was the test before) + assert!(!r2.is_truncated); } let cmp = CompletedMultipartUpload::builder() diff --git a/src/garage/tests/s3/website.rs b/src/garage/tests/s3/website.rs index ab9b12b9..eeafb5fa 100644 --- a/src/garage/tests/s3/website.rs +++ b/src/garage/tests/s3/website.rs @@ -1,6 +1,6 @@ use crate::common; use crate::common::ext::*; -use crate::k2v::json_body; +use crate::json_body; use assert_json_diff::assert_json_eq; use aws_sdk_s3::{ @@ -72,7 +72,7 @@ async fn test_website() { res_body, json!({ "code": "InvalidRequest", - "message": "Bad request: Bucket 'my-website' is not authorized for website hosting", + "message": "Bad request: Domain 'my-website' is not managed by Garage", "region": "garage-integ-test", "path": "/check", }) @@ -91,24 +91,29 @@ async fn test_website() { BODY.as_ref() ); - let admin_req = || { - Request::builder() - .method("GET") - .uri(format!( - "http://127.0.0.1:{0}/check?domain={1}", - ctx.garage.admin_port, - BCKT_NAME.to_string() - )) - .body(Body::empty()) - .unwrap() - }; - - let mut admin_resp = client.request(admin_req()).await.unwrap(); - assert_eq!(admin_resp.status(), StatusCode::OK); - assert_eq!( - to_bytes(admin_resp.body_mut()).await.unwrap().as_ref(), - format!("Bucket '{BCKT_NAME}' is authorized for website hosting").as_bytes() - ); + for bname in [ + BCKT_NAME.to_string(), + format!("{BCKT_NAME}.web.garage"), + format!("{BCKT_NAME}.s3.garage"), + ] { + let admin_req = || { + Request::builder() + .method("GET") + .uri(format!( + "http://127.0.0.1:{0}/check?domain={1}", + ctx.garage.admin_port, bname + )) + .body(Body::empty()) + .unwrap() + }; + + let mut admin_resp = client.request(admin_req()).await.unwrap(); + assert_eq!(admin_resp.status(), StatusCode::OK); + assert_eq!( + to_bytes(admin_resp.body_mut()).await.unwrap().as_ref(), + format!("Domain '{bname}' is managed by Garage").as_bytes() + ); + } ctx.garage .command() @@ -142,7 +147,7 @@ async fn test_website() { res_body, json!({ "code": "InvalidRequest", - "message": "Bad request: Bucket 'my-website' is not authorized for website hosting", + "message": "Bad request: Domain 'my-website' is not managed 
by Garage", "region": "garage-integ-test", "path": "/check", }) @@ -397,7 +402,7 @@ async fn test_website_s3_api() { } #[tokio::test] -async fn test_website_check_website_enabled() { +async fn test_website_check_domain() { let ctx = common::context(); let client = Client::new(); @@ -435,13 +440,13 @@ async fn test_website_check_website_enabled() { }; let admin_resp = client.request(admin_req()).await.unwrap(); - assert_eq!(admin_resp.status(), StatusCode::NOT_FOUND); + assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST); let res_body = json_body(admin_resp).await; assert_json_eq!( res_body, json!({ - "code": "NoSuchBucket", - "message": "Bucket not found: ", + "code": "InvalidRequest", + "message": "Bad request: Domain '' is not managed by Garage", "region": "garage-integ-test", "path": "/check", }) @@ -459,13 +464,13 @@ async fn test_website_check_website_enabled() { }; let admin_resp = client.request(admin_req()).await.unwrap(); - assert_eq!(admin_resp.status(), StatusCode::NOT_FOUND); + assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST); let res_body = json_body(admin_resp).await; assert_json_eq!( res_body, json!({ - "code": "NoSuchBucket", - "message": "Bucket not found: foobar", + "code": "InvalidRequest", + "message": "Bad request: Domain 'foobar' is not managed by Garage", "region": "garage-integ-test", "path": "/check", }) @@ -483,13 +488,13 @@ async fn test_website_check_website_enabled() { }; let admin_resp = client.request(admin_req()).await.unwrap(); - assert_eq!(admin_resp.status(), StatusCode::NOT_FOUND); + assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST); let res_body = json_body(admin_resp).await; assert_json_eq!( res_body, json!({ - "code": "NoSuchBucket", - "message": "Bucket not found: ☹", + "code": "InvalidRequest", + "message": "Bad request: Domain '☹' is not managed by Garage", "region": "garage-integ-test", "path": "/check", }) diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 6dc954d4..42b7ffdb 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_model" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" @@ -23,6 +23,7 @@ garage_util.workspace = true async-trait = "0.1.7" arc-swap = "1.0" blake2 = "0.10" +chrono = "0.4" err-derive = "0.3" hex = "0.4" base64 = "0.21" @@ -38,10 +39,10 @@ futures-util = "0.3" tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] } opentelemetry = "0.17" -netapp = "0.5" +netapp = "0.10" [features] -default = [ "sled" ] +default = [ "sled", "lmdb", "sqlite" ] k2v = [ "garage_util/k2v" ] lmdb = [ "garage_db/lmdb" ] sled = [ "garage_db/sled" ] diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index ac163736..4c48a76f 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -48,6 +48,9 @@ mod v08 { pub website_config: crdt::Lww<Option<WebsiteConfig>>, /// CORS rules pub cors_config: crdt::Lww<Option<Vec<CorsRule>>>, + /// Lifecycle configuration + #[serde(default)] + pub lifecycle_config: crdt::Lww<Option<Vec<LifecycleRule>>>, /// Bucket quotas #[serde(default)] pub quotas: crdt::Lww<BucketQuotas>, @@ -69,6 +72,42 @@ mod v08 { pub expose_headers: Vec<String>, } + /// Lifecycle configuration rule + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct LifecycleRule { + /// The ID of the rule + pub id: Option<String>, + /// Whether the rule is 
active + pub enabled: bool, + /// The filter to check whether rule applies to a given object + pub filter: LifecycleFilter, + /// Number of days after which incomplete multipart uploads are aborted + pub abort_incomplete_mpu_days: Option<usize>, + /// Expiration policy for stored objects + pub expiration: Option<LifecycleExpiration>, + } + + /// A lifecycle filter is a set of conditions that must all be true. + /// For each condition, if it is None, it is not verified (always true), + /// and if it is Some(x), then it is verified for value x + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Default)] + pub struct LifecycleFilter { + /// If Some(x), object key has to start with prefix x + pub prefix: Option<String>, + /// If Some(x), object size has to be more than x + pub size_gt: Option<u64>, + /// If Some(x), object size has to be less than x + pub size_lt: Option<u64>, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub enum LifecycleExpiration { + /// Objects expire x days after they were created + AfterDays(usize), + /// Objects expire at date x (must be in yyyy-mm-dd format) + AtDate(String), + } + #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] pub struct BucketQuotas { /// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket) @@ -88,7 +127,7 @@ impl AutoCrdt for BucketQuotas { impl BucketParams { /// Create an empty BucketParams with no authorized keys and no website accesss - pub fn new() -> Self { + fn new() -> Self { BucketParams { creation_date: now_msec(), authorized_keys: crdt::Map::new(), @@ -96,6 +135,7 @@ impl BucketParams { local_aliases: crdt::LwwMap::new(), website_config: crdt::Lww::new(None), cors_config: crdt::Lww::new(None), + lifecycle_config: crdt::Lww::new(None), quotas: crdt::Lww::new(BucketQuotas::default()), } } @@ -111,10 +151,25 @@ impl Crdt for BucketParams { self.website_config.merge(&o.website_config); self.cors_config.merge(&o.cors_config); + self.lifecycle_config.merge(&o.lifecycle_config); self.quotas.merge(&o.quotas); } } +pub fn parse_lifecycle_date(date: &str) -> Result<chrono::NaiveDate, &'static str> { + use chrono::prelude::*; + + if let Ok(datetime) = NaiveDateTime::parse_from_str(date, "%Y-%m-%dT%H:%M:%SZ") { + if datetime.time() == NaiveTime::MIN { + Ok(datetime.date()) + } else { + Err("date must be at midnight") + } + } else { + NaiveDate::parse_from_str(date, "%Y-%m-%d").map_err(|_| "date has invalid format") + } +} + impl Default for Bucket { fn default() -> Self { Self::new() diff --git a/src/model/garage.rs b/src/model/garage.rs index 3daa1b33..8c9a3af3 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -7,6 +7,7 @@ use garage_db as db; use garage_util::background::*; use garage_util::config::*; use garage_util::error::*; +use garage_util::persister::PersisterShared; use garage_rpc::replication_mode::ReplicationMode; use garage_rpc::system::System; @@ -17,6 +18,8 @@ use garage_table::replication::TableShardedReplication; use garage_table::*; use crate::s3::block_ref_table::*; +use crate::s3::lifecycle_worker; +use crate::s3::mpu_table::*; use crate::s3::object_table::*; use crate::s3::version_table::*; @@ -57,11 +60,18 @@ pub struct Garage { pub object_table: Arc<Table<ObjectTable, TableShardedReplication>>, /// Counting table containing object counters pub object_counter_table: Arc<IndexCounter<Object>>, + /// Table containing S3 multipart uploads + pub mpu_table: Arc<Table<MultipartUploadTable, TableShardedReplication>>, + 
/// Counting table containing multipart object counters + pub mpu_counter_table: Arc<IndexCounter<MultipartUpload>>, /// Table containing S3 object versions pub version_table: Arc<Table<VersionTable, TableShardedReplication>>, /// Table containing S3 block references (not blocks themselves) pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>, + /// Persister for lifecycle worker info + pub lifecycle_persister: PersisterShared<lifecycle_worker::LifecycleWorkerPersisted>, + #[cfg(feature = "k2v")] pub k2v: GarageK2V, } @@ -82,8 +92,22 @@ impl Garage { // Create meta dir and data dir if they don't exist already std::fs::create_dir_all(&config.metadata_dir) .ok_or_message("Unable to create Garage metadata directory")?; - std::fs::create_dir_all(&config.data_dir) - .ok_or_message("Unable to create Garage data directory")?; + match &config.data_dir { + DataDirEnum::Single(data_dir) => { + std::fs::create_dir_all(data_dir).ok_or_message(format!( + "Unable to create Garage data directory: {}", + data_dir.to_string_lossy() + ))?; + } + DataDirEnum::Multiple(data_dirs) => { + for dir in data_dirs { + std::fs::create_dir_all(&dir.path).ok_or_message(format!( + "Unable to create Garage data directory: {}", + dir.path.to_string_lossy() + ))?; + } + } + } info!("Opening database..."); let mut db_path = config.metadata_dir.clone(); @@ -91,11 +115,16 @@ impl Garage { // ---- Sled DB ---- #[cfg(feature = "sled")] "sled" => { + if config.metadata_fsync { + return Err(Error::Message(format!( + "`metadata_fsync = true` is not supported with the Sled database engine" + ))); + } db_path.push("db"); info!("Opening Sled database at: {}", db_path.display()); let db = db::sled_adapter::sled::Config::default() .path(&db_path) - .cache_capacity(config.sled_cache_capacity) + .cache_capacity(config.sled_cache_capacity as u64) .flush_every_ms(Some(config.sled_flush_every_ms)) .open() .ok_or_message("Unable to open sled DB")?; @@ -109,6 +138,15 @@ impl Garage { db_path.push("db.sqlite"); info!("Opening Sqlite database at: {}", db_path.display()); let db = db::sqlite_adapter::rusqlite::Connection::open(db_path) + .and_then(|db| { + db.pragma_update(None, "journal_mode", &"WAL")?; + if config.metadata_fsync { + db.pragma_update(None, "synchronous", &"NORMAL")?; + } else { + db.pragma_update(None, "synchronous", &"OFF")?; + } + Ok(db) + }) .ok_or_message("Unable to open sqlite DB")?; db::sqlite_adapter::SqliteDb::init(db) } @@ -125,7 +163,10 @@ impl Garage { info!("Opening LMDB database at: {}", db_path.display()); std::fs::create_dir_all(&db_path) .ok_or_message("Unable to create LMDB data directory")?; - let map_size = garage_db::lmdb_adapter::recommended_map_size(); + let map_size = match config.lmdb_map_size { + v if v == usize::default() => garage_db::lmdb_adapter::recommended_map_size(), + v => v - (v % 4096), + }; use db::lmdb_adapter::heed; let mut env_builder = heed::EnvOpenOptions::new(); @@ -133,8 +174,10 @@ impl Garage { env_builder.max_readers(500); env_builder.map_size(map_size); unsafe { - env_builder.flag(heed::flags::Flags::MdbNoSync); env_builder.flag(heed::flags::Flags::MdbNoMetaSync); + if !config.metadata_fsync { + env_builder.flag(heed::flags::Flags::MdbNoSync); + } } let db = match env_builder.open(&db_path) { Err(heed::Error::Io(e)) if e.kind() == std::io::ErrorKind::OutOfMemory => { @@ -142,6 +185,7 @@ impl Garage { "OutOfMemory error while trying to open LMDB database. 
This can happen \ if your operating system is not allowing you to use sufficient virtual \ memory address space. Please check that no limit is set (ulimit -v). \ + You may also try to set a smaller `lmdb_map_size` configuration parameter. \ On 32-bit machines, you should probably switch to another database engine.".into())) } x => x.ok_or_message("Unable to open LMDB DB")?, @@ -178,6 +222,9 @@ impl Garage { let replication_mode = ReplicationMode::parse(&config.replication_mode) .ok_or_message("Invalid replication_mode in config file.")?; + info!("Initialize background variable system..."); + let mut bg_vars = vars::BgVars::new(); + info!("Initialize membership management system..."); let system = System::new(network_key, replication_mode, &config)?; @@ -204,10 +251,12 @@ impl Garage { let block_manager = BlockManager::new( &db, config.data_dir.clone(), + config.data_fsync, config.compression_level, data_rep_param, system.clone(), - ); + )?; + block_manager.register_bg_vars(&mut bg_vars); // ---- admin tables ---- info!("Initialize bucket_table..."); @@ -244,6 +293,20 @@ impl Garage { &db, ); + info!("Initialize multipart upload counter table..."); + let mpu_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db); + + info!("Initialize multipart upload table..."); + let mpu_table = Table::new( + MultipartUploadTable { + version_table: version_table.clone(), + mpu_counter_table: mpu_counter_table.clone(), + }, + meta_rep_param.clone(), + system.clone(), + &db, + ); + info!("Initialize object counter table..."); let object_counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), &db); @@ -252,6 +315,7 @@ impl Garage { let object_table = Table::new( ObjectTable { version_table: version_table.clone(), + mpu_table: mpu_table.clone(), object_counter_table: object_counter_table.clone(), }, meta_rep_param.clone(), @@ -259,14 +323,15 @@ impl Garage { &db, ); + info!("Load lifecycle worker state..."); + let lifecycle_persister = + PersisterShared::new(&system.metadata_dir, "lifecycle_worker_state"); + lifecycle_worker::register_bg_vars(&lifecycle_persister, &mut bg_vars); + // ---- K2V ---- #[cfg(feature = "k2v")] let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param); - // Initialize bg vars - let mut bg_vars = vars::BgVars::new(); - block_manager.register_bg_vars(&mut bg_vars); - // -- done -- Ok(Arc::new(Self { config, @@ -280,14 +345,17 @@ impl Garage { key_table, object_table, object_counter_table, + mpu_table, + mpu_counter_table, version_table, block_ref_table, + lifecycle_persister, #[cfg(feature = "k2v")] k2v, })) } - pub fn spawn_workers(&self, bg: &BackgroundRunner) { + pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) { self.block_manager.spawn_workers(bg); self.bucket_table.spawn_workers(bg); @@ -296,9 +364,16 @@ impl Garage { self.object_table.spawn_workers(bg); self.object_counter_table.spawn_workers(bg); + self.mpu_table.spawn_workers(bg); + self.mpu_counter_table.spawn_workers(bg); self.version_table.spawn_workers(bg); self.block_ref_table.spawn_workers(bg); + bg.spawn_worker(lifecycle_worker::LifecycleWorker::new( + self.clone(), + self.lifecycle_persister.clone(), + )); + #[cfg(feature = "k2v")] self.k2v.spawn_workers(bg); } diff --git a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index 4a488d7f..576d03f3 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -478,7 +478,9 @@ impl<'a> BucketHelper<'a> { // ---- /// Deletes all incomplete multipart uploads that are older than a certain time. 
- /// Returns the number of uploads aborted + /// Returns the number of uploads aborted. + /// This will also include non-multipart uploads, which may be lingering + /// after a node crash pub async fn cleanup_incomplete_uploads( &self, bucket_id: &Uuid, @@ -496,7 +498,9 @@ impl<'a> BucketHelper<'a> { .get_range( bucket_id, start, - Some(ObjectFilter::IsUploading), + Some(ObjectFilter::IsUploading { + check_multipart: None, + }), 1000, EnumerationOrder::Forward, ) @@ -508,7 +512,7 @@ impl<'a> BucketHelper<'a> { let aborted_versions = object .versions() .iter() - .filter(|v| v.is_uploading() && v.timestamp < older_than) + .filter(|v| v.is_uploading(None) && v.timestamp < older_than) .map(|v| ObjectVersion { state: ObjectVersionState::Aborted, uuid: v.uuid, diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index 35d6596d..a46c165f 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -294,7 +294,7 @@ impl<T: CountedItem> IndexCounter<T> { let counter_entry = local_counter.into_counter_entry(self.this_node); self.local_counter .db() - .transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?; + .transaction(|tx| self.table.queue_insert(tx, &counter_entry))?; next_start = Some(local_counter_k); } @@ -360,7 +360,7 @@ impl<T: CountedItem> IndexCounter<T> { let counter_entry = local_counter.into_counter_entry(self.this_node); self.local_counter .db() - .transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?; + .transaction(|tx| self.table.queue_insert(tx, &counter_entry))?; next_start = Some(counted_entry_k); } diff --git a/src/model/key_table.rs b/src/model/key_table.rs index bb5334a3..a9762f1b 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -149,11 +149,19 @@ impl Key { } /// Import a key from it's parts - pub fn import(key_id: &str, secret_key: &str, name: &str) -> Self { - Self { + pub fn import(key_id: &str, secret_key: &str, name: &str) -> Result<Self, &'static str> { + if key_id.len() != 26 || &key_id[..2] != "GK" || hex::decode(&key_id[2..]).is_err() { + return Err("The specified key ID is not a valid Garage key ID (starts with `GK`, followed by 12 hex-encoded bytes)"); + } + + if secret_key.len() != 64 || hex::decode(&secret_key).is_err() { + return Err("The specified secret key is not a valid Garage secret key (composed of 32 hex-encoded bytes)"); + } + + Ok(Self { key_id: key_id.to_string(), state: crdt::Deletable::present(KeyParams::new(secret_key, name)), - } + }) } /// Create a new Key which can me merged to mark an existing key deleted diff --git a/src/model/migrate.rs b/src/model/migrate.rs index 6b4c3eed..4c74b43b 100644 --- a/src/model/migrate.rs +++ b/src/model/migrate.rs @@ -78,6 +78,7 @@ impl Migrate { local_aliases: LwwMap::new(), website_config: Lww::new(website), cors_config: Lww::new(None), + lifecycle_config: Lww::new(None), quotas: Lww::new(Default::default()), }), }) diff --git a/src/model/s3/lifecycle_worker.rs b/src/model/s3/lifecycle_worker.rs new file mode 100644 index 00000000..50d4283f --- /dev/null +++ b/src/model/s3/lifecycle_worker.rs @@ -0,0 +1,410 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use chrono::prelude::*; +use std::time::{Duration, Instant}; +use tokio::sync::watch; + +use garage_util::background::*; +use garage_util::data::*; +use garage_util::error::Error; +use garage_util::persister::PersisterShared; +use garage_util::time::*; + +use garage_table::EmptyKey; + +use crate::bucket_table::*; +use crate::s3::object_table::*; + +use 
crate::garage::Garage; + +mod v090 { + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize, Default, Clone)] + pub struct LifecycleWorkerPersisted { + pub last_completed: Option<String>, + } + + impl garage_util::migrate::InitialFormat for LifecycleWorkerPersisted { + const VERSION_MARKER: &'static [u8] = b"G09lwp"; + } +} + +pub use v090::*; + +pub struct LifecycleWorker { + garage: Arc<Garage>, + + state: State, + + persister: PersisterShared<LifecycleWorkerPersisted>, +} + +enum State { + Completed(NaiveDate), + Running { + date: NaiveDate, + pos: Vec<u8>, + counter: usize, + objects_expired: usize, + mpu_aborted: usize, + last_bucket: Option<Bucket>, + }, +} + +#[derive(Clone, Copy, Eq, PartialEq)] +enum Skip { + SkipBucket, + NextObject, +} + +pub fn register_bg_vars( + persister: &PersisterShared<LifecycleWorkerPersisted>, + vars: &mut vars::BgVars, +) { + vars.register_ro(persister, "lifecycle-last-completed", |p| { + p.get_with(|x| x.last_completed.clone().unwrap_or("never".to_string())) + }); +} + +impl LifecycleWorker { + pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self { + let today = today(); + let last_completed = persister.get_with(|x| { + x.last_completed + .as_deref() + .and_then(|x| x.parse::<NaiveDate>().ok()) + }); + let state = match last_completed { + Some(d) if d >= today => State::Completed(d), + _ => State::start(today), + }; + Self { + garage, + state, + persister, + } + } +} + +impl State { + fn start(date: NaiveDate) -> Self { + info!("Starting lifecycle worker for {}", date); + State::Running { + date, + pos: vec![], + counter: 0, + objects_expired: 0, + mpu_aborted: 0, + last_bucket: None, + } + } +} + +#[async_trait] +impl Worker for LifecycleWorker { + fn name(&self) -> String { + "object lifecycle worker".to_string() + } + + fn status(&self) -> WorkerStatus { + match &self.state { + State::Completed(d) => WorkerStatus { + freeform: vec![format!("Last completed: {}", d)], + ..Default::default() + }, + State::Running { + date, + counter, + objects_expired, + mpu_aborted, + .. + } => { + let n_objects = self + .garage + .object_table + .data + .store + .fast_len() + .unwrap_or(None); + let progress = match n_objects { + None => "...".to_string(), + Some(total) => format!( + "~{:.2}%", + 100. * std::cmp::min(*counter, total) as f32 / total as f32 + ), + }; + WorkerStatus { + progress: Some(progress), + freeform: vec![ + format!("Started: {}", date), + format!("Objects expired: {}", objects_expired), + format!("Multipart uploads aborted: { }", mpu_aborted), + ], + ..Default::default() + } + } + } + } + + async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> { + match &mut self.state { + State::Completed(_) => Ok(WorkerState::Idle), + State::Running { + date, + counter, + objects_expired, + mpu_aborted, + pos, + last_bucket, + } => { + // Process a batch of 100 items before yielding to bg task scheduler + for _ in 0..100 { + let (object_bytes, next_pos) = match self + .garage + .object_table + .data + .store + .get_gt(&pos)? 
+ { + None => { + info!("Lifecycle worker finished for {}, objects expired: {}, mpu aborted: {}", date, *objects_expired, *mpu_aborted); + self.persister + .set_with(|x| x.last_completed = Some(date.to_string()))?; + self.state = State::Completed(*date); + return Ok(WorkerState::Idle); + } + Some((k, v)) => (v, k), + }; + + let object = self.garage.object_table.data.decode_entry(&object_bytes)?; + let skip = process_object( + &self.garage, + *date, + &object, + objects_expired, + mpu_aborted, + last_bucket, + ) + .await?; + + *counter += 1; + if skip == Skip::SkipBucket { + let bucket_id_len = object.bucket_id.as_slice().len(); + assert_eq!( + next_pos.get(..bucket_id_len), + Some(object.bucket_id.as_slice()) + ); + let last_bucket_pos = [&next_pos[..bucket_id_len], &[0xFFu8][..]].concat(); + *pos = std::cmp::max(next_pos, last_bucket_pos); + } else { + *pos = next_pos; + } + } + + Ok(WorkerState::Busy) + } + } + } + + async fn wait_for_work(&mut self) -> WorkerState { + match &self.state { + State::Completed(d) => { + let next_day = d.succ_opt().expect("no next day"); + let next_start = midnight_ts(next_day); + loop { + let now = now_msec(); + if now < next_start { + tokio::time::sleep_until( + (Instant::now() + Duration::from_millis(next_start - now)).into(), + ) + .await; + } else { + break; + } + } + self.state = State::start(std::cmp::max(next_day, today())); + } + State::Running { .. } => (), + } + WorkerState::Busy + } +} + +async fn process_object( + garage: &Arc<Garage>, + now_date: NaiveDate, + object: &Object, + objects_expired: &mut usize, + mpu_aborted: &mut usize, + last_bucket: &mut Option<Bucket>, +) -> Result<Skip, Error> { + if !object + .versions() + .iter() + .any(|x| x.is_data() || x.is_uploading(None)) + { + return Ok(Skip::NextObject); + } + + let bucket = match last_bucket.take() { + Some(b) if b.id == object.bucket_id => b, + _ => { + match garage + .bucket_table + .get(&EmptyKey, &object.bucket_id) + .await? 
+ { + Some(b) => b, + None => { + warn!( + "Lifecycle worker: object in non-existent bucket {:?}", + object.bucket_id + ); + return Ok(Skip::SkipBucket); + } + } + } + }; + + let lifecycle_policy: &[LifecycleRule] = bucket + .state + .as_option() + .and_then(|s| s.lifecycle_config.get().as_deref()) + .unwrap_or_default(); + + if lifecycle_policy.iter().all(|x| !x.enabled) { + return Ok(Skip::SkipBucket); + } + + let db = garage.object_table.data.store.db(); + + for rule in lifecycle_policy.iter() { + if !rule.enabled { + continue; + } + + if let Some(pfx) = &rule.filter.prefix { + if !object.key.starts_with(pfx) { + continue; + } + } + + if let Some(expire) = &rule.expiration { + if let Some(current_version) = object.versions().iter().rev().find(|v| v.is_data()) { + let version_date = next_date(current_version.timestamp); + + let current_version_data = match ¤t_version.state { + ObjectVersionState::Complete(c) => c, + _ => unreachable!(), + }; + + let size_match = check_size_filter(current_version_data, &rule.filter); + let date_match = match expire { + LifecycleExpiration::AfterDays(n_days) => { + (now_date - version_date) >= chrono::Duration::days(*n_days as i64) + } + LifecycleExpiration::AtDate(exp_date) => { + if let Ok(exp_date) = parse_lifecycle_date(exp_date) { + now_date >= exp_date + } else { + warn!("Invalid expiration date stored in bucket {:?} lifecycle config: {}", bucket.id, exp_date); + false + } + } + }; + + if size_match && date_match { + // Delete expired version + let deleted_object = Object::new( + object.bucket_id, + object.key.clone(), + vec![ObjectVersion { + uuid: gen_uuid(), + timestamp: std::cmp::max(now_msec(), current_version.timestamp + 1), + state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), + }], + ); + info!( + "Lifecycle: expiring 1 object in bucket {:?}", + object.bucket_id + ); + db.transaction(|tx| garage.object_table.queue_insert(tx, &deleted_object))?; + *objects_expired += 1; + } + } + } + + if let Some(abort_mpu_days) = &rule.abort_incomplete_mpu_days { + let aborted_versions = object + .versions() + .iter() + .filter_map(|v| { + let version_date = next_date(v.timestamp); + if (now_date - version_date) >= chrono::Duration::days(*abort_mpu_days as i64) + && matches!(&v.state, ObjectVersionState::Uploading { .. 
}) + { + Some(ObjectVersion { + state: ObjectVersionState::Aborted, + ..*v + }) + } else { + None + } + }) + .collect::<Vec<_>>(); + if !aborted_versions.is_empty() { + // Insert aborted mpu info + let n_aborted = aborted_versions.len(); + info!( + "Lifecycle: aborting {} incomplete upload(s) in bucket {:?}", + n_aborted, object.bucket_id + ); + let aborted_object = + Object::new(object.bucket_id, object.key.clone(), aborted_versions); + db.transaction(|tx| garage.object_table.queue_insert(tx, &aborted_object))?; + *mpu_aborted += n_aborted; + } + } + } + + *last_bucket = Some(bucket); + Ok(Skip::NextObject) +} + +fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter) -> bool { + let size = match version_data { + ObjectVersionData::Inline(meta, _) | ObjectVersionData::FirstBlock(meta, _) => meta.size, + _ => unreachable!(), + }; + if let Some(size_gt) = filter.size_gt { + if !(size > size_gt) { + return false; + } + } + if let Some(size_lt) = filter.size_lt { + if !(size < size_lt) { + return false; + } + } + true +} + +fn midnight_ts(date: NaiveDate) -> u64 { + date.and_hms_opt(0, 0, 0) + .expect("midnight does not exist") + .timestamp_millis() as u64 +} + +fn next_date(ts: u64) -> NaiveDate { + NaiveDateTime::from_timestamp_millis(ts as i64) + .expect("bad timestamp") + .date() + .succ_opt() + .expect("no next day") +} + +fn today() -> NaiveDate { + Utc::now().naive_utc().date() +} diff --git a/src/model/s3/mod.rs b/src/model/s3/mod.rs index 4e94337d..5c776fb0 100644 --- a/src/model/s3/mod.rs +++ b/src/model/s3/mod.rs @@ -1,3 +1,6 @@ pub mod block_ref_table; +pub mod mpu_table; pub mod object_table; pub mod version_table; + +pub mod lifecycle_worker; diff --git a/src/model/s3/mpu_table.rs b/src/model/s3/mpu_table.rs new file mode 100644 index 00000000..238cbf11 --- /dev/null +++ b/src/model/s3/mpu_table.rs @@ -0,0 +1,254 @@ +use std::sync::Arc; + +use garage_db as db; + +use garage_util::crdt::Crdt; +use garage_util::data::*; +use garage_util::time::*; + +use garage_table::replication::TableShardedReplication; +use garage_table::*; + +use crate::index_counter::*; +use crate::s3::version_table::*; + +pub const UPLOADS: &str = "uploads"; +pub const PARTS: &str = "parts"; +pub const BYTES: &str = "bytes"; + +mod v09 { + use garage_util::crdt; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + + /// A part of a multipart upload + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct MultipartUpload { + /// Partition key = Upload id = UUID of the object version + pub upload_id: Uuid, + + /// The timestamp at which the multipart upload was created + pub timestamp: u64, + /// Is this multipart upload deleted + /// The MultipartUpload is marked as deleted as soon as the + /// multipart upload is either completed or aborted + pub deleted: crdt::Bool, + /// List of uploaded parts, key = (part number, timestamp) + /// In case of retries, all versions for each part are kept + /// Everything is cleaned up only once the MultipartUpload is marked deleted + pub parts: crdt::Map<MpuPartKey, MpuPart>, + + // Back link to bucket+key so that we can find the object this mpu + // belongs to and check whether it is still valid + /// Bucket in which the related object is stored + pub bucket_id: Uuid, + /// Key in which the related object is stored + pub key: String, + } + + #[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] + pub struct MpuPartKey { + /// Number of the part + pub part_number: u64, + /// Timestamp of part 
upload + pub timestamp: u64, + } + + /// The version of an uploaded part + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct MpuPart { + /// Links to a Version in VersionTable + pub version: Uuid, + /// ETag of the content of this part (known only once done uploading) + pub etag: Option<String>, + /// Size of this part (known only once done uploading) + pub size: Option<u64>, + } + + impl garage_util::migrate::InitialFormat for MultipartUpload { + const VERSION_MARKER: &'static [u8] = b"G09s3mpu"; + } +} + +pub use v09::*; + +impl Ord for MpuPartKey { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.part_number + .cmp(&other.part_number) + .then(self.timestamp.cmp(&other.timestamp)) + } +} + +impl PartialOrd for MpuPartKey { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl MultipartUpload { + pub fn new( + upload_id: Uuid, + timestamp: u64, + bucket_id: Uuid, + key: String, + deleted: bool, + ) -> Self { + Self { + upload_id, + timestamp, + deleted: crdt::Bool::new(deleted), + parts: crdt::Map::new(), + bucket_id, + key, + } + } + + pub fn next_timestamp(&self, part_number: u64) -> u64 { + std::cmp::max( + now_msec(), + 1 + self + .parts + .items() + .iter() + .filter(|(x, _)| x.part_number == part_number) + .map(|(x, _)| x.timestamp) + .max() + .unwrap_or(0), + ) + } +} + +impl Entry<Uuid, EmptyKey> for MultipartUpload { + fn partition_key(&self) -> &Uuid { + &self.upload_id + } + fn sort_key(&self) -> &EmptyKey { + &EmptyKey + } + fn is_tombstone(&self) -> bool { + self.deleted.get() + } +} + +impl Crdt for MultipartUpload { + fn merge(&mut self, other: &Self) { + self.deleted.merge(&other.deleted); + + if self.deleted.get() { + self.parts.clear(); + } else { + self.parts.merge(&other.parts); + } + } +} + +impl Crdt for MpuPart { + fn merge(&mut self, other: &Self) { + self.etag = match (self.etag.take(), &other.etag) { + (None, Some(_)) => other.etag.clone(), + (Some(x), Some(y)) if x < *y => other.etag.clone(), + (x, _) => x, + }; + self.size = match (self.size, other.size) { + (None, Some(_)) => other.size, + (Some(x), Some(y)) if x < y => other.size, + (x, _) => x, + }; + } +} + +pub struct MultipartUploadTable { + pub version_table: Arc<Table<VersionTable, TableShardedReplication>>, + pub mpu_counter_table: Arc<IndexCounter<MultipartUpload>>, +} + +impl TableSchema for MultipartUploadTable { + const TABLE_NAME: &'static str = "multipart_upload"; + + type P = Uuid; + type S = EmptyKey; + type E = MultipartUpload; + type Filter = DeletedFilter; + + fn updated( + &self, + tx: &mut db::Transaction, + old: Option<&Self::E>, + new: Option<&Self::E>, + ) -> db::TxOpResult<()> { + // 1. Count + let counter_res = self.mpu_counter_table.count(tx, old, new); + if let Err(e) = db::unabort(counter_res)? { + error!( + "Unable to update multipart object part counter: {}. Index values will be wrong!", + e + ); + } + + // 2. Propagate deletions to version table + if let (Some(old_mpu), Some(new_mpu)) = (old, new) { + if new_mpu.deleted.get() && !old_mpu.deleted.get() { + let deleted_versions = old_mpu.parts.items().iter().map(|(_k, p)| { + Version::new( + p.version, + VersionBacklink::MultipartUpload { + upload_id: old_mpu.upload_id, + }, + true, + ) + }); + for version in deleted_versions { + let res = self.version_table.queue_insert(tx, &version); + if let Err(e) = db::unabort(res)? { + error!("Unable to enqueue version deletion propagation: {}. 
A repair will be needed.", e); + } + } + } + } + + Ok(()) + } + + fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { + filter.apply(entry.is_tombstone()) + } +} + +impl CountedItem for MultipartUpload { + const COUNTER_TABLE_NAME: &'static str = "bucket_mpu_counter"; + + // Partition key = bucket id + type CP = Uuid; + // Sort key = nothing + type CS = EmptyKey; + + fn counter_partition_key(&self) -> &Uuid { + &self.bucket_id + } + fn counter_sort_key(&self) -> &EmptyKey { + &EmptyKey + } + + fn counts(&self) -> Vec<(&'static str, i64)> { + let uploads = if self.deleted.get() { 0 } else { 1 }; + let mut parts = self + .parts + .items() + .iter() + .map(|(k, _)| k.part_number) + .collect::<Vec<_>>(); + parts.dedup(); + let bytes = self + .parts + .items() + .iter() + .map(|(_, p)| p.size.unwrap_or(0)) + .sum::<u64>(); + vec![ + (UPLOADS, uploads), + (PARTS, parts.len() as i64), + (BYTES, bytes as i64), + ] + } +} diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs index 518acc95..ebea04bd 100644 --- a/src/model/s3/object_table.rs +++ b/src/model/s3/object_table.rs @@ -10,6 +10,7 @@ use garage_table::replication::TableShardedReplication; use garage_table::*; use crate::index_counter::*; +use crate::s3::mpu_table::*; use crate::s3::version_table::*; pub const OBJECTS: &str = "objects"; @@ -130,7 +131,86 @@ mod v08 { } } -pub use v08::*; +mod v09 { + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + + use super::v08; + + pub use v08::{ObjectVersionData, ObjectVersionHeaders, ObjectVersionMeta}; + + /// An object + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Object { + /// The bucket in which the object is stored, used as partition key + pub bucket_id: Uuid, + + /// The key at which the object is stored in its bucket, used as sorting key + pub key: String, + + /// The list of currenty stored versions of the object + pub(super) versions: Vec<ObjectVersion>, + } + + /// Informations about a version of an object + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct ObjectVersion { + /// Id of the version + pub uuid: Uuid, + /// Timestamp of when the object was created + pub timestamp: u64, + /// State of the version + pub state: ObjectVersionState, + } + + /// State of an object version + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub enum ObjectVersionState { + /// The version is being received + Uploading { + /// Indicates whether this is a multipart upload + multipart: bool, + /// Headers to be included in the final object + headers: ObjectVersionHeaders, + }, + /// The version is fully received + Complete(ObjectVersionData), + /// The version uploaded containded errors or the upload was explicitly aborted + Aborted, + } + + impl garage_util::migrate::Migrate for Object { + const VERSION_MARKER: &'static [u8] = b"G09s3o"; + + type Previous = v08::Object; + + fn migrate(old: v08::Object) -> Object { + let versions = old + .versions + .into_iter() + .map(|x| ObjectVersion { + uuid: x.uuid, + timestamp: x.timestamp, + state: match x.state { + v08::ObjectVersionState::Uploading(h) => ObjectVersionState::Uploading { + multipart: false, + headers: h, + }, + v08::ObjectVersionState::Complete(d) => ObjectVersionState::Complete(d), + v08::ObjectVersionState::Aborted => ObjectVersionState::Aborted, + }, + }) + .collect(); + Object { + bucket_id: old.bucket_id, + key: old.key, + versions, + } + } + } +} + +pub use v09::*; impl Object { /// Initialize an Object struct from 
parts @@ -180,11 +260,11 @@ impl Crdt for ObjectVersionState { Complete(a) => { a.merge(b); } - Uploading(_) => { + Uploading { .. } => { *self = Complete(b.clone()); } }, - Uploading(_) => {} + Uploading { .. } => {} } } } @@ -199,8 +279,17 @@ impl ObjectVersion { } /// Is the object version currently being uploaded - pub fn is_uploading(&self) -> bool { - matches!(self.state, ObjectVersionState::Uploading(_)) + /// + /// matches only multipart uploads if check_multipart is Some(true) + /// matches only non-multipart uploads if check_multipart is Some(false) + /// matches both if check_multipart is None + pub fn is_uploading(&self, check_multipart: Option<bool>) -> bool { + match &self.state { + ObjectVersionState::Uploading { multipart, .. } => { + check_multipart.map(|x| x == *multipart).unwrap_or(true) + } + _ => false, + } } /// Is the object version completely received @@ -267,13 +356,20 @@ impl Crdt for Object { pub struct ObjectTable { pub version_table: Arc<Table<VersionTable, TableShardedReplication>>, + pub mpu_table: Arc<Table<MultipartUploadTable, TableShardedReplication>>, pub object_counter_table: Arc<IndexCounter<Object>>, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub enum ObjectFilter { + /// Is the object version available (received and not a tombstone) IsData, - IsUploading, + /// Is the object version currently being uploaded + /// + /// matches only multipart uploads if check_multipart is Some(true) + /// matches only non-multipart uploads if check_multipart is Some(false) + /// matches both if check_multipart is None + IsUploading { check_multipart: Option<bool> }, } impl TableSchema for ObjectTable { @@ -301,21 +397,28 @@ impl TableSchema for ObjectTable { // 2. Enqueue propagation deletions to version table if let (Some(old_v), Some(new_v)) = (old, new) { - // Propagate deletion of old versions for v in old_v.versions.iter() { - let newly_deleted = match new_v + let new_v_id = new_v .versions - .binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key())) - { + .binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key())); + + // Propagate deletion of old versions to the Version table + let delete_version = match new_v_id { Err(_) => true, Ok(i) => { new_v.versions[i].state == ObjectVersionState::Aborted && v.state != ObjectVersionState::Aborted } }; - if newly_deleted { - let deleted_version = - Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true); + if delete_version { + let deleted_version = Version::new( + v.uuid, + VersionBacklink::Object { + bucket_id: old_v.bucket_id, + key: old_v.key.clone(), + }, + true, + ); let res = self.version_table.queue_insert(tx, &deleted_version); if let Err(e) = db::unabort(res)? { error!( @@ -324,6 +427,39 @@ impl TableSchema for ObjectTable { ); } } + + // After abortion or completion of multipart uploads, delete MPU table entry + if matches!( + v.state, + ObjectVersionState::Uploading { + multipart: true, + .. + } + ) { + let delete_mpu = match new_v_id { + Err(_) => true, + Ok(i) => !matches!( + new_v.versions[i].state, + ObjectVersionState::Uploading { .. } + ), + }; + if delete_mpu { + let deleted_mpu = MultipartUpload::new( + v.uuid, + v.timestamp, + old_v.bucket_id, + old_v.key.clone(), + true, + ); + let res = self.mpu_table.queue_insert(tx, &deleted_mpu); + if let Err(e) = db::unabort(res)? { + error!( + "Unable to enqueue multipart upload deletion propagation: {}. 
A repair will be needed.", + e + ); + } + } + } } } @@ -333,7 +469,10 @@ impl TableSchema for ObjectTable { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { match filter { ObjectFilter::IsData => entry.versions.iter().any(|v| v.is_data()), - ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()), + ObjectFilter::IsUploading { check_multipart } => entry + .versions + .iter() + .any(|v| v.is_uploading(*check_multipart)), } } } @@ -360,10 +499,7 @@ impl CountedItem for Object { } else { 0 }; - let n_unfinished_uploads = versions - .iter() - .filter(|v| matches!(v.state, ObjectVersionState::Uploading(_))) - .count(); + let n_unfinished_uploads = versions.iter().filter(|v| v.is_uploading(None)).count(); let n_bytes = versions .iter() .map(|v| match &v.state { diff --git a/src/model/s3/version_table.rs b/src/model/s3/version_table.rs index 6edc83f4..5c032f9f 100644 --- a/src/model/s3/version_table.rs +++ b/src/model/s3/version_table.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use garage_db as db; use garage_util::data::*; +use garage_util::error::*; use garage_table::crdt::*; use garage_table::replication::TableShardedReplication; @@ -66,6 +67,8 @@ mod v08 { use super::v05; + pub use v05::{VersionBlock, VersionBlockKey}; + /// A version of an object #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] pub struct Version { @@ -90,8 +93,6 @@ mod v08 { pub key: String, } - pub use v05::{VersionBlock, VersionBlockKey}; - impl garage_util::migrate::Migrate for Version { type Previous = v05::Version; @@ -110,32 +111,94 @@ mod v08 { } } -pub use v08::*; +pub(crate) mod v09 { + use garage_util::crdt; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + + use super::v08; + + pub use v08::{VersionBlock, VersionBlockKey}; + + /// A version of an object + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Version { + /// UUID of the version, used as partition key + pub uuid: Uuid, + + // Actual data: the blocks for this version + // In the case of a multipart upload, also store the etags + // of individual parts and check them when doing CompleteMultipartUpload + /// Is this version deleted + pub deleted: crdt::Bool, + /// list of blocks of data composing the version + pub blocks: crdt::Map<VersionBlockKey, VersionBlock>, + + // Back link to owner of this version (either an object or a multipart + // upload), used to find whether it has been deleted and this version + // should in turn be deleted (see versions repair procedure) + pub backlink: VersionBacklink, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub enum VersionBacklink { + Object { + /// Bucket in which the related object is stored + bucket_id: Uuid, + /// Key in which the related object is stored + key: String, + }, + MultipartUpload { + upload_id: Uuid, + }, + } + + impl garage_util::migrate::Migrate for Version { + const VERSION_MARKER: &'static [u8] = b"G09s3v"; + + type Previous = v08::Version; + + fn migrate(old: v08::Version) -> Version { + Version { + uuid: old.uuid, + deleted: old.deleted, + blocks: old.blocks, + backlink: VersionBacklink::Object { + bucket_id: old.bucket_id, + key: old.key, + }, + } + } + } +} + +pub use v09::*; impl Version { - pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self { + pub fn new(uuid: Uuid, backlink: VersionBacklink, deleted: bool) -> Self { Self { uuid, deleted: deleted.into(), blocks: crdt::Map::new(), - parts_etags: crdt::Map::new(), - bucket_id, - key, + 
backlink, } } pub fn has_part_number(&self, part_number: u64) -> bool { - let case1 = self - .parts_etags + self.blocks .items() - .binary_search_by(|(k, _)| k.cmp(&part_number)) - .is_ok(); - let case2 = self + .binary_search_by(|(k, _)| k.part_number.cmp(&part_number)) + .is_ok() + } + + pub fn n_parts(&self) -> Result<u64, Error> { + Ok(self .blocks .items() - .binary_search_by(|(k, _)| k.part_number.cmp(&part_number)) - .is_ok(); - case1 || case2 + .last() + .ok_or_message("version has no parts")? + .0 + .part_number) } } @@ -175,10 +238,8 @@ impl Crdt for Version { if self.deleted.get() { self.blocks.clear(); - self.parts_etags.clear(); } else { self.blocks.merge(&other.blocks); - self.parts_etags.merge(&other.parts_etags); } } } diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml index f0fde7a7..f450718f 100644 --- a/src/rpc/Cargo.toml +++ b/src/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_rpc" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" @@ -14,16 +14,20 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +format_table.workspace = true +garage_db.workspace = true garage_util.workspace = true arc-swap = "1.0" bytes = "1.0" -gethostname = "0.2" +bytesize = "1.1" +gethostname = "0.4" hex = "0.4" tracing = "0.1" rand = "0.8" +itertools="0.10" sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" } -systemstat = "0.2.3" +nix = { version = "0.27", default-features = false, features = ["fs"] } async-trait = "0.1.7" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } @@ -45,7 +49,7 @@ tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi tokio-stream = { version = "0.1", features = ["net"] } opentelemetry = "0.17" -netapp = { version = "0.5.2", features = ["telemetry"] } +netapp = { version = "0.10", features = ["telemetry"] } [features] kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ] diff --git a/src/rpc/graph_algo.rs b/src/rpc/graph_algo.rs new file mode 100644 index 00000000..d8c6c9b9 --- /dev/null +++ b/src/rpc/graph_algo.rs @@ -0,0 +1,415 @@ +//! This module deals with graph algorithms. +//! It is used in layout.rs to build the partition to node assignment. + +use rand::prelude::{SeedableRng, SliceRandom}; +use std::cmp::{max, min}; +use std::collections::HashMap; +use std::collections::VecDeque; + +/// Vertex data structures used in all the graphs used in layout.rs. +/// usize parameters correspond to node/zone/partitions ids. +/// To understand the vertex roles below, please refer to the formal description +/// of the layout computation algorithm. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum Vertex { + Source, + Pup(usize), // The vertex p+ of partition p + Pdown(usize), // The vertex p- of partition p + PZ(usize, usize), // The vertex corresponding to x_(partition p, zone z) + N(usize), // The vertex corresponding to node n + Sink, +} + +/// Edge data structure for the flow algorithm. +#[derive(Clone, Copy, Debug)] +pub struct FlowEdge { + cap: u64, // flow maximal capacity of the edge + flow: i64, // flow value on the edge + dest: usize, // destination vertex id + rev: usize, // index of the reversed edge (v, self) in the edge list of vertex v +} + +/// Edge data structure for the detection of negative cycles. 
+#[derive(Clone, Copy, Debug)] +pub struct WeightedEdge { + w: i64, // weight of the edge + dest: usize, +} + +pub trait Edge: Clone + Copy {} +impl Edge for FlowEdge {} +impl Edge for WeightedEdge {} + +/// Struct for the graph structure. We do encapsulation here to be able to both +/// provide user friendly Vertex enum to address vertices, and to use internally usize +/// indices and Vec instead of HashMap in the graph algorithm to optimize execution speed. +pub struct Graph<E: Edge> { + vertex_to_id: HashMap<Vertex, usize>, + id_to_vertex: Vec<Vertex>, + + // The graph is stored as an adjacency list + graph: Vec<Vec<E>>, +} + +pub type CostFunction = HashMap<(Vertex, Vertex), i64>; + +impl<E: Edge> Graph<E> { + pub fn new(vertices: &[Vertex]) -> Self { + let mut map = HashMap::<Vertex, usize>::new(); + for (i, vert) in vertices.iter().enumerate() { + map.insert(*vert, i); + } + Graph::<E> { + vertex_to_id: map, + id_to_vertex: vertices.to_vec(), + graph: vec![Vec::<E>::new(); vertices.len()], + } + } + + fn get_vertex_id(&self, v: &Vertex) -> Result<usize, String> { + self.vertex_to_id + .get(v) + .cloned() + .ok_or_else(|| format!("The graph does not contain vertex {:?}", v)) + } +} + +impl Graph<FlowEdge> { + /// This function adds a directed edge to the graph with capacity c, and the + /// corresponding reversed edge with capacity 0. + pub fn add_edge(&mut self, u: Vertex, v: Vertex, c: u64) -> Result<(), String> { + let idu = self.get_vertex_id(&u)?; + let idv = self.get_vertex_id(&v)?; + if idu == idv { + return Err("Cannot add edge from vertex to itself in flow graph".into()); + } + + let rev_u = self.graph[idu].len(); + let rev_v = self.graph[idv].len(); + self.graph[idu].push(FlowEdge { + cap: c, + dest: idv, + flow: 0, + rev: rev_v, + }); + self.graph[idv].push(FlowEdge { + cap: 0, + dest: idu, + flow: 0, + rev: rev_u, + }); + Ok(()) + } + + /// This function returns the list of vertices that receive a positive flow from + /// vertex v. + pub fn get_positive_flow_from(&self, v: Vertex) -> Result<Vec<Vertex>, String> { + let idv = self.get_vertex_id(&v)?; + let mut result = Vec::<Vertex>::new(); + for edge in self.graph[idv].iter() { + if edge.flow > 0 { + result.push(self.id_to_vertex[edge.dest]); + } + } + Ok(result) + } + + /// This function returns the value of the flow incoming to v. + pub fn get_inflow(&self, v: Vertex) -> Result<i64, String> { + let idv = self.get_vertex_id(&v)?; + let mut result = 0; + for edge in self.graph[idv].iter() { + result += max(0, self.graph[edge.dest][edge.rev].flow); + } + Ok(result) + } + + /// This function returns the value of the flow outgoing from v. + pub fn get_outflow(&self, v: Vertex) -> Result<i64, String> { + let idv = self.get_vertex_id(&v)?; + let mut result = 0; + for edge in self.graph[idv].iter() { + result += max(0, edge.flow); + } + Ok(result) + } + + /// This function computes the flow total value by computing the outgoing flow + /// from the source. + pub fn get_flow_value(&mut self) -> Result<i64, String> { + self.get_outflow(Vertex::Source) + } + + /// This function shuffles the order of the edge lists. It keeps the ids of the + /// reversed edges consistent. + fn shuffle_edges(&mut self) { + // We use deterministic randomness so that the layout calculation algorihtm + // will output the same thing every time it is run. 
This way, the results + // pre-calculated in `garage layout show` will match exactly those used + // in practice with `garage layout apply` + let mut rng = rand::rngs::StdRng::from_seed([0x12u8; 32]); + for i in 0..self.graph.len() { + self.graph[i].shuffle(&mut rng); + // We need to update the ids of the reverse edges. + for j in 0..self.graph[i].len() { + let target_v = self.graph[i][j].dest; + let target_rev = self.graph[i][j].rev; + self.graph[target_v][target_rev].rev = j; + } + } + } + + /// Computes an upper bound of the flow on the graph + pub fn flow_upper_bound(&self) -> Result<u64, String> { + let idsource = self.get_vertex_id(&Vertex::Source)?; + let mut flow_upper_bound = 0; + for edge in self.graph[idsource].iter() { + flow_upper_bound += edge.cap; + } + Ok(flow_upper_bound) + } + + /// This function computes the maximal flow using Dinic's algorithm. It starts with + /// the flow values already present in the graph. So it is possible to add some edge to + /// the graph, compute a flow, add other edges, update the flow. + pub fn compute_maximal_flow(&mut self) -> Result<(), String> { + let idsource = self.get_vertex_id(&Vertex::Source)?; + let idsink = self.get_vertex_id(&Vertex::Sink)?; + + let nb_vertices = self.graph.len(); + + let flow_upper_bound = self.flow_upper_bound()?; + + // To ensure the dispersion of the associations generated by the + // assignment, we shuffle the neighbours of the nodes. Hence, + // the vertices do not consider their neighbours in the same order. + self.shuffle_edges(); + + // We run Dinic's max flow algorithm + loop { + // We build the level array from Dinic's algorithm. + let mut level = vec![None; nb_vertices]; + + let mut fifo = VecDeque::new(); + fifo.push_back((idsource, 0)); + while let Some((id, lvl)) = fifo.pop_front() { + if level[id].is_none() { + // it means id has not yet been reached + level[id] = Some(lvl); + for edge in self.graph[id].iter() { + if edge.cap as i64 - edge.flow > 0 { + fifo.push_back((edge.dest, lvl + 1)); + } + } + } + } + if level[idsink].is_none() { + // There is no residual flow + break; + } + // Now we run DFS respecting the level array + let mut next_nbd = vec![0; nb_vertices]; + let mut lifo = Vec::new(); + + lifo.push((idsource, flow_upper_bound)); + + while let Some((id, f)) = lifo.last().cloned() { + if id == idsink { + // The DFS reached the sink, we can add a + // residual flow. + lifo.pop(); + while let Some((id, _)) = lifo.pop() { + let nbd = next_nbd[id]; + self.graph[id][nbd].flow += f as i64; + let id_rev = self.graph[id][nbd].dest; + let nbd_rev = self.graph[id][nbd].rev; + self.graph[id_rev][nbd_rev].flow -= f as i64; + } + lifo.push((idsource, flow_upper_bound)); + continue; + } + // else we did not reach the sink + let nbd = next_nbd[id]; + if nbd >= self.graph[id].len() { + // There is nothing to explore from id anymore + lifo.pop(); + if let Some((parent, _)) = lifo.last() { + next_nbd[*parent] += 1; + } + continue; + } + // else we can try to send flow from id to its nbd + let new_flow = min( + f as i64, + self.graph[id][nbd].cap as i64 - self.graph[id][nbd].flow, + ) as u64; + if new_flow == 0 { + next_nbd[id] += 1; + continue; + } + if let (Some(lvldest), Some(lvlid)) = (level[self.graph[id][nbd].dest], level[id]) { + if lvldest <= lvlid { + // We cannot send flow to nbd. + next_nbd[id] += 1; + continue; + } + } + // otherwise, we send flow to nbd. 
+ lifo.push((self.graph[id][nbd].dest, new_flow)); + } + } + Ok(()) + } + + /// This function takes a flow, and a cost function on the edges, and tries to find an + /// equivalent flow with a better cost, by finding improving overflow cycles. It uses + /// as subroutine the Bellman Ford algorithm run up to path_length. + /// We assume that the cost of edge (u,v) is the opposite of the cost of (v,u), and + /// only one needs to be present in the cost function. + pub fn optimize_flow_with_cost( + &mut self, + cost: &CostFunction, + path_length: usize, + ) -> Result<(), String> { + // We build the weighted graph g where we will look for negative cycle + let mut gf = self.build_cost_graph(cost)?; + let mut cycles = gf.list_negative_cycles(path_length); + while !cycles.is_empty() { + // we enumerate negative cycles + for c in cycles.iter() { + for i in 0..c.len() { + // We add one flow unit to the edge (u,v) of cycle c + let idu = self.vertex_to_id[&c[i]]; + let idv = self.vertex_to_id[&c[(i + 1) % c.len()]]; + for j in 0..self.graph[idu].len() { + // since idu appears at most once in the cycles, we enumerate every + // edge at most once. + let edge = self.graph[idu][j]; + if edge.dest == idv { + self.graph[idu][j].flow += 1; + self.graph[idv][edge.rev].flow -= 1; + break; + } + } + } + } + + gf = self.build_cost_graph(cost)?; + cycles = gf.list_negative_cycles(path_length); + } + Ok(()) + } + + /// Construct the weighted graph G_f from the flow and the cost function + fn build_cost_graph(&self, cost: &CostFunction) -> Result<Graph<WeightedEdge>, String> { + let mut g = Graph::<WeightedEdge>::new(&self.id_to_vertex); + let nb_vertices = self.id_to_vertex.len(); + for i in 0..nb_vertices { + for edge in self.graph[i].iter() { + if edge.cap as i64 - edge.flow > 0 { + // It is possible to send overflow through this edge + let u = self.id_to_vertex[i]; + let v = self.id_to_vertex[edge.dest]; + if cost.contains_key(&(u, v)) { + g.add_edge(u, v, cost[&(u, v)])?; + } else if cost.contains_key(&(v, u)) { + g.add_edge(u, v, -cost[&(v, u)])?; + } else { + g.add_edge(u, v, 0)?; + } + } + } + } + Ok(g) + } +} + +impl Graph<WeightedEdge> { + /// This function adds a single directed weighted edge to the graph. + pub fn add_edge(&mut self, u: Vertex, v: Vertex, w: i64) -> Result<(), String> { + let idu = self.get_vertex_id(&u)?; + let idv = self.get_vertex_id(&v)?; + self.graph[idu].push(WeightedEdge { w, dest: idv }); + Ok(()) + } + + /// This function lists the negative cycles it manages to find after path_length + /// iterations of the main loop of the Bellman-Ford algorithm. For the classical + /// algorithm, path_length needs to be equal to the number of vertices. However, + /// for particular graph structures like in our case, the algorithm is still correct + /// when path_length is the length of the longest possible simple path. + /// See the formal description of the algorithm for more details. + fn list_negative_cycles(&self, path_length: usize) -> Vec<Vec<Vertex>> { + let nb_vertices = self.graph.len(); + + // We start with every vertex at distance 0 of some imaginary extra -1 vertex. 
+ let mut distance = vec![0; nb_vertices]; + // The prev vector collects for every vertex from where does the shortest path come + let mut prev = vec![None; nb_vertices]; + + for _ in 0..path_length + 1 { + for id in 0..nb_vertices { + for e in self.graph[id].iter() { + if distance[id] + e.w < distance[e.dest] { + distance[e.dest] = distance[id] + e.w; + prev[e.dest] = Some(id); + } + } + } + } + + // If self.graph contains a negative cycle, then at this point the graph described + // by prev (which is a directed 1-forest/functional graph) + // must contain a cycle. We list the cycles of prev. + let cycles_prev = cycles_of_1_forest(&prev); + + // Remark that the cycle in prev is in the reverse order compared to the cycle + // in the graph. Thus the .rev(). + return cycles_prev + .iter() + .map(|cycle| { + cycle + .iter() + .rev() + .map(|id| self.id_to_vertex[*id]) + .collect() + }) + .collect(); + } +} + +/// This function returns the list of cycles of a directed 1 forest. It does not +/// check for the consistency of the input. +fn cycles_of_1_forest(forest: &[Option<usize>]) -> Vec<Vec<usize>> { + let mut cycles = Vec::<Vec<usize>>::new(); + let mut time_of_discovery = vec![None; forest.len()]; + + for t in 0..forest.len() { + let mut id = t; + // while we are on a valid undiscovered node + while time_of_discovery[id].is_none() { + time_of_discovery[id] = Some(t); + if let Some(i) = forest[id] { + id = i; + } else { + break; + } + } + if forest[id].is_some() && time_of_discovery[id] == Some(t) { + // We discovered an id that we explored at this iteration t. + // It means we are on a cycle + let mut cy = vec![id; 1]; + let mut id2 = id; + while let Some(id_next) = forest[id2] { + id2 = id_next; + if id2 != id { + cy.push(id2); + } else { + break; + } + } + cycles.push(cy); + } + } + cycles +} diff --git a/src/rpc/layout.rs b/src/rpc/layout.rs index 1030e3a6..e02a180b 100644 --- a/src/rpc/layout.rs +++ b/src/rpc/layout.rs @@ -1,87 +1,286 @@ use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; +use std::collections::HashSet; +use std::fmt; -use serde::{Deserialize, Serialize}; +use bytesize::ByteSize; +use itertools::Itertools; -use garage_util::crdt::{AutoCrdt, Crdt, LwwMap}; +use garage_util::crdt::{AutoCrdt, Crdt, Lww, LwwMap}; use garage_util::data::*; use garage_util::encode::nonversioned_encode; use garage_util::error::*; +use crate::graph_algo::*; + use crate::ring::*; -/// The layout of the cluster, i.e. the list of roles -/// which are assigned to each cluster node -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ClusterLayout { - pub version: u64, - - pub replication_factor: usize, - pub roles: LwwMap<Uuid, NodeRoleV>, - - /// node_id_vec: a vector of node IDs with a role assigned - /// in the system (this includes gateway nodes). - /// The order here is different than the vec stored by `roles`, because: - /// 1. non-gateway nodes are first so that they have lower numbers - /// 2. 
nodes that don't have a role are excluded (but they need to - /// stay in the CRDT as tombstones) - pub node_id_vec: Vec<Uuid>, - /// the assignation of data partitions to node, the values - /// are indices in node_id_vec - #[serde(with = "serde_bytes")] - pub ring_assignation_data: Vec<CompactNodeType>, - - /// Role changes which are staged for the next version of the layout - pub staging: LwwMap<Uuid, NodeRoleV>, - pub staging_hash: Hash, +use std::convert::TryInto; + +const NB_PARTITIONS: usize = 1usize << PARTITION_BITS; + +// The Message type will be used to collect information on the algorithm. +type Message = Vec<String>; + +mod v08 { + use crate::ring::CompactNodeType; + use garage_util::crdt::LwwMap; + use garage_util::data::{Hash, Uuid}; + use serde::{Deserialize, Serialize}; + + /// The layout of the cluster, i.e. the list of roles + /// which are assigned to each cluster node + #[derive(Clone, Debug, Serialize, Deserialize)] + pub struct ClusterLayout { + pub version: u64, + + pub replication_factor: usize, + pub roles: LwwMap<Uuid, NodeRoleV>, + + /// node_id_vec: a vector of node IDs with a role assigned + /// in the system (this includes gateway nodes). + /// The order here is different than the vec stored by `roles`, because: + /// 1. non-gateway nodes are first so that they have lower numbers + /// 2. nodes that don't have a role are excluded (but they need to + /// stay in the CRDT as tombstones) + pub node_id_vec: Vec<Uuid>, + /// the assignation of data partitions to node, the values + /// are indices in node_id_vec + #[serde(with = "serde_bytes")] + pub ring_assignation_data: Vec<CompactNodeType>, + + /// Role changes which are staged for the next version of the layout + pub staging: LwwMap<Uuid, NodeRoleV>, + pub staging_hash: Hash, + } + + #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] + pub struct NodeRoleV(pub Option<NodeRole>); + + /// The user-assigned roles of cluster nodes + #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] + pub struct NodeRole { + /// Datacenter at which this entry belong. This information is used to + /// perform a better geodistribution + pub zone: String, + /// The capacity of the node + /// If this is set to None, the node does not participate in storing data for the system + /// and is only active as an API gateway to other nodes + pub capacity: Option<u64>, + /// A set of tags to recognize the node + pub tags: Vec<String>, + } + + impl garage_util::migrate::InitialFormat for ClusterLayout {} } -impl garage_util::migrate::InitialFormat for ClusterLayout {} +mod v09 { + use super::v08; + use crate::ring::CompactNodeType; + use garage_util::crdt::{Lww, LwwMap}; + use garage_util::data::{Hash, Uuid}; + use serde::{Deserialize, Serialize}; + pub use v08::{NodeRole, NodeRoleV}; + + /// The layout of the cluster, i.e. the list of roles + /// which are assigned to each cluster node + #[derive(Clone, Debug, Serialize, Deserialize)] + pub struct ClusterLayout { + pub version: u64, + + pub replication_factor: usize, + + /// This attribute is only used to retain the previously computed partition size, + /// to know to what extent does it change with the layout update. 
+ pub partition_size: u64, + /// Parameters used to compute the assignment currently given by + /// ring_assignment_data + pub parameters: LayoutParameters, + + pub roles: LwwMap<Uuid, NodeRoleV>, + + /// see comment in v08::ClusterLayout + pub node_id_vec: Vec<Uuid>, + /// see comment in v08::ClusterLayout + #[serde(with = "serde_bytes")] + pub ring_assignment_data: Vec<CompactNodeType>, + + /// Parameters to be used in the next partition assignment computation. + pub staging_parameters: Lww<LayoutParameters>, + /// Role changes which are staged for the next version of the layout + pub staging_roles: LwwMap<Uuid, NodeRoleV>, + pub staging_hash: Hash, + } + + /// This struct is used to set the parameters to be used in the assignment computation + /// algorithm. It is stored as a Crdt. + #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Serialize, Deserialize)] + pub struct LayoutParameters { + pub zone_redundancy: ZoneRedundancy, + } -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct NodeRoleV(pub Option<NodeRole>); + /// Zone redundancy: if set to AtLeast(x), the layout calculation will aim to store copies + /// of each partition on at least that number of different zones. + /// Otherwise, copies will be stored on the maximum possible number of zones. + #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Serialize, Deserialize)] + pub enum ZoneRedundancy { + AtLeast(usize), + Maximum, + } -impl AutoCrdt for NodeRoleV { + impl garage_util::migrate::Migrate for ClusterLayout { + const VERSION_MARKER: &'static [u8] = b"G09layout"; + + type Previous = v08::ClusterLayout; + + fn migrate(previous: Self::Previous) -> Self { + use itertools::Itertools; + + // In the old layout, capacities are in an arbitrary unit, + // but in the new layout they are in bytes. + // Here we arbitrarily multiply everything by 1G, + // such that 1 old capacity unit = 1GB in the new units. + // This is totally arbitrary and won't work for most users. + let cap_mul = 1024 * 1024 * 1024; + let roles = multiply_all_capacities(previous.roles, cap_mul); + let staging_roles = multiply_all_capacities(previous.staging, cap_mul); + let node_id_vec = previous.node_id_vec; + + // Determine partition size + let mut tmp = previous.ring_assignation_data.clone(); + tmp.sort(); + let partition_size = tmp + .into_iter() + .dedup_with_count() + .map(|(npart, node)| { + roles + .get(&node_id_vec[node as usize]) + .and_then(|p| p.0.as_ref().and_then(|r| r.capacity)) + .unwrap_or(0) / npart as u64 + }) + .min() + .unwrap_or(0); + + // By default, zone_redundancy is maximum possible value + let parameters = LayoutParameters { + zone_redundancy: ZoneRedundancy::Maximum, + }; + + let mut res = Self { + version: previous.version, + replication_factor: previous.replication_factor, + partition_size, + parameters, + roles, + node_id_vec, + ring_assignment_data: previous.ring_assignation_data, + staging_parameters: Lww::new(parameters), + staging_roles, + staging_hash: [0u8; 32].into(), + }; + res.staging_hash = res.calculate_staging_hash(); + res + } + } + + fn multiply_all_capacities( + old_roles: LwwMap<Uuid, NodeRoleV>, + mul: u64, + ) -> LwwMap<Uuid, NodeRoleV> { + let mut new_roles = LwwMap::new(); + for (node, ts, role) in old_roles.items() { + let mut role = role.clone(); + if let NodeRoleV(Some(NodeRole { + capacity: Some(ref mut cap), + .. 
+ })) = role + { + *cap *= mul; + } + new_roles.merge_raw(node, *ts, &role); + } + new_roles + } +} + +pub use v09::*; + +impl AutoCrdt for LayoutParameters { const WARN_IF_DIFFERENT: bool = true; } -/// The user-assigned roles of cluster nodes -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct NodeRole { - /// Datacenter at which this entry belong. This information might be used to perform a better - /// geodistribution - pub zone: String, - /// The (relative) capacity of the node - /// If this is set to None, the node does not participate in storing data for the system - /// and is only active as an API gateway to other nodes - pub capacity: Option<u32>, - /// A set of tags to recognize the node - pub tags: Vec<String>, +impl AutoCrdt for NodeRoleV { + const WARN_IF_DIFFERENT: bool = true; } impl NodeRole { pub fn capacity_string(&self) -> String { match self.capacity { - Some(c) => format!("{}", c), + Some(c) => ByteSize::b(c).to_string_as(false), None => "gateway".to_string(), } } + + pub fn tags_string(&self) -> String { + self.tags.join(",") + } } +impl fmt::Display for ZoneRedundancy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ZoneRedundancy::Maximum => write!(f, "maximum"), + ZoneRedundancy::AtLeast(x) => write!(f, "{}", x), + } + } +} + +impl core::str::FromStr for ZoneRedundancy { + type Err = &'static str; + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "none" | "max" | "maximum" => Ok(ZoneRedundancy::Maximum), + x => { + let v = x + .parse::<usize>() + .map_err(|_| "zone redundancy must be 'none'/'max' or an integer")?; + Ok(ZoneRedundancy::AtLeast(v)) + } + } + } +} + +// Implementation of the ClusterLayout methods unrelated to the assignment algorithm. 
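+// (Constructor, staging hash computation, CRDT merge, applying/reverting staged changes, node and zone accessors, and the layout consistency check.)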
impl ClusterLayout { pub fn new(replication_factor: usize) -> Self { + // We set the default zone redundancy to be Maximum, meaning that the maximum + // possible value will be used depending on the cluster topology + let parameters = LayoutParameters { + zone_redundancy: ZoneRedundancy::Maximum, + }; + let staging_parameters = Lww::<LayoutParameters>::new(parameters); + let empty_lwwmap = LwwMap::new(); - let empty_lwwmap_hash = blake2sum(&nonversioned_encode(&empty_lwwmap).unwrap()[..]); - ClusterLayout { + let mut ret = ClusterLayout { version: 0, replication_factor, + partition_size: 0, roles: LwwMap::new(), node_id_vec: Vec::new(), - ring_assignation_data: Vec::new(), - staging: empty_lwwmap, - staging_hash: empty_lwwmap_hash, - } + ring_assignment_data: Vec::new(), + parameters, + staging_parameters, + staging_roles: empty_lwwmap, + staging_hash: [0u8; 32].into(), + }; + ret.staging_hash = ret.calculate_staging_hash(); + ret + } + + fn calculate_staging_hash(&self) -> Hash { + let hashed_tuple = (&self.staging_roles, &self.staging_parameters); + blake2sum(&nonversioned_encode(&hashed_tuple).unwrap()[..]) } pub fn merge(&mut self, other: &ClusterLayout) -> bool { @@ -91,9 +290,10 @@ impl ClusterLayout { true } Ordering::Equal => { - self.staging.merge(&other.staging); + self.staging_parameters.merge(&other.staging_parameters); + self.staging_roles.merge(&other.staging_roles); - let new_staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); + let new_staging_hash = self.calculate_staging_hash(); let changed = new_staging_hash != self.staging_hash; self.staging_hash = new_staging_hash; @@ -104,7 +304,7 @@ impl ClusterLayout { } } - pub fn apply_staged_changes(mut self, version: Option<u64>) -> Result<Self, Error> { + pub fn apply_staged_changes(mut self, version: Option<u64>) -> Result<(Self, Message), Error> { match version { None => { let error = r#" @@ -120,19 +320,18 @@ To know the correct value of the new layout version, invoke `garage layout show` } } - self.roles.merge(&self.staging); + self.roles.merge(&self.staging_roles); self.roles.retain(|(_, _, v)| v.0.is_some()); + self.parameters = *self.staging_parameters.get(); - if !self.calculate_partition_assignation() { - return Err(Error::Message("Could not calculate new assignation of partitions to nodes. This can happen if there are less nodes than the desired number of copies of your data (see the replication_mode configuration parameter).".into())); - } + self.staging_roles.clear(); + self.staging_hash = self.calculate_staging_hash(); - self.staging.clear(); - self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); + let msg = self.calculate_partition_assignment()?; self.version += 1; - Ok(self) + Ok((self, msg)) } pub fn revert_staged_changes(mut self, version: Option<u64>) -> Result<Self, Error> { @@ -151,8 +350,9 @@ To know the correct value of the new layout version, invoke `garage layout show` } } - self.staging.clear(); - self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); + self.staging_roles.clear(); + self.staging_parameters.update(self.parameters); + self.staging_hash = self.calculate_staging_hash(); self.version += 1; @@ -177,13 +377,98 @@ To know the correct value of the new layout version, invoke `garage layout show` } } + /// Returns the uuids of the non_gateway nodes in self.node_id_vec. 
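+ /// (A node is considered non-gateway if and only if its role has a capacity set.)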
+ fn nongateway_nodes(&self) -> Vec<Uuid> { + let mut result = Vec::<Uuid>::new(); + for uuid in self.node_id_vec.iter() { + match self.node_role(uuid) { + Some(role) if role.capacity.is_some() => result.push(*uuid), + _ => (), + } + } + result + } + + /// Given a node uuids, this function returns the label of its zone + fn get_node_zone(&self, uuid: &Uuid) -> Result<String, Error> { + match self.node_role(uuid) { + Some(role) => Ok(role.zone.clone()), + _ => Err(Error::Message( + "The Uuid does not correspond to a node present in the cluster.".into(), + )), + } + } + + /// Given a node uuids, this function returns its capacity or fails if it does not have any + pub fn get_node_capacity(&self, uuid: &Uuid) -> Result<u64, Error> { + match self.node_role(uuid) { + Some(NodeRole { + capacity: Some(cap), + zone: _, + tags: _, + }) => Ok(*cap), + _ => Err(Error::Message( + "The Uuid does not correspond to a node present in the \ + cluster or this node does not have a positive capacity." + .into(), + )), + } + } + + /// Returns the number of partitions associated to this node in the ring + pub fn get_node_usage(&self, uuid: &Uuid) -> Result<usize, Error> { + for (i, id) in self.node_id_vec.iter().enumerate() { + if id == uuid { + let mut count = 0; + for nod in self.ring_assignment_data.iter() { + if i as u8 == *nod { + count += 1 + } + } + return Ok(count); + } + } + Err(Error::Message( + "The Uuid does not correspond to a node present in the \ + cluster or this node does not have a positive capacity." + .into(), + )) + } + + /// Returns the sum of capacities of non gateway nodes in the cluster + fn get_total_capacity(&self) -> Result<u64, Error> { + let mut total_capacity = 0; + for uuid in self.nongateway_nodes().iter() { + total_capacity += self.get_node_capacity(uuid)?; + } + Ok(total_capacity) + } + + /// Returns the effective value of the zone_redundancy parameter + fn effective_zone_redundancy(&self) -> usize { + match self.parameters.zone_redundancy { + ZoneRedundancy::AtLeast(v) => v, + ZoneRedundancy::Maximum => { + let n_zones = self + .roles + .items() + .iter() + .filter_map(|(_, _, role)| role.0.as_ref().map(|x| x.zone.as_str())) + .collect::<HashSet<&str>>() + .len(); + std::cmp::min(n_zones, self.replication_factor) + } + } + } + /// Check a cluster layout for internal consistency + /// (assignment, roles, parameters, partition size) /// returns true if consistent, false if error - pub fn check(&self) -> bool { + pub fn check(&self) -> Result<(), String> { // Check that the hash of the staging data is correct - let staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); + let staging_hash = self.calculate_staging_hash(); if staging_hash != self.staging_hash { - return false; + return Err("staging_hash is incorrect".into()); } // Check that node_id_vec contains the correct list of nodes @@ -198,472 +483,811 @@ To know the correct value of the new layout version, invoke `garage layout show` let mut node_id_vec = self.node_id_vec.clone(); node_id_vec.sort(); if expected_nodes != node_id_vec { - return false; + return Err(format!("node_id_vec does not contain the correct set of nodes\nnode_id_vec: {:?}\nexpected: {:?}", node_id_vec, expected_nodes)); } - // Check that the assignation data has the correct length - if self.ring_assignation_data.len() != (1 << PARTITION_BITS) * self.replication_factor { - return false; + // Check that the assignment data has the correct length + let expected_assignment_data_len = (1 << PARTITION_BITS) * self.replication_factor; + if 
self.ring_assignment_data.len() != expected_assignment_data_len { + return Err(format!( + "ring_assignment_data has incorrect length {} instead of {}", + self.ring_assignment_data.len(), + expected_assignment_data_len + )); } // Check that the assigned nodes are correct identifiers // of nodes that are assigned a role // and that role is not the role of a gateway nodes - for x in self.ring_assignation_data.iter() { + for x in self.ring_assignment_data.iter() { if *x as usize >= self.node_id_vec.len() { - return false; + return Err(format!( + "ring_assignment_data contains invalid node id {}", + *x + )); } let node = self.node_id_vec[*x as usize]; match self.roles.get(&node) { Some(NodeRoleV(Some(x))) if x.capacity.is_some() => (), - _ => return false, + _ => return Err("ring_assignment_data contains id of a gateway node".into()), + } + } + + // Check that every partition is associated to distinct nodes + let zone_redundancy = self.effective_zone_redundancy(); + let rf = self.replication_factor; + for p in 0..(1 << PARTITION_BITS) { + let nodes_of_p = self.ring_assignment_data[rf * p..rf * (p + 1)].to_vec(); + if nodes_of_p.iter().unique().count() != rf { + return Err(format!("partition does not contain {} unique node ids", rf)); + } + // Check that every partition is spread over at least zone_redundancy zones. + let zones_of_p = nodes_of_p + .iter() + .map(|n| { + self.get_node_zone(&self.node_id_vec[*n as usize]) + .expect("Zone not found.") + }) + .collect::<Vec<_>>(); + if zones_of_p.iter().unique().count() < zone_redundancy { + return Err(format!( + "nodes of partition are in less than {} distinct zones", + zone_redundancy + )); + } + } + + // Check that the nodes capacities is consistent with the stored partitions + let mut node_usage = vec![0; MAX_NODE_NUMBER]; + for n in self.ring_assignment_data.iter() { + node_usage[*n as usize] += 1; + } + for (n, usage) in node_usage.iter().enumerate() { + if *usage > 0 { + let uuid = self.node_id_vec[n]; + let partusage = usage * self.partition_size; + let nodecap = self.get_node_capacity(&uuid).unwrap(); + if partusage > nodecap { + return Err(format!( + "node usage ({}) is bigger than node capacity ({})", + usage * self.partition_size, + nodecap + )); + } } } - true + // Check that the partition size stored is the one computed by the asignation + // algorithm. + let cl2 = self.clone(); + let (_, zone_to_id) = cl2.generate_nongateway_zone_ids().unwrap(); + match cl2.compute_optimal_partition_size(&zone_to_id, zone_redundancy) { + Ok(s) if s != self.partition_size => { + return Err(format!( + "partition_size ({}) is different than optimal value ({})", + self.partition_size, s + )) + } + Err(e) => return Err(format!("could not calculate optimal partition size: {}", e)), + _ => (), + } + + Ok(()) } +} - /// Calculate an assignation of partitions to nodes - pub fn calculate_partition_assignation(&mut self) -> bool { - let (configured_nodes, zones) = self.configured_nodes_and_zones(); - let n_zones = zones.len(); +// ==================================================================================== - println!("Calculating updated partition assignation, this may take some time..."); - println!(); +// Implementation of the ClusterLayout methods related to the assignment algorithm. +impl ClusterLayout { + /// This function calculates a new partition-to-node assignment. 
+ /// The computed assignment respects the node replication factor + /// and the zone redundancy parameter. It maximizes the capacity of a + /// partition (assuming all partitions have the same size). + /// Among such optimal assignments, it minimizes the distance to + /// the former assignment (if any) to minimize the amount of + /// data to be moved. + /// Staged role changes must be merged with the node roles before calling this function, + /// hence it must only be called from apply_staged_changes(), and it is therefore not public. + fn calculate_partition_assignment(&mut self) -> Result<Message, Error> { + // We update the node ids, since the node role list might have changed with the + // changes in the layout. We retrieve the old_assignment reframed with new ids + let old_assignment_opt = self.update_node_id_vec()?; + + let zone_redundancy = self.effective_zone_redundancy(); + + let mut msg = Message::new(); + msg.push("==== COMPUTATION OF A NEW PARTITION ASSIGNATION ====".into()); + msg.push("".into()); + msg.push(format!( + "Partitions are \ + replicated {} times on at least {} distinct zones.", + self.replication_factor, zone_redundancy + )); + + // We generate, once and for all, numerical ids for the zones of non-gateway nodes, + // to use them as indices in the flow graphs. + let (id_to_zone, zone_to_id) = self.generate_nongateway_zone_ids()?; + + let nb_nongateway_nodes = self.nongateway_nodes().len(); + if nb_nongateway_nodes < self.replication_factor { + return Err(Error::Message(format!( + "The number of nodes with positive \ + capacity ({}) is smaller than the replication factor ({}).", + nb_nongateway_nodes, self.replication_factor + ))); + } + if id_to_zone.len() < zone_redundancy { + return Err(Error::Message(format!( + "The number of zones with non-gateway \ + nodes ({}) is smaller than the redundancy parameter ({})", + id_to_zone.len(), + zone_redundancy + ))); + } - // Get old partition assignation - let old_partitions = self.parse_assignation_data(); + // We compute the optimal partition size + // Capacities should be given in a unit so that partition size is at least 100. + // In this case, integer rounding plays a marginal role in the percentages of + // optimality. + let partition_size = self.compute_optimal_partition_size(&zone_to_id, zone_redundancy)?; + + msg.push("".into()); + if old_assignment_opt.is_some() { + msg.push(format!( + "Optimal partition size: {} ({} in previous layout)", + ByteSize::b(partition_size).to_string_as(false), + ByteSize::b(self.partition_size).to_string_as(false) + )); + } else { + msg.push(format!( + "Optimal partition size: {}", + ByteSize::b(partition_size).to_string_as(false) + )); + } + // We write the partition size. 
+ self.partition_size = partition_size; + + if partition_size < 100 { + msg.push( + "WARNING: The partition size is low (< 100), make sure the capacities of your nodes are correct and are of at least a few MB" + .into(), + ); + } - // Start new partition assignation with nodes from old assignation where it is relevant - let mut partitions = old_partitions - .iter() - .map(|old_part| { - let mut new_part = PartitionAss::new(); - for node in old_part.nodes.iter() { - if let Some(role) = node.1 { - if role.capacity.is_some() { - new_part.add(None, n_zones, node.0, role); - } - } - } - new_part - }) - .collect::<Vec<_>>(); + // We compute a first flow/assignment that is heuristically close to the previous + // assignment + let mut gflow = + self.compute_candidate_assignment(&zone_to_id, &old_assignment_opt, zone_redundancy)?; + if let Some(assoc) = &old_assignment_opt { + // We minimize the distance to the previous assignment. + self.minimize_rebalance_load(&mut gflow, &zone_to_id, assoc)?; + } - // In various cases, not enough nodes will have been added for all partitions - // in the step above (e.g. due to node removals, or new zones being added). - // Here we add more nodes to make a complete (but sub-optimal) assignation, - // using an initial partition assignation that is calculated using the multi-dc maglev trick - match self.initial_partition_assignation() { - Some(initial_partitions) => { - for (part, ipart) in partitions.iter_mut().zip(initial_partitions.iter()) { - for _ in 0..2 { - for (id, info) in ipart.nodes.iter() { - if part.nodes.len() < self.replication_factor { - part.add(None, n_zones, id, info.unwrap()); - } - } - } - assert!(part.nodes.len() == self.replication_factor); - } - } - None => { - // Not enough nodes in cluster to build a correct assignation. - // Signal it by returning an error. - return false; - } + // We display statistics of the computation + msg.extend(self.output_stat(&gflow, &old_assignment_opt, &zone_to_id, &id_to_zone)?); + + // We update the layout structure + self.update_ring_from_flow(id_to_zone.len(), &gflow)?; + + if let Err(e) = self.check() { + return Err(Error::Message( + format!("Layout check returned an error: {}\nOriginal result of computation: <<<<\n{}\n>>>>", e, msg.join("\n")) + )); } - // Calculate how many partitions each node should ideally store, - // and how many partitions they are storing with the current assignation - // This defines our target for which we will optimize in the following loop. - let total_capacity = configured_nodes + Ok(msg) + } + + /// The LwwMap of node roles might have changed. This function updates the node_id_vec + /// and returns the assignment given by ring, with the new indices of the nodes, and + /// None if the node is not present anymore. + /// We work with the assumption that only this function and calculate_new_assignment + /// do modify assignment_ring and node_id_vec. 
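+ /// It returns Ok(None) when there was no previous assignment (ring_assignment_data is empty); + /// otherwise, ids of nodes that are no longer part of the layout are dropped from the + /// returned per-partition lists.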
+ fn update_node_id_vec(&mut self) -> Result<Option<Vec<Vec<usize>>>, Error> { + // (1) We compute the new node list + // Non gateway nodes should be coded on 8bits, hence they must be first in the list + // We build the new node ids + let new_non_gateway_nodes: Vec<Uuid> = self + .roles + .items() .iter() - .map(|(_, info)| info.capacity.unwrap_or(0)) - .sum::<u32>() as usize; - let total_partitions = self.replication_factor * (1 << PARTITION_BITS); - let target_partitions_per_node = configured_nodes + .filter(|(_, _, v)| matches!(&v.0, Some(r) if r.capacity.is_some())) + .map(|(k, _, _)| *k) + .collect(); + + if new_non_gateway_nodes.len() > MAX_NODE_NUMBER { + return Err(Error::Message(format!( + "There are more than {} non-gateway nodes in the new \ + layout. This is not allowed.", + MAX_NODE_NUMBER + ))); + } + + let new_gateway_nodes: Vec<Uuid> = self + .roles + .items() .iter() - .map(|(id, info)| { - ( - *id, - info.capacity.unwrap_or(0) as usize * total_partitions / total_capacity, - ) - }) - .collect::<HashMap<&Uuid, usize>>(); - - let mut partitions_per_node = self.partitions_per_node(&partitions[..]); - - println!("Target number of partitions per node:"); - for (node, npart) in target_partitions_per_node.iter() { - println!("{:?}\t{}", node, npart); - } - println!(); - - // Shuffle partitions between nodes so that nodes will reach (or better approach) - // their target number of stored partitions - loop { - let mut option = None; - for (i, part) in partitions.iter_mut().enumerate() { - for (irm, (idrm, _)) in part.nodes.iter().enumerate() { - let errratio = |node, parts| { - let tgt = *target_partitions_per_node.get(node).unwrap() as f32; - (parts - tgt) / tgt - }; - let square = |x| x * x; - - let partsrm = partitions_per_node.get(*idrm).cloned().unwrap_or(0) as f32; - - for (idadd, infoadd) in configured_nodes.iter() { - // skip replacing a node by itself - // and skip replacing by gateway nodes - if idadd == idrm || infoadd.capacity.is_none() { - continue; - } + .filter(|(_, _, v)| matches!(v, NodeRoleV(Some(r)) if r.capacity.is_none())) + .map(|(k, _, _)| *k) + .collect(); + + let mut new_node_id_vec = Vec::<Uuid>::new(); + new_node_id_vec.extend(new_non_gateway_nodes); + new_node_id_vec.extend(new_gateway_nodes); + + let old_node_id_vec = self.node_id_vec.clone(); + self.node_id_vec = new_node_id_vec.clone(); + + // (2) We retrieve the old association + // We rewrite the old association with the new indices. We only consider partition + // to node assignments where the node is still in use. + if self.ring_assignment_data.is_empty() { + // This is a new association + return Ok(None); + } - // We want to try replacing node idrm by node idadd - // if that brings us close to our goal. - let partsadd = partitions_per_node.get(*idadd).cloned().unwrap_or(0) as f32; - let oldcost = square(errratio(*idrm, partsrm) - errratio(*idadd, partsadd)); - let newcost = - square(errratio(*idrm, partsrm - 1.) - errratio(*idadd, partsadd + 1.)); - if newcost >= oldcost { - // not closer to our goal - continue; - } - let gain = oldcost - newcost; + if self.ring_assignment_data.len() != NB_PARTITIONS * self.replication_factor { + return Err(Error::Message( + "The old assignment does not have a size corresponding to \ + the old replication factor or the number of partitions." 
+ .into(), + )); + } - let mut newpart = part.clone(); + // We build a translation table between the uuid and new ids + let mut uuid_to_new_id = HashMap::<Uuid, usize>::new(); - newpart.nodes.remove(irm); - if !newpart.add(None, n_zones, idadd, infoadd) { - continue; - } - assert!(newpart.nodes.len() == self.replication_factor); + // We add the indices of only the new non-gateway nodes that can be used in the + // association ring + for (i, uuid) in new_node_id_vec.iter().enumerate() { + uuid_to_new_id.insert(*uuid, i); + } - if !old_partitions[i] - .is_valid_transition_to(&newpart, self.replication_factor) - { - continue; - } + let mut old_assignment = vec![Vec::<usize>::new(); NB_PARTITIONS]; + let rf = self.replication_factor; - if option - .as_ref() - .map(|(old_gain, _, _, _, _)| gain > *old_gain) - .unwrap_or(true) - { - option = Some((gain, i, idadd, idrm, newpart)); - } - } + for (p, old_assign_p) in old_assignment.iter_mut().enumerate() { + for old_id in &self.ring_assignment_data[p * rf..(p + 1) * rf] { + let uuid = old_node_id_vec[*old_id as usize]; + if uuid_to_new_id.contains_key(&uuid) { + old_assign_p.push(uuid_to_new_id[&uuid]); } } - if let Some((_gain, i, idadd, idrm, newpart)) = option { - *partitions_per_node.entry(idadd).or_insert(0) += 1; - *partitions_per_node.get_mut(idrm).unwrap() -= 1; - partitions[i] = newpart; - } else { - break; - } } - // Check we completed the assignation correctly - // (this is a set of checks for the algorithm's consistency) - assert!(partitions.len() == (1 << PARTITION_BITS)); - assert!(partitions - .iter() - .all(|p| p.nodes.len() == self.replication_factor)); + // We write the ring + self.ring_assignment_data = Vec::<CompactNodeType>::new(); - let new_partitions_per_node = self.partitions_per_node(&partitions[..]); - assert!(new_partitions_per_node == partitions_per_node); + Ok(Some(old_assignment)) + } - // Show statistics - println!("New number of partitions per node:"); - for (node, npart) in partitions_per_node.iter() { - let tgt = *target_partitions_per_node.get(node).unwrap(); - let pct = 100f32 * (*npart as f32) / (tgt as f32); - println!("{:?}\t{}\t({}% of {})", node, npart, pct as i32, tgt); + /// This function generates ids for the zone of the nodes appearing in + /// self.node_id_vec. + fn generate_nongateway_zone_ids(&self) -> Result<(Vec<String>, HashMap<String, usize>), Error> { + let mut id_to_zone = Vec::<String>::new(); + let mut zone_to_id = HashMap::<String, usize>::new(); + + for uuid in self.nongateway_nodes().iter() { + let r = self.node_role(uuid).unwrap(); + if !zone_to_id.contains_key(&r.zone) && r.capacity.is_some() { + zone_to_id.insert(r.zone.clone(), id_to_zone.len()); + id_to_zone.push(r.zone.clone()); + } } - println!(); + Ok((id_to_zone, zone_to_id)) + } - let mut diffcount = HashMap::new(); - for (oldpart, newpart) in old_partitions.iter().zip(partitions.iter()) { - let nminus = oldpart.txtplus(newpart); - let nplus = newpart.txtplus(oldpart); - if nminus != "[...]" || nplus != "[...]" { - let tup = (nminus, nplus); - *diffcount.entry(tup).or_insert(0) += 1; - } + /// This function computes by dichotomy the largest realizable partition size, given + /// the layout roles and parameters. 
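+ /// A candidate size s is feasible when the flow graph built for s admits a flow saturating + /// every partition, i.e. a flow of value NB_PARTITIONS * replication_factor; the dichotomy + /// keeps the largest feasible size between 1 and the total cluster capacity.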
+ fn compute_optimal_partition_size( + &self, + zone_to_id: &HashMap<String, usize>, + zone_redundancy: usize, + ) -> Result<u64, Error> { + let empty_set = HashSet::<(usize, usize)>::new(); + let mut g = self.generate_flow_graph(1, zone_to_id, &empty_set, zone_redundancy)?; + g.compute_maximal_flow()?; + if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 { + return Err(Error::Message( + "The storage capacity of he cluster is to small. It is \ + impossible to store partitions of size 1." + .into(), + )); } - if diffcount.is_empty() { - println!("No data will be moved between nodes."); - } else { - let mut diffcount = diffcount.into_iter().collect::<Vec<_>>(); - diffcount.sort(); - println!("Number of partitions that move:"); - for ((nminus, nplus), npart) in diffcount { - println!("\t{}\t{} -> {}", npart, nminus, nplus); + + let mut s_down = 1; + let mut s_up = self.get_total_capacity()?; + while s_down + 1 < s_up { + g = self.generate_flow_graph( + (s_down + s_up) / 2, + zone_to_id, + &empty_set, + zone_redundancy, + )?; + g.compute_maximal_flow()?; + if g.get_flow_value()? < (NB_PARTITIONS * self.replication_factor) as i64 { + s_up = (s_down + s_up) / 2; + } else { + s_down = (s_down + s_up) / 2; } } - println!(); - - // Calculate and save new assignation data - let (nodes, assignation_data) = - self.compute_assignation_data(&configured_nodes[..], &partitions[..]); - self.node_id_vec = nodes; - self.ring_assignation_data = assignation_data; - - true + Ok(s_down) } - fn initial_partition_assignation(&self) -> Option<Vec<PartitionAss<'_>>> { - let (configured_nodes, zones) = self.configured_nodes_and_zones(); - let n_zones = zones.len(); - - // Create a vector of partition indices (0 to 2**PARTITION_BITS-1) - let partitions_idx = (0usize..(1usize << PARTITION_BITS)).collect::<Vec<_>>(); - - // Prepare ring - let mut partitions: Vec<PartitionAss> = partitions_idx - .iter() - .map(|_i| PartitionAss::new()) - .collect::<Vec<_>>(); + fn generate_graph_vertices(nb_zones: usize, nb_nodes: usize) -> Vec<Vertex> { + let mut vertices = vec![Vertex::Source, Vertex::Sink]; + for p in 0..NB_PARTITIONS { + vertices.push(Vertex::Pup(p)); + vertices.push(Vertex::Pdown(p)); + for z in 0..nb_zones { + vertices.push(Vertex::PZ(p, z)); + } + } + for n in 0..nb_nodes { + vertices.push(Vertex::N(n)); + } + vertices + } - // Create MagLev priority queues for each node - let mut queues = configured_nodes - .iter() - .filter(|(_id, info)| info.capacity.is_some()) - .map(|(node_id, node_info)| { - let mut parts = partitions_idx - .iter() - .map(|i| { - let part_data = - [&u16::to_be_bytes(*i as u16)[..], node_id.as_slice()].concat(); - (*i, fasthash(&part_data[..])) - }) - .collect::<Vec<_>>(); - parts.sort_by_key(|(_i, h)| *h); - let parts_i = parts.iter().map(|(i, _h)| *i).collect::<Vec<_>>(); - (node_id, node_info, parts_i, 0) - }) - .collect::<Vec<_>>(); + /// Generates the graph to compute the maximal flow corresponding to the optimal + /// partition assignment. + /// exclude_assoc is the set of (partition, node) association that we are forbidden + /// to use (hence we do not add the corresponding edge to the graph). This parameter + /// is used to compute a first flow that uses only edges appearing in the previous + /// assignment. This produces a solution that heuristically should be close to the + /// previous one. 
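+ /// The graph has the shape Source -> Pup(p)/Pdown(p) -> PZ(p,z) -> N(n) -> Sink: Pup(p) has + /// capacity zone_redundancy and unit edges towards the zones, forcing that many copies of p + /// into distinct zones, while Pdown(p) carries the remaining copies without a zone constraint; + /// the edge from a node to the sink has capacity node_capacity / partition_size.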
+ fn generate_flow_graph( + &self, + partition_size: u64, + zone_to_id: &HashMap<String, usize>, + exclude_assoc: &HashSet<(usize, usize)>, + zone_redundancy: usize, + ) -> Result<Graph<FlowEdge>, Error> { + let vertices = + ClusterLayout::generate_graph_vertices(zone_to_id.len(), self.nongateway_nodes().len()); + let mut g = Graph::<FlowEdge>::new(&vertices); + let nb_zones = zone_to_id.len(); + for p in 0..NB_PARTITIONS { + g.add_edge(Vertex::Source, Vertex::Pup(p), zone_redundancy as u64)?; + g.add_edge( + Vertex::Source, + Vertex::Pdown(p), + (self.replication_factor - zone_redundancy) as u64, + )?; + for z in 0..nb_zones { + g.add_edge(Vertex::Pup(p), Vertex::PZ(p, z), 1)?; + g.add_edge( + Vertex::Pdown(p), + Vertex::PZ(p, z), + self.replication_factor as u64, + )?; + } + } + for n in 0..self.nongateway_nodes().len() { + let node_capacity = self.get_node_capacity(&self.node_id_vec[n])?; + let node_zone = zone_to_id[&self.get_node_zone(&self.node_id_vec[n])?]; + g.add_edge(Vertex::N(n), Vertex::Sink, node_capacity / partition_size)?; + for p in 0..NB_PARTITIONS { + if !exclude_assoc.contains(&(p, n)) { + g.add_edge(Vertex::PZ(p, node_zone), Vertex::N(n), 1)?; + } + } + } + Ok(g) + } - let max_capacity = configured_nodes - .iter() - .filter_map(|(_, node_info)| node_info.capacity) - .fold(0, std::cmp::max); - - // Fill up ring - for rep in 0..self.replication_factor { - queues.sort_by_key(|(ni, _np, _q, _p)| { - let queue_data = [&u16::to_be_bytes(rep as u16)[..], ni.as_slice()].concat(); - fasthash(&queue_data[..]) - }); - - for (_, _, _, pos) in queues.iter_mut() { - *pos = 0; - } - - let mut remaining = partitions_idx.len(); - while remaining > 0 { - let remaining0 = remaining; - for i_round in 0..max_capacity { - for (node_id, node_info, q, pos) in queues.iter_mut() { - if i_round >= node_info.capacity.unwrap() { - continue; - } - for (pos2, &qv) in q.iter().enumerate().skip(*pos) { - if partitions[qv].add(Some(rep + 1), n_zones, node_id, node_info) { - remaining -= 1; - *pos = pos2 + 1; - break; - } - } - } + /// This function computes a first optimal assignment (in the form of a flow graph). + fn compute_candidate_assignment( + &self, + zone_to_id: &HashMap<String, usize>, + prev_assign_opt: &Option<Vec<Vec<usize>>>, + zone_redundancy: usize, + ) -> Result<Graph<FlowEdge>, Error> { + // We list the (partition,node) associations that are not used in the + // previous assignment + let mut exclude_edge = HashSet::<(usize, usize)>::new(); + if let Some(prev_assign) = prev_assign_opt { + let nb_nodes = self.nongateway_nodes().len(); + for (p, prev_assign_p) in prev_assign.iter().enumerate() { + for n in 0..nb_nodes { + exclude_edge.insert((p, n)); } - if remaining == remaining0 { - // No progress made, exit - return None; + for n in prev_assign_p.iter() { + exclude_edge.remove(&(p, *n)); } } } - Some(partitions) + // We compute the best flow using only the edges used in the previous assignment + let mut g = self.generate_flow_graph( + self.partition_size, + zone_to_id, + &exclude_edge, + zone_redundancy, + )?; + g.compute_maximal_flow()?; + + // We add the excluded edges and compute the maximal flow with the full graph. + // The algorithm is such that it will start with the flow that we just computed + // and find ameliorating paths from that. 
+ for (p, n) in exclude_edge.iter() { + let node_zone = zone_to_id[&self.get_node_zone(&self.node_id_vec[*n])?]; + g.add_edge(Vertex::PZ(*p, node_zone), Vertex::N(*n), 1)?; + } + g.compute_maximal_flow()?; + Ok(g) } - fn configured_nodes_and_zones(&self) -> (Vec<(&Uuid, &NodeRole)>, HashSet<&str>) { - let configured_nodes = self - .roles - .items() - .iter() - .filter(|(_id, _, info)| info.0.is_some()) - .map(|(id, _, info)| (id, info.0.as_ref().unwrap())) - .collect::<Vec<(&Uuid, &NodeRole)>>(); + /// This function updates the flow graph gflow to minimize the distance between + /// its corresponding assignment and the previous one + fn minimize_rebalance_load( + &self, + gflow: &mut Graph<FlowEdge>, + zone_to_id: &HashMap<String, usize>, + prev_assign: &[Vec<usize>], + ) -> Result<(), Error> { + // We define a cost function on the edges (pairs of vertices) corresponding + // to the distance between the two assignments. + let mut cost = CostFunction::new(); + for (p, assoc_p) in prev_assign.iter().enumerate() { + for n in assoc_p.iter() { + let node_zone = zone_to_id[&self.get_node_zone(&self.node_id_vec[*n])?]; + cost.insert((Vertex::PZ(p, node_zone), Vertex::N(*n)), -1); + } + } - let zones = configured_nodes - .iter() - .filter(|(_id, info)| info.capacity.is_some()) - .map(|(_id, info)| info.zone.as_str()) - .collect::<HashSet<&str>>(); + // We compute the maximal length of a simple path in gflow. It is used in the + // Bellman-Ford algorithm in optimize_flow_with_cost to set the number + // of iterations. + let nb_nodes = self.nongateway_nodes().len(); + let path_length = 4 * nb_nodes; + gflow.optimize_flow_with_cost(&cost, path_length)?; - (configured_nodes, zones) + Ok(()) } - fn compute_assignation_data<'a>( - &self, - configured_nodes: &[(&'a Uuid, &'a NodeRole)], - partitions: &[PartitionAss<'a>], - ) -> (Vec<Uuid>, Vec<CompactNodeType>) { - assert!(partitions.len() == (1 << PARTITION_BITS)); - - // Make a canonical order for nodes - let mut nodes = configured_nodes - .iter() - .filter(|(_id, info)| info.capacity.is_some()) - .map(|(id, _)| **id) - .collect::<Vec<_>>(); - let nodes_rev = nodes - .iter() - .enumerate() - .map(|(i, id)| (*id, i as CompactNodeType)) - .collect::<HashMap<Uuid, CompactNodeType>>(); - - let mut assignation_data = vec![]; - for partition in partitions.iter() { - assert!(partition.nodes.len() == self.replication_factor); - for (id, _) in partition.nodes.iter() { - assignation_data.push(*nodes_rev.get(id).unwrap()); + /// This function updates the assignment ring from the flow graph. + fn update_ring_from_flow( + &mut self, + nb_zones: usize, + gflow: &Graph<FlowEdge>, + ) -> Result<(), Error> { + self.ring_assignment_data = Vec::<CompactNodeType>::new(); + for p in 0..NB_PARTITIONS { + for z in 0..nb_zones { + let assoc_vertex = gflow.get_positive_flow_from(Vertex::PZ(p, z))?; + for vertex in assoc_vertex.iter() { + if let Vertex::N(n) = vertex { + self.ring_assignment_data.push((*n).try_into().unwrap()); + } + } } } - nodes.extend( - configured_nodes - .iter() - .filter(|(_id, info)| info.capacity.is_none()) - .map(|(id, _)| **id), - ); - - (nodes, assignation_data) + if self.ring_assignment_data.len() != NB_PARTITIONS * self.replication_factor { + return Err(Error::Message( + "Critical Error : the association ring we produced does not \ + have the right size." 
+ .into(), + )); + } + Ok(()) } - fn parse_assignation_data(&self) -> Vec<PartitionAss<'_>> { - if self.ring_assignation_data.len() == self.replication_factor * (1 << PARTITION_BITS) { - // If the previous assignation data is correct, use that - let mut partitions = vec![]; - for i in 0..(1 << PARTITION_BITS) { - let mut part = PartitionAss::new(); - for node_i in self.ring_assignation_data - [i * self.replication_factor..(i + 1) * self.replication_factor] - .iter() - { - let node_id = &self.node_id_vec[*node_i as usize]; + /// This function returns a message summing up the partition repartition of the new + /// layout, and other statistics of the partition assignment computation. + fn output_stat( + &self, + gflow: &Graph<FlowEdge>, + prev_assign_opt: &Option<Vec<Vec<usize>>>, + zone_to_id: &HashMap<String, usize>, + id_to_zone: &[String], + ) -> Result<Message, Error> { + let mut msg = Message::new(); + + let used_cap = self.partition_size * NB_PARTITIONS as u64 * self.replication_factor as u64; + let total_cap = self.get_total_capacity()?; + let percent_cap = 100.0 * (used_cap as f32) / (total_cap as f32); + msg.push(format!( + "Usable capacity / total cluster capacity: {} / {} ({:.1} %)", + ByteSize::b(used_cap).to_string_as(false), + ByteSize::b(total_cap).to_string_as(false), + percent_cap + )); + msg.push(format!( + "Effective capacity (replication factor {}): {}", + self.replication_factor, + ByteSize::b(used_cap / self.replication_factor as u64).to_string_as(false) + )); + if percent_cap < 80. { + msg.push("".into()); + msg.push( + "If the percentage is too low, it might be that the \ + cluster topology and redundancy constraints are forcing the use of nodes/zones with small \ + storage capacities." + .into(), + ); + msg.push( + "You might want to move storage capacity between zones or relax the redundancy constraint." 
+ .into(), + ); + msg.push( + "See the detailed statistics below and look for saturated nodes/zones.".into(), + ); + } - if let Some(NodeRoleV(Some(info))) = self.roles.get(node_id) { - part.nodes.push((node_id, Some(info))); - } else { - part.nodes.push((node_id, None)); + // We define and fill in the following tables + let storing_nodes = self.nongateway_nodes(); + let mut new_partitions = vec![0; storing_nodes.len()]; + let mut stored_partitions = vec![0; storing_nodes.len()]; + + let mut new_partitions_zone = vec![0; id_to_zone.len()]; + let mut stored_partitions_zone = vec![0; id_to_zone.len()]; + + for p in 0..NB_PARTITIONS { + for z in 0..id_to_zone.len() { + let pz_nodes = gflow.get_positive_flow_from(Vertex::PZ(p, z))?; + if !pz_nodes.is_empty() { + stored_partitions_zone[z] += 1; + if let Some(prev_assign) = prev_assign_opt { + let mut old_zones_of_p = Vec::<usize>::new(); + for n in prev_assign[p].iter() { + old_zones_of_p + .push(zone_to_id[&self.get_node_zone(&self.node_id_vec[*n])?]); + } + if !old_zones_of_p.contains(&z) { + new_partitions_zone[z] += 1; + } + } + } + for vert in pz_nodes.iter() { + if let Vertex::N(n) = *vert { + stored_partitions[n] += 1; + if let Some(prev_assign) = prev_assign_opt { + if !prev_assign[p].contains(&n) { + new_partitions[n] += 1; + } + } } } - partitions.push(part); } - partitions - } else { - // Otherwise start fresh - (0..(1 << PARTITION_BITS)) - .map(|_| PartitionAss::new()) - .collect() } - } - fn partitions_per_node<'a>(&self, partitions: &[PartitionAss<'a>]) -> HashMap<&'a Uuid, usize> { - let mut partitions_per_node = HashMap::<&Uuid, usize>::new(); - for p in partitions.iter() { - for (id, _) in p.nodes.iter() { - *partitions_per_node.entry(*id).or_insert(0) += 1; + if prev_assign_opt.is_none() { + new_partitions = stored_partitions.clone(); + //new_partitions_zone = stored_partitions_zone.clone(); + } + + // We display the statistics + + msg.push("".into()); + if prev_assign_opt.is_some() { + let total_new_partitions: usize = new_partitions.iter().sum(); + msg.push(format!( + "A total of {} new copies of partitions need to be \ + transferred.", + total_new_partitions + )); + msg.push("".into()); + } + + let mut table = vec![]; + for z in 0..id_to_zone.len() { + let mut nodes_of_z = Vec::<usize>::new(); + for n in 0..storing_nodes.len() { + if self.get_node_zone(&self.node_id_vec[n])? 
== id_to_zone[z] { + nodes_of_z.push(n); + } + } + let replicated_partitions: usize = + nodes_of_z.iter().map(|n| stored_partitions[*n]).sum(); + table.push(format!( + "{}\tTags\tPartitions\tCapacity\tUsable capacity", + id_to_zone[z] + )); + + let available_cap_z: u64 = self.partition_size * replicated_partitions as u64; + let mut total_cap_z = 0; + for n in nodes_of_z.iter() { + total_cap_z += self.get_node_capacity(&self.node_id_vec[*n])?; + } + let percent_cap_z = 100.0 * (available_cap_z as f32) / (total_cap_z as f32); + + for n in nodes_of_z.iter() { + let available_cap_n = stored_partitions[*n] as u64 * self.partition_size; + let total_cap_n = self.get_node_capacity(&self.node_id_vec[*n])?; + let tags_n = (self.node_role(&self.node_id_vec[*n]).ok_or("<??>"))?.tags_string(); + table.push(format!( + " {:?}\t{}\t{} ({} new)\t{}\t{} ({:.1}%)", + self.node_id_vec[*n], + tags_n, + stored_partitions[*n], + new_partitions[*n], + ByteSize::b(total_cap_n).to_string_as(false), + ByteSize::b(available_cap_n).to_string_as(false), + (available_cap_n as f32) / (total_cap_n as f32) * 100.0, + )); + } + + table.push(format!( + " TOTAL\t\t{} ({} unique)\t{}\t{} ({:.1}%)", + replicated_partitions, + stored_partitions_zone[z], + //new_partitions_zone[z], + ByteSize::b(total_cap_z).to_string_as(false), + ByteSize::b(available_cap_z).to_string_as(false), + percent_cap_z + )); + table.push("".into()); + } + msg.push(format_table::format_table_to_string(table)); + + Ok(msg) + } +} -// ---- Internal structs for partition assignation in layout ---- +// ==================================================================================== + +#[cfg(test)] +mod tests { + use super::{Error, *}; + use std::cmp::min; + + // This function checks that the partition size S computed is at least as good as the + // one given by a very naive algorithm. To do so, we try to run the naive algorithm + // assuming a partition size of S+1. If we succeed, it means that the computed assignment + // was not optimal. The naive algorithm is the following: + // - we compute the max number of partitions associated to every node, capped at the + // partition number. It gives the number of tokens of every node. + // - every zone has a number of tokens equal to the sum of the tokens of its nodes. + // - we cycle over the partitions and associate zone tokens while respecting the + // zone redundancy constraint. + // NOTE: the naive algorithm is not optimal. 
Counter example: + // take nb_partition = 3 ; replication_factor = 5; redundancy = 4; + // number of tokens by zone : (A, 4), (B,1), (C,4), (D, 4), (E, 2) + // With these parameters, the naive algo fails, whereas there is a solution: + // (A,A,C,D,E) , (A,B,C,D,D) (A,C,C,D,E) + fn check_against_naive(cl: &ClusterLayout) -> Result<bool, Error> { + let over_size = cl.partition_size + 1; + let mut zone_token = HashMap::<String, usize>::new(); + + let (zones, zone_to_id) = cl.generate_nongateway_zone_ids()?; + + if zones.is_empty() { + return Ok(false); + } -#[derive(Clone)] -struct PartitionAss<'a> { - nodes: Vec<(&'a Uuid, Option<&'a NodeRole>)>, -} + for z in zones.iter() { + zone_token.insert(z.clone(), 0); + } + for uuid in cl.nongateway_nodes().iter() { + let z = cl.get_node_zone(uuid)?; + let c = cl.get_node_capacity(uuid)?; + zone_token.insert( + z.clone(), + zone_token[&z] + min(NB_PARTITIONS, (c / over_size) as usize), + ); + } -impl<'a> PartitionAss<'a> { - fn new() -> Self { - Self { nodes: Vec::new() } - } + // For every partition, we count the number of zone already associated and + // the name of the last zone associated - fn nplus(&self, other: &PartitionAss<'a>) -> usize { - self.nodes - .iter() - .filter(|x| !other.nodes.contains(x)) - .count() - } + let mut id_zone_token = vec![0; zones.len()]; + for (z, t) in zone_token.iter() { + id_zone_token[zone_to_id[z]] = *t; + } - fn txtplus(&self, other: &PartitionAss<'a>) -> String { - let mut nodes = self - .nodes - .iter() - .filter(|x| !other.nodes.contains(x)) - .map(|x| format!("{:?}", x.0)) - .collect::<Vec<_>>(); - nodes.sort(); - if self.nodes.iter().any(|x| other.nodes.contains(x)) { - nodes.push("...".into()); + let mut nb_token = vec![0; NB_PARTITIONS]; + let mut last_zone = vec![zones.len(); NB_PARTITIONS]; + + let mut curr_zone = 0; + + let redundancy = cl.effective_zone_redundancy(); + + for replic in 0..cl.replication_factor { + for p in 0..NB_PARTITIONS { + while id_zone_token[curr_zone] == 0 + || (last_zone[p] == curr_zone + && redundancy - nb_token[p] <= cl.replication_factor - replic) + { + curr_zone += 1; + if curr_zone >= zones.len() { + return Ok(true); + } + } + id_zone_token[curr_zone] -= 1; + if last_zone[p] != curr_zone { + nb_token[p] += 1; + last_zone[p] = curr_zone; + } + } } - format!("[{}]", nodes.join(" ")) - } - fn is_valid_transition_to(&self, other: &PartitionAss<'a>, replication_factor: usize) -> bool { - let min_keep_nodes_per_part = (replication_factor + 1) / 2; - let n_removed = self.nplus(other); + return Ok(false); + } - if self.nodes.len() <= min_keep_nodes_per_part { - n_removed == 0 - } else { - n_removed <= self.nodes.len() - min_keep_nodes_per_part + fn show_msg(msg: &Message) { + for s in msg.iter() { + println!("{}", s); } } - // add is a key function in creating a PartitionAss, i.e. the list of nodes - // to which a partition is assigned. It tries to add a certain node id to the - // assignation, but checks that doing so is compatible with the NECESSARY - // condition that the partition assignation must be dispersed over different - // zones (datacenters) if enough zones exist. This is why it takes a n_zones - // parameter, which is the total number of zones that have existing nodes: - // if nodes in the assignation already cover all n_zones zones, then any node - // that is not yet in the assignation can be added. Otherwise, only nodes - // that are in a new zone can be added. 
- fn add( - &mut self, - target_len: Option<usize>, - n_zones: usize, - node: &'a Uuid, - role: &'a NodeRole, - ) -> bool { - if let Some(tl) = target_len { - if self.nodes.len() != tl - 1 { - return false; + fn update_layout( + cl: &mut ClusterLayout, + node_id_vec: &Vec<u8>, + node_capacity_vec: &Vec<u64>, + node_zone_vec: &Vec<String>, + zone_redundancy: usize, + ) { + for i in 0..node_id_vec.len() { + if let Some(x) = FixedBytes32::try_from(&[i as u8; 32]) { + cl.node_id_vec.push(x); } - } - let p_zns = self - .nodes - .iter() - .map(|(_id, info)| info.unwrap().zone.as_str()) - .collect::<HashSet<&str>>(); - if (p_zns.len() < n_zones && !p_zns.contains(&role.zone.as_str())) - || (p_zns.len() == n_zones && !self.nodes.iter().any(|(id, _)| *id == node)) - { - self.nodes.push((node, Some(role))); - true - } else { - false + let update = cl.staging_roles.update_mutator( + cl.node_id_vec[i], + NodeRoleV(Some(NodeRole { + zone: (node_zone_vec[i].to_string()), + capacity: (Some(node_capacity_vec[i])), + tags: (vec![]), + })), + ); + cl.staging_roles.merge(&update); } + cl.staging_parameters.update(LayoutParameters { + zone_redundancy: ZoneRedundancy::AtLeast(zone_redundancy), + }); + cl.staging_hash = cl.calculate_staging_hash(); + } + + #[test] + fn test_assignment() { + let mut node_id_vec = vec![1, 2, 3]; + let mut node_capacity_vec = vec![4000, 1000, 2000]; + let mut node_zone_vec = vec!["A", "B", "C"] + .into_iter() + .map(|x| x.to_string()) + .collect(); + + let mut cl = ClusterLayout::new(3); + update_layout(&mut cl, &node_id_vec, &node_capacity_vec, &node_zone_vec, 3); + let v = cl.version; + let (mut cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); + show_msg(&msg); + assert_eq!(cl.check(), Ok(())); + assert!(matches!(check_against_naive(&cl), Ok(true))); + + node_id_vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9]; + node_capacity_vec = vec![4000, 1000, 1000, 3000, 1000, 1000, 2000, 10000, 2000]; + node_zone_vec = vec!["A", "B", "C", "C", "C", "B", "G", "H", "I"] + .into_iter() + .map(|x| x.to_string()) + .collect(); + update_layout(&mut cl, &node_id_vec, &node_capacity_vec, &node_zone_vec, 2); + let v = cl.version; + let (mut cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); + show_msg(&msg); + assert_eq!(cl.check(), Ok(())); + assert!(matches!(check_against_naive(&cl), Ok(true))); + + node_capacity_vec = vec![4000, 1000, 2000, 7000, 1000, 1000, 2000, 10000, 2000]; + update_layout(&mut cl, &node_id_vec, &node_capacity_vec, &node_zone_vec, 3); + let v = cl.version; + let (mut cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); + show_msg(&msg); + assert_eq!(cl.check(), Ok(())); + assert!(matches!(check_against_naive(&cl), Ok(true))); + + node_capacity_vec = vec![ + 4000000, 4000000, 2000000, 7000000, 1000000, 9000000, 2000000, 10000, 2000000, + ]; + update_layout(&mut cl, &node_id_vec, &node_capacity_vec, &node_zone_vec, 1); + let v = cl.version; + let (cl, msg) = cl.apply_staged_changes(Some(v + 1)).unwrap(); + show_msg(&msg); + assert_eq!(cl.check(), Ok(())); + assert!(matches!(check_against_naive(&cl), Ok(true))); } } diff --git a/src/rpc/lib.rs b/src/rpc/lib.rs index 5aec92c0..a5f8fc6e 100644 --- a/src/rpc/lib.rs +++ b/src/rpc/lib.rs @@ -11,6 +11,7 @@ mod consul; #[cfg(feature = "kubernetes-discovery")] mod kubernetes; +pub mod graph_algo; pub mod layout; pub mod replication_mode; pub mod ring; diff --git a/src/rpc/ring.rs b/src/rpc/ring.rs index 73a126a2..6a2e5c72 100644 --- a/src/rpc/ring.rs +++ b/src/rpc/ring.rs @@ -40,6 +40,7 @@ pub struct Ring { // Type to store 
compactly the id of a node in the system // Change this to u16 the day we want to have more than 256 nodes in a cluster pub type CompactNodeType = u8; +pub const MAX_NODE_NUMBER: usize = 256; // The maximum number of times an object might get replicated // This must be at least 3 because Garage supports 3-way replication @@ -62,12 +63,12 @@ struct RingEntry { impl Ring { pub(crate) fn new(layout: ClusterLayout, replication_factor: usize) -> Self { if replication_factor != layout.replication_factor { - warn!("Could not build ring: replication factor does not match between local configuration and network role assignation."); + warn!("Could not build ring: replication factor does not match between local configuration and network role assignment."); return Self::empty(layout, replication_factor); } - if layout.ring_assignation_data.len() != replication_factor * (1 << PARTITION_BITS) { - warn!("Could not build ring: network role assignation data has invalid length"); + if layout.ring_assignment_data.len() != replication_factor * (1 << PARTITION_BITS) { + warn!("Could not build ring: network role assignment data has invalid length"); return Self::empty(layout, replication_factor); } @@ -77,7 +78,7 @@ impl Ring { let top = (i as u16) << (16 - PARTITION_BITS); let mut nodes_buf = [0u8; MAX_REPLICATION]; nodes_buf[..replication_factor].copy_from_slice( - &layout.ring_assignation_data + &layout.ring_assignment_data [replication_factor * i..replication_factor * (i + 1)], ); RingEntry { diff --git a/src/rpc/system.rs b/src/rpc/system.rs index b42e49fc..4b40bec4 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -9,10 +9,10 @@ use std::time::{Duration, Instant}; use arc_swap::ArcSwap; use async_trait::async_trait; -use futures::{join, select}; -use futures_util::future::*; +use futures::join; use serde::{Deserialize, Serialize}; use sodiumoxide::crypto::sign::ed25519; +use tokio::select; use tokio::sync::watch; use tokio::sync::Mutex; @@ -22,9 +22,9 @@ use netapp::peering::fullmesh::FullMeshPeeringStrategy; use netapp::util::parse_and_resolve_peer_addr_async; use netapp::{NetApp, NetworkKey, NodeID, NodeKey}; -use garage_util::config::Config; #[cfg(feature = "kubernetes-discovery")] use garage_util::config::KubernetesDiscoveryConfig; +use garage_util::config::{Config, DataDirEnum}; use garage_util::data::*; use garage_util::error::*; use garage_util::persister::Persister; @@ -119,7 +119,7 @@ pub struct System { /// Path to metadata directory pub metadata_dir: PathBuf, /// Path to data directory - pub data_dir: PathBuf, + pub data_dir: DataDirEnum, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -151,7 +151,7 @@ pub struct KnownNodeInfo { pub status: NodeStatus, } -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy)] pub struct ClusterHealth { /// The current health status of the cluster (see below) pub status: ClusterHealthStatus, @@ -171,7 +171,7 @@ pub struct ClusterHealth { pub partitions_all_ok: usize, } -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy)] pub enum ClusterHealthStatus { /// All nodes are available Healthy, @@ -666,9 +666,9 @@ impl System { let update_ring = self.update_ring.lock().await; let mut layout: ClusterLayout = self.ring.borrow().layout.clone(); - let prev_layout_check = layout.check(); + let prev_layout_check = layout.check().is_ok(); if layout.merge(adv) { - if prev_layout_check && !layout.check() { + if prev_layout_check && layout.check().is_err() { error!("New cluster layout is invalid, 
discarding."); return Err(Error::Message( "New cluster layout is invalid, discarding.".into(), @@ -702,7 +702,7 @@ impl System { async fn status_exchange_loop(&self, mut stop_signal: watch::Receiver<bool>) { while !*stop_signal.borrow() { - let restart_at = tokio::time::sleep(STATUS_EXCHANGE_INTERVAL); + let restart_at = Instant::now() + STATUS_EXCHANGE_INTERVAL; self.update_local_status(); let local_status: NodeStatus = self.local_status.load().as_ref().clone(); @@ -711,20 +711,21 @@ impl System { .broadcast( &self.system_endpoint, SystemRpc::AdvertiseStatus(local_status), - RequestStrategy::with_priority(PRIO_HIGH), + RequestStrategy::with_priority(PRIO_HIGH) + .with_custom_timeout(STATUS_EXCHANGE_INTERVAL), ) .await; select! { - _ = restart_at.fuse() => {}, - _ = stop_signal.changed().fuse() => {}, + _ = tokio::time::sleep_until(restart_at.into()) => {}, + _ = stop_signal.changed() => {}, } } } async fn discovery_loop(self: &Arc<Self>, mut stop_signal: watch::Receiver<bool>) { while !*stop_signal.borrow() { - let not_configured = !self.ring.borrow().layout.check(); + let not_configured = self.ring.borrow().layout.check().is_err(); let no_peers = self.fullmesh.get_peer_list().len() < self.replication_factor; let expected_n_nodes = self.ring.borrow().layout.num_nodes(); let bad_peers = self @@ -799,10 +800,9 @@ impl System { #[cfg(feature = "kubernetes-discovery")] tokio::spawn(self.clone().advertise_to_kubernetes()); - let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL); select! { - _ = restart_at.fuse() => {}, - _ = stop_signal.changed().fuse() => {}, + _ = tokio::time::sleep(DISCOVERY_INTERVAL) => {}, + _ = stop_signal.changed() => {}, } } } @@ -890,20 +890,47 @@ impl NodeStatus { } } - fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &Path, metrics: &SystemMetrics) { - use systemstat::{Platform, System}; - let mounts = System::new().mounts().unwrap_or_default(); - - let mount_avail = |path: &Path| { - mounts - .iter() - .filter(|x| path.starts_with(&x.fs_mounted_on)) - .max_by_key(|x| x.fs_mounted_on.len()) - .map(|x| (x.avail.as_u64(), x.total.as_u64())) + fn update_disk_usage( + &mut self, + meta_dir: &Path, + data_dir: &DataDirEnum, + metrics: &SystemMetrics, + ) { + use nix::sys::statvfs::statvfs; + let mount_avail = |path: &Path| match statvfs(path) { + Ok(x) => { + let avail = x.blocks_available() as u64 * x.fragment_size() as u64; + let total = x.blocks() as u64 * x.fragment_size() as u64; + Some((x.filesystem_id(), avail, total)) + } + Err(_) => None, }; - self.meta_disk_avail = mount_avail(meta_dir); - self.data_disk_avail = mount_avail(data_dir); + self.meta_disk_avail = mount_avail(meta_dir).map(|(_, a, t)| (a, t)); + self.data_disk_avail = match data_dir { + DataDirEnum::Single(dir) => mount_avail(dir).map(|(_, a, t)| (a, t)), + DataDirEnum::Multiple(dirs) => (|| { + // TODO: more precise calculation that takes into account + // how data is going to be spread among partitions + let mut mounts = HashMap::new(); + for dir in dirs.iter() { + if dir.capacity.is_none() { + continue; + } + match mount_avail(&dir.path) { + Some((fsid, avail, total)) => { + mounts.insert(fsid, (avail, total)); + } + None => return None, + } + } + Some( + mounts + .into_iter() + .fold((0, 0), |(x, y), (_, (a, b))| (x + a, y + b)), + ) + })(), + }; if let Some((avail, total)) = self.meta_disk_avail { metrics diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml index d0776945..08ccec72 100644 --- a/src/table/Cargo.toml +++ b/src/table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"garage_table" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" diff --git a/src/table/data.rs b/src/table/data.rs index e76836ca..bbfdf58b 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -34,7 +34,7 @@ pub struct TableData<F: TableSchema, R: TableReplication> { pub(crate) merkle_todo_notify: Notify, pub(crate) insert_queue: db::Tree, - pub(crate) insert_queue_notify: Notify, + pub(crate) insert_queue_notify: Arc<Notify>, pub(crate) gc_todo: CountedTree, @@ -80,7 +80,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { merkle_todo, merkle_todo_notify: Notify::new(), insert_queue, - insert_queue_notify: Notify::new(), + insert_queue_notify: Arc::new(Notify::new()), gc_todo, metrics, }) @@ -203,14 +203,14 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { ) -> Result<Option<F::E>, Error> { let tree_key = self.tree_key(partition_key, sort_key); - let changed = self.store.db().transaction(|mut tx| { + let changed = self.store.db().transaction(|tx| { let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, &tree_key)? { Some(old_bytes) => { let old_entry = self.decode_entry(&old_bytes).map_err(db::TxError::Abort)?; - let new_entry = update_fn(&mut tx, Some(old_entry.clone()))?; + let new_entry = update_fn(tx, Some(old_entry.clone()))?; (Some(old_entry), Some(old_bytes), new_entry) } - None => (None, None, update_fn(&mut tx, None)?), + None => (None, None, update_fn(tx, None)?), }; // Changed can be true in two scenarios @@ -233,7 +233,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { tx.insert(&self.store, &tree_key, new_bytes)?; self.instance - .updated(&mut tx, old_entry.as_ref(), Some(&new_entry))?; + .updated(tx, old_entry.as_ref(), Some(&new_entry))?; Ok(Some((new_entry, new_bytes_hash))) } else { @@ -270,14 +270,14 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { let removed = self .store .db() - .transaction(|mut tx| match tx.get(&self.store, k)? { + .transaction(|tx| match tx.get(&self.store, k)? { Some(cur_v) if cur_v == v => { let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?; tx.remove(&self.store, k)?; tx.insert(&self.merkle_todo, k, vec![])?; - self.instance.updated(&mut tx, Some(&old_entry), None)?; + self.instance.updated(tx, Some(&old_entry), None)?; Ok(true) } _ => Ok(false), @@ -298,14 +298,14 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { let removed = self .store .db() - .transaction(|mut tx| match tx.get(&self.store, k)? { + .transaction(|tx| match tx.get(&self.store, k)? 
{ Some(cur_v) if blake2sum(&cur_v[..]) == vhash => { let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?; tx.remove(&self.store, k)?; tx.insert(&self.merkle_todo, k, vec![])?; - self.instance.updated(&mut tx, Some(&old_entry), None)?; + self.instance.updated(tx, Some(&old_entry), None)?; Ok(true) } _ => Ok(false), @@ -339,7 +339,9 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { .map_err(Error::RmpEncode) .map_err(db::TxError::Abort)?; tx.insert(&self.insert_queue, &tree_key, new_entry)?; - self.insert_queue_notify.notify_one(); + + let notif = self.insert_queue_notify.clone(); + tx.on_commit(move || notif.notify_one()); Ok(()) } @@ -347,9 +349,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> { // ---- Utility functions ---- pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec<u8> { - let mut ret = p.hash().to_vec(); - ret.extend(s.sort_key()); - ret + [p.hash().as_slice(), s.sort_key()].concat() } pub fn decode_entry(&self, bytes: &[u8]) -> Result<F::E, Error> { diff --git a/src/table/merkle.rs b/src/table/merkle.rs index e86d0251..4577f872 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -108,9 +108,9 @@ impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> { self.data .merkle_tree .db() - .transaction(|mut tx| self.update_item_rec(&mut tx, k, &khash, &key, new_vhash))?; + .transaction(|tx| self.update_item_rec(tx, k, &khash, &key, new_vhash))?; - let deleted = self.data.merkle_todo.db().transaction(|mut tx| { + let deleted = self.data.merkle_todo.db().transaction(|tx| { let remove = matches!(tx.get(&self.data.merkle_todo, k)?, Some(ov) if ov == vhash_by); if remove { tx.remove(&self.data.merkle_todo, k)?; diff --git a/src/table/queue.rs b/src/table/queue.rs index 0857209b..ffe0a4a7 100644 --- a/src/table/queue.rs +++ b/src/table/queue.rs @@ -12,7 +12,7 @@ use crate::replication::*; use crate::schema::*; use crate::table::*; -const BATCH_SIZE: usize = 100; +const BATCH_SIZE: usize = 1024; pub(crate) struct InsertQueueWorker<F, R>(pub(crate) Arc<Table<F, R>>) where @@ -53,7 +53,7 @@ impl<F: TableSchema, R: TableReplication> Worker for InsertQueueWorker<F, R> { self.0.insert_many(values).await?; - self.0.data.insert_queue.db().transaction(|mut tx| { + self.0.data.insert_queue.db().transaction(|tx| { for (k, v) in kv_pairs.iter() { if let Some(v2) = tx.get(&self.0.data.insert_queue, k)? { if &v2 == v { diff --git a/src/table/schema.rs b/src/table/schema.rs index 5cbf6c95..fc1a465e 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -6,6 +6,8 @@ use garage_util::migrate::Migrate; use crate::crdt::Crdt; +// =================================== PARTITION KEYS + /// Trait for field used to partition data pub trait PartitionKey: Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static @@ -29,6 +31,8 @@ impl PartitionKey for FixedBytes32 { } } +// =================================== SORT KEYS + /// Trait for field used to sort data pub trait SortKey: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static { /// Get the key used to sort @@ -47,6 +51,8 @@ impl SortKey for FixedBytes32 { } } +// =================================== SCHEMA + /// Trait for an entry in a table. It must be sortable and partitionnable. 
pub trait Entry<P: PartitionKey, S: SortKey>: Crdt + PartialEq + Clone + Migrate + Send + Sync + 'static diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml index f72051b9..6554ac13 100644 --- a/src/util/Cargo.toml +++ b/src/util/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_util" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>"] edition = "2018" license = "AGPL-3.0" @@ -20,6 +20,7 @@ arc-swap = "1.0" async-trait = "0.1" blake2 = "0.10" bytes = "1.0" +bytesize = "1.2" digest = "0.10" err-derive = "0.3" hexdump = "0.1" @@ -31,7 +32,7 @@ rand = "0.8" sha2 = "0.10" chrono = "0.4" -rmp-serde = "0.15" +rmp-serde = "1.1.2" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_json = "1.0" toml = "0.6" @@ -39,7 +40,7 @@ toml = "0.6" futures = "0.3" tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] } -netapp = "0.5" +netapp = "0.10" http = "0.2" hyper = "0.14" diff --git a/src/util/config.rs b/src/util/config.rs index 1da95b2f..ad5c8e1f 100644 --- a/src/util/config.rs +++ b/src/util/config.rs @@ -1,4 +1,5 @@ //! Contains type and functions related to Garage configuration file +use std::convert::TryFrom; use std::io::Read; use std::net::SocketAddr; use std::path::PathBuf; @@ -6,6 +7,7 @@ use std::path::PathBuf; use serde::{de, Deserialize}; use crate::error::Error; +use crate::socket_address::UnixOrTCPSocketAddress; /// Represent the whole configuration #[derive(Deserialize, Debug, Clone)] @@ -13,10 +15,20 @@ pub struct Config { /// Path where to store metadata. Should be fast, but low volume pub metadata_dir: PathBuf, /// Path where to store data. Can be slower, but need higher volume - pub data_dir: PathBuf, + pub data_dir: DataDirEnum, + + /// Whether to fsync after all metadata transactions (disabled by default) + #[serde(default)] + pub metadata_fsync: bool, + /// Whether to fsync after all data block writes (disabled by default) + #[serde(default)] + pub data_fsync: bool, /// Size of data blocks to save to disk - #[serde(default = "default_block_size")] + #[serde( + deserialize_with = "deserialize_capacity", + default = "default_block_size" + )] pub block_size: usize, /// Replication mode. 
Supported values: @@ -66,12 +78,19 @@ pub struct Config { pub db_engine: String, /// Sled cache size, in bytes - #[serde(default = "default_sled_cache_capacity")] - pub sled_cache_capacity: u64, + #[serde( + deserialize_with = "deserialize_capacity", + default = "default_sled_cache_capacity" + )] + pub sled_cache_capacity: usize, /// Sled flush interval in milliseconds #[serde(default = "default_sled_flush_every_ms")] pub sled_flush_every_ms: u64, + /// LMDB map size + #[serde(deserialize_with = "deserialize_capacity", default)] + pub lmdb_map_size: usize, + // -- APIs /// Configuration for S3 api pub s3_api: S3ApiConfig, @@ -87,11 +106,31 @@ pub struct Config { pub admin: AdminConfig, } +/// Value for data_dir: either a single directory or a list of dirs with attributes +#[derive(Deserialize, Debug, Clone)] +#[serde(untagged)] +pub enum DataDirEnum { + Single(PathBuf), + Multiple(Vec<DataDir>), +} + +#[derive(Deserialize, Debug, Clone)] +pub struct DataDir { + /// Path to the data directory + pub path: PathBuf, + /// Capacity of the drive (required if read_only is false) + #[serde(default)] + pub capacity: Option<String>, + /// Whether this is a legacy read-only path (capacity should be None) + #[serde(default)] + pub read_only: bool, +} + /// Configuration for S3 api #[derive(Deserialize, Debug, Clone)] pub struct S3ApiConfig { /// Address and port to bind for api serving - pub api_bind_addr: Option<SocketAddr>, + pub api_bind_addr: Option<UnixOrTCPSocketAddress>, /// S3 region to use pub s3_region: String, /// Suffix to remove from domain name to find bucket. If None, @@ -103,14 +142,14 @@ pub struct S3ApiConfig { #[derive(Deserialize, Debug, Clone)] pub struct K2VApiConfig { /// Address and port to bind for api serving - pub api_bind_addr: SocketAddr, + pub api_bind_addr: UnixOrTCPSocketAddress, } /// Configuration for serving files as normal web server #[derive(Deserialize, Debug, Clone)] pub struct WebConfig { /// Address and port to bind for web serving - pub bind_addr: SocketAddr, + pub bind_addr: UnixOrTCPSocketAddress, /// Suffix to remove from domain name to find bucket pub root_domain: String, } @@ -119,7 +158,7 @@ pub struct WebConfig { #[derive(Deserialize, Debug, Clone, Default)] pub struct AdminConfig { /// Address and port to bind for admin API serving - pub api_bind_addr: Option<SocketAddr>, + pub api_bind_addr: Option<UnixOrTCPSocketAddress>, /// Bearer token to use to scrape metrics pub metrics_token: Option<String>, @@ -183,10 +222,10 @@ pub struct KubernetesDiscoveryConfig { } fn default_db_engine() -> String { - "sled".into() + "lmdb".into() } -fn default_sled_cache_capacity() -> u64 { +fn default_sled_cache_capacity() -> usize { 128 * 1024 * 1024 } fn default_sled_flush_every_ms() -> u64 { @@ -266,8 +305,6 @@ fn deserialize_compression<'de, D>(deserializer: D) -> Result<Option<i32>, D::Er where D: de::Deserializer<'de>, { - use std::convert::TryFrom; - struct OptionVisitor; impl<'de> serde::de::Visitor<'de> for OptionVisitor { @@ -312,6 +349,50 @@ where deserializer.deserialize_any(OptionVisitor) } +fn deserialize_capacity<'de, D>(deserializer: D) -> Result<usize, D::Error> +where + D: de::Deserializer<'de>, +{ + struct CapacityVisitor; + + impl<'de> serde::de::Visitor<'de> for CapacityVisitor { + type Value = usize; + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("int or '<capacity>'") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: de::Error, + { + value + 
.parse::<bytesize::ByteSize>() + .map(|x| x.as_u64()) + .map_err(|e| E::custom(format!("invalid capacity value: {}", e))) + .and_then(|v| { + usize::try_from(v) + .map_err(|_| E::custom("capacity value out of bound".to_owned())) + }) + } + + fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E> + where + E: de::Error, + { + usize::try_from(v).map_err(|_| E::custom("capacity value out of bound".to_owned())) + } + + fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> + where + E: de::Error, + { + usize::try_from(v).map_err(|_| E::custom("capacity value out of bound".to_owned())) + } + } + + deserializer.deserialize_any(CapacityVisitor) +} + #[cfg(test)] mod tests { use crate::error::Error; diff --git a/src/util/encode.rs b/src/util/encode.rs index 1cd3198f..a9ab9a35 100644 --- a/src/util/encode.rs +++ b/src/util/encode.rs @@ -8,9 +8,7 @@ where T: Serialize + ?Sized, { let mut wr = Vec::with_capacity(128); - let mut se = rmp_serde::Serializer::new(&mut wr) - .with_struct_map() - .with_string_variants(); + let mut se = rmp_serde::Serializer::new(&mut wr).with_struct_map(); val.serialize(&mut se)?; Ok(wr) } @@ -22,7 +20,7 @@ pub fn nonversioned_decode<T>(bytes: &[u8]) -> Result<T, rmp_serde::decode::Erro where T: for<'de> Deserialize<'de> + ?Sized, { - rmp_serde::decode::from_read_ref::<_, T>(bytes) + rmp_serde::decode::from_slice::<_>(bytes) } /// Serialize to JSON, truncating long result diff --git a/src/util/lib.rs b/src/util/lib.rs index 15f0f829..7df77959 100644 --- a/src/util/lib.rs +++ b/src/util/lib.rs @@ -14,6 +14,7 @@ pub mod forwarded_headers; pub mod metrics; pub mod migrate; pub mod persister; +pub mod socket_address; pub mod time; pub mod tranquilizer; pub mod version; diff --git a/src/util/migrate.rs b/src/util/migrate.rs index b9cce08a..45147c74 100644 --- a/src/util/migrate.rs +++ b/src/util/migrate.rs @@ -19,7 +19,7 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { fn decode(bytes: &[u8]) -> Option<Self> { let marker_len = Self::VERSION_MARKER.len(); if bytes.get(..marker_len) == Some(Self::VERSION_MARKER) { - if let Ok(value) = rmp_serde::decode::from_read_ref::<_, Self>(&bytes[marker_len..]) { + if let Ok(value) = rmp_serde::decode::from_slice::<_>(&bytes[marker_len..]) { return Some(value); } } @@ -31,9 +31,7 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> { let mut wr = Vec::with_capacity(128); wr.extend_from_slice(Self::VERSION_MARKER); - let mut se = rmp_serde::Serializer::new(&mut wr) - .with_struct_map() - .with_string_variants(); + let mut se = rmp_serde::Serializer::new(&mut wr).with_struct_map(); self.serialize(&mut se)?; Ok(wr) } diff --git a/src/util/socket_address.rs b/src/util/socket_address.rs new file mode 100644 index 00000000..f01225f6 --- /dev/null +++ b/src/util/socket_address.rs @@ -0,0 +1,44 @@ +use std::fmt::{Debug, Display, Formatter}; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::str::FromStr; + +use serde::de::Error; +use serde::{Deserialize, Deserializer}; + +#[derive(Debug, Clone)] +pub enum UnixOrTCPSocketAddress { + TCPSocket(SocketAddr), + UnixSocket(PathBuf), +} + +impl Display for UnixOrTCPSocketAddress { + fn fmt(&self, formatter: &mut Formatter<'_>) -> std::fmt::Result { + match self { + UnixOrTCPSocketAddress::TCPSocket(address) => write!(formatter, "http://{}", address), + UnixOrTCPSocketAddress::UnixSocket(path) => { + write!(formatter, "http+unix://{}", path.to_string_lossy()) + } + } + } +} + +impl<'de> 
Deserialize<'de> for UnixOrTCPSocketAddress { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + let string = string.as_str(); + + if string.starts_with("/") { + Ok(UnixOrTCPSocketAddress::UnixSocket( + PathBuf::from_str(string).map_err(Error::custom)?, + )) + } else { + Ok(UnixOrTCPSocketAddress::TCPSocket( + SocketAddr::from_str(string).map_err(Error::custom)?, + )) + } + } +} diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml index 423d3829..88cf1486 100644 --- a/src/web/Cargo.toml +++ b/src/web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_web" -version = "0.8.2" +version = "0.9.0" authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"] edition = "2018" license = "AGPL-3.0" @@ -27,5 +27,8 @@ futures = "0.3" http = "0.2" hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] } +hyperlocal = { version = "0.8.0", default-features = false, features = ["server"] } + +tokio = { version = "1.0", default-features = false, features = ["net"] } opentelemetry = "0.17" diff --git a/src/web/web_server.rs b/src/web/web_server.rs index de63b842..73780efb 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -1,4 +1,6 @@ -use std::{borrow::Cow, convert::Infallible, net::SocketAddr, sync::Arc}; +use std::fs::{self, Permissions}; +use std::os::unix::prelude::PermissionsExt; +use std::{convert::Infallible, sync::Arc}; use futures::future::Future; @@ -6,9 +8,13 @@ use hyper::{ header::{HeaderValue, HOST}, server::conn::AddrStream, service::{make_service_fn, service_fn}, - Body, Method, Request, Response, Server, + Body, Method, Request, Response, Server, StatusCode, }; +use hyperlocal::UnixServerExt; + +use tokio::net::UnixStream; + use opentelemetry::{ global, metrics::{Counter, ValueRecorder}, @@ -28,9 +34,11 @@ use garage_api::s3::get::{handle_get, handle_head}; use garage_model::garage::Garage; use garage_table::*; +use garage_util::data::Uuid; use garage_util::error::Error as GarageError; use garage_util::forwarded_headers; use garage_util::metrics::{gen_trace_id, RecordDuration}; +use garage_util::socket_address::UnixOrTCPSocketAddress; struct WebMetrics { request_counter: Counter<u64>, @@ -68,7 +76,7 @@ impl WebServer { /// Run a web server pub async fn run( garage: Arc<Garage>, - addr: SocketAddr, + addr: UnixOrTCPSocketAddress, root_domain: String, shutdown_signal: impl Future<Output = ()>, ) -> Result<(), GarageError> { @@ -79,7 +87,7 @@ impl WebServer { root_domain, }); - let service = make_service_fn(|conn: &AddrStream| { + let tcp_service = make_service_fn(|conn: &AddrStream| { let web_server = web_server.clone(); let client_addr = conn.remote_addr(); @@ -87,23 +95,56 @@ impl WebServer { Ok::<_, Error>(service_fn(move |req: Request<Body>| { let web_server = web_server.clone(); - web_server.handle_request(req, client_addr) + web_server.handle_request(req, client_addr.to_string()) })) } }); - let server = Server::bind(&addr).serve(service); - let graceful = server.with_graceful_shutdown(shutdown_signal); - info!("Web server listening on http://{}", addr); + let unix_service = make_service_fn(|_: &UnixStream| { + let web_server = web_server.clone(); + + let path = addr.to_string(); + async move { + Ok::<_, Error>(service_fn(move |req: Request<Body>| { + let web_server = web_server.clone(); + + web_server.handle_request(req, path.clone()) + })) + } + }); + + info!("Web server listening on {}", addr); + + match addr { + 
UnixOrTCPSocketAddress::TCPSocket(addr) => { + Server::bind(&addr) + .serve(tcp_service) + .with_graceful_shutdown(shutdown_signal) + .await? + } + UnixOrTCPSocketAddress::UnixSocket(ref path) => { + if path.exists() { + fs::remove_file(path)? + } + + let bound = Server::bind_unix(path)?; + + fs::set_permissions(path, Permissions::from_mode(0o222))?; + + bound + .serve(unix_service) + .with_graceful_shutdown(shutdown_signal) + .await?; + } + }; - graceful.await?; Ok(()) } async fn handle_request( self: Arc<Self>, req: Request<Body>, - addr: SocketAddr, + addr: String, ) -> Result<Response<Body>, Infallible> { if let Ok(forwarded_for_ip_addr) = forwarded_headers::handle_forwarded_for_headers(req.headers()) @@ -168,6 +209,17 @@ impl WebServer { } } + async fn check_key_exists(self: &Arc<Self>, bucket_id: Uuid, key: &str) -> Result<bool, Error> { + let exists = self + .garage + .object_table + .get(&bucket_id, &key.to_string()) + .await? + .map(|object| object.versions().iter().any(|v| v.is_data())) + .unwrap_or(false); + Ok(exists) + } + async fn serve_file(self: &Arc<Self>, req: &Request<Body>) -> Result<Response<Body>, Error> { // Get http authority string (eg. [::1]:3902 or garage.tld:80) let authority = req @@ -207,11 +259,11 @@ impl WebServer { // Get path let path = req.uri().path().to_string(); let index = &website_config.index_document; - let key = path_to_key(&path, index)?; + let (key, may_redirect) = path_to_keys(&path, index)?; debug!( - "Selected bucket: \"{}\" {:?}, selected key: \"{}\"", - bucket_name, bucket_id, key + "Selected bucket: \"{}\" {:?}, target key: \"{}\", may redirect to: {:?}", + bucket_name, bucket_id, key, may_redirect ); let ret_doc = match *req.method() { @@ -219,10 +271,23 @@ impl WebServer { Method::HEAD => handle_head(self.garage.clone(), req, bucket_id, &key, None).await, Method::GET => handle_get(self.garage.clone(), req, bucket_id, &key, None).await, _ => Err(ApiError::bad_request("HTTP method not supported")), - } - .map_err(Error::from); + }; + + // Try implicit redirect on error + let ret_doc_with_redir = match (&ret_doc, may_redirect) { + (Err(ApiError::NoSuchKey), ImplicitRedirect::To { key, url }) + if self.check_key_exists(bucket_id, key.as_str()).await? => + { + Ok(Response::builder() + .status(StatusCode::FOUND) + .header("Location", url) + .body(Body::empty()) + .unwrap()) + } + _ => ret_doc, + }; - match ret_doc { + match ret_doc_with_redir.map_err(Error::from) { Err(error) => { // For a HEAD or OPTIONS method, and for non-4xx errors, // we don't return the error document as content, @@ -308,30 +373,45 @@ fn error_to_res(e: Error) -> Response<Body> { http_error } +#[derive(Debug, PartialEq)] +enum ImplicitRedirect { + No, + To { key: String, url: String }, +} + /// Path to key /// /// Convert the provided path to the internal key /// When a path ends with "/", we append the index name to match traditional web server behavior /// which is also AWS S3 behavior. 
-fn path_to_key<'a>(path: &'a str, index: &str) -> Result<Cow<'a, str>, Error> { +/// +/// Check: https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html +fn path_to_keys<'a>(path: &'a str, index: &str) -> Result<(String, ImplicitRedirect), Error> { let path_utf8 = percent_encoding::percent_decode_str(path).decode_utf8()?; - if !path_utf8.starts_with('/') { - return Err(Error::BadRequest("Path must start with a / (slash)".into())); - } - - match path_utf8.chars().last() { - None => unreachable!(), - Some('/') => { - let mut key = String::with_capacity(path_utf8.len() + index.len()); - key.push_str(&path_utf8[1..]); - key.push_str(index); - Ok(key.into()) - } - Some(_) => match path_utf8 { - Cow::Borrowed(pu8) => Ok((&pu8[1..]).into()), - Cow::Owned(pu8) => Ok(pu8[1..].to_string().into()), - }, + let base_key = match path_utf8.strip_prefix("/") { + Some(bk) => bk, + None => return Err(Error::BadRequest("Path must start with a / (slash)".into())), + }; + let is_bucket_root = base_key.len() == 0; + let is_trailing_slash = path_utf8.ends_with("/"); + + match (is_bucket_root, is_trailing_slash) { + // It is not possible to store something at the root of the bucket (ie. empty key), + // the only option is to fetch the index + (true, _) => Ok((index.to_string(), ImplicitRedirect::No)), + + // "If you create a folder structure in your bucket, you must have an index document at each level. In each folder, the index document must have the same name, for example, index.html. When a user specifies a URL that resembles a folder lookup, the presence or absence of a trailing slash determines the behavior of the website. For example, the following URL, with a trailing slash, returns the photos/index.html index document." + (false, true) => Ok((format!("{base_key}{index}"), ImplicitRedirect::No)), + + // "However, if you exclude the trailing slash from the preceding URL, Amazon S3 first looks for an object photos in the bucket. If the photos object is not found, it searches for an index document, photos/index.html. If that document is found, Amazon S3 returns a 302 Found message and points to the photos/ key. For subsequent requests to photos/, Amazon S3 returns photos/index.html. If the index document is not found, Amazon S3 returns an error." 
+ (false, false) => Ok(( + base_key.to_string(), + ImplicitRedirect::To { + key: format!("{base_key}/{index}"), + url: format!("{path}/"), + }, + )), } } @@ -340,13 +420,37 @@ mod tests { use super::*; #[test] - fn path_to_key_test() -> Result<(), Error> { - assert_eq!(path_to_key("/file%20.jpg", "index.html")?, "file .jpg"); - assert_eq!(path_to_key("/%20t/", "index.html")?, " t/index.html"); - assert_eq!(path_to_key("/", "index.html")?, "index.html"); - assert_eq!(path_to_key("/hello", "index.html")?, "hello"); - assert!(path_to_key("", "index.html").is_err()); - assert!(path_to_key("i/am/relative", "index.html").is_err()); + fn path_to_keys_test() -> Result<(), Error> { + assert_eq!( + path_to_keys("/file%20.jpg", "index.html")?, + ( + "file .jpg".to_string(), + ImplicitRedirect::To { + key: "file .jpg/index.html".to_string(), + url: "/file%20.jpg/".to_string() + } + ) + ); + assert_eq!( + path_to_keys("/%20t/", "index.html")?, + (" t/index.html".to_string(), ImplicitRedirect::No) + ); + assert_eq!( + path_to_keys("/", "index.html")?, + ("index.html".to_string(), ImplicitRedirect::No) + ); + assert_eq!( + path_to_keys("/hello", "index.html")?, + ( + "hello".to_string(), + ImplicitRedirect::To { + key: "hello/index.html".to_string(), + url: "/hello/".to_string() + } + ) + ); + assert!(path_to_keys("", "index.html").is_err()); + assert!(path_to_keys("i/am/relative", "index.html").is_err()); Ok(()) } } |
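The configuration and server changes above add several new knobs: data_dir may now be either a single path or a list of drives with per-drive capacity, metadata_fsync and data_fsync toggles, capacity values given either as integers or as human-readable strings (parsed through bytesize in deserialize_capacity), and bind addresses that are either TCP sockets or Unix socket paths (UnixOrTCPSocketAddress, selected by a leading "/"). As a minimal sketch only, here is a garage.toml fragment exercising these new fields; every path, port and size below is illustrative, and fields untouched by this diff (rpc_secret, rpc_bind_addr, replication_mode, ...) are omitted:

    metadata_dir = "/var/lib/garage/meta"

    # data_dir as a list of drives: capacity is a plain string at this layer
    # (its unit parsing is not part of this diff) and read_only marks a legacy
    # path that is only read from.
    data_dir = [
        { path = "/mnt/drive1", capacity = "2T" },
        { path = "/mnt/drive2", capacity = "2T" },
        { path = "/mnt/old-drive", read_only = true },
    ]

    metadata_fsync = true
    data_fsync = false

    db_engine = "lmdb"        # new default engine
    block_size = "1M"         # deserialize_capacity: integer or "<capacity>" string
    lmdb_map_size = "1T"

    [s3_api]
    s3_region = "garage"
    api_bind_addr = "[::]:3900"          # parsed as a TCP SocketAddr

    [s3_web]
    bind_addr = "/run/garage/web.sock"   # leading "/" selects a Unix socket
    root_domain = ".web.example.com"

    [admin]
    api_bind_addr = "/run/garage/admin.sock"

When a Unix socket path is used, the web server removes any stale socket file before binding and then sets the socket's permissions to 0o222, presumably so that a reverse proxy running as another user can connect to it.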