Diffstat (limited to 'src/api')
-rw-r--r--  src/api/Cargo.toml            | 14
-rw-r--r--  src/api/admin/api_server.rs   | 70
-rw-r--r--  src/api/admin/bucket.rs       |  4
-rw-r--r--  src/api/admin/key.rs          |  4
-rw-r--r--  src/api/admin/router.rs       |  6
-rw-r--r--  src/api/generic_server.rs     |  2
-rw-r--r--  src/api/k2v/batch.rs          |  4
-rw-r--r--  src/api/s3/get.rs             |  4
-rw-r--r--  src/api/s3/post_object.rs     | 12
-rw-r--r--  src/api/signature/payload.rs  | 44
10 files changed, 116 insertions, 48 deletions
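
Note on the Cargo.toml hunk below: the garage_* dependencies switch from explicit version/path declarations to Cargo workspace dependency inheritance (dep.workspace = true), which requires matching entries in the [workspace.dependencies] table of the workspace root manifest. A minimal sketch of what those root entries might look like, with versions and paths assumed from the removed lines rather than taken from this commit:

# Workspace root Cargo.toml (sketch only, not part of this diff)
[workspace.dependencies]
garage_model = { version = "0.8.2", path = "src/model" }
garage_table = { version = "0.8.2", path = "src/table" }
garage_block = { version = "0.8.2", path = "src/block" }
garage_util = { version = "0.8.2", path = "src/util" }
garage_rpc = { version = "0.8.2", path = "src/rpc" }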
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index 9babec02..c2155eb9 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -14,11 +14,11 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-garage_model = { version = "0.8.2", path = "../model" }
-garage_table = { version = "0.8.2", path = "../table" }
-garage_block = { version = "0.8.2", path = "../block" }
-garage_util = { version = "0.8.2", path = "../util" }
-garage_rpc = { version = "0.8.2", path = "../rpc" }
+garage_model.workspace = true
+garage_table.workspace = true
+garage_block.workspace = true
+garage_util.workspace = true
+garage_rpc.workspace = true
 
 async-trait = "0.1.7"
 base64 = "0.21"
@@ -28,7 +28,7 @@ crypto-common = "0.1"
 err-derive = "0.3"
 hex = "0.4"
 hmac = "0.12"
-idna = "0.3"
+idna = "0.4"
 tracing = "0.1"
 md-5 = "0.10"
 nom = "7.1"
@@ -47,7 +47,7 @@ http-range = "0.1"
 hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
 multer = "2.0"
 percent-encoding = "2.1.0"
-roxmltree = "0.14"
+roxmltree = "0.18"
 serde = { version = "1.0", features = ["derive"] }
 serde_bytes = "0.11"
 serde_json = "1.0"
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 58dd38d8..50c79120 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -26,6 +26,7 @@ use crate::admin::cluster::*;
 use crate::admin::error::*;
 use crate::admin::key::*;
 use crate::admin::router::{Authorization, Endpoint};
+use crate::helpers::host_to_bucket;
 
 pub struct AdminApiServer {
 	garage: Arc<Garage>,
@@ -78,10 +79,7 @@ impl AdminApiServer {
 			.body(Body::empty())?)
 	}
 
-	async fn handle_check_website_enabled(
-		&self,
-		req: Request<Body>,
-	) -> Result<Response<Body>, Error> {
+	async fn handle_check_domain(&self, req: Request<Body>) -> Result<Response<Body>, Error> {
 		let query_params: HashMap<String, String> = req
 			.uri()
 			.query()
@@ -102,12 +100,56 @@ impl AdminApiServer {
 			.get("domain")
 			.ok_or_internal_error("Could not parse domain query string")?;
 
-		let bucket_id = self
+		if self.check_domain(domain).await? {
+			Ok(Response::builder()
+				.status(StatusCode::OK)
+				.body(Body::from(format!(
+					"Domain '{domain}' is managed by Garage"
+				)))?)
+		} else {
+			Err(Error::bad_request(format!(
+				"Domain '{domain}' is not managed by Garage"
+			)))
+		}
+	}
+
+	async fn check_domain(&self, domain: &str) -> Result<bool, Error> {
+		// Resolve bucket from domain name, inferring if the website must be activated for the
+		// domain to be valid.
+		let (bucket_name, must_check_website) = if let Some(bname) = self
+			.garage
+			.config
+			.s3_api
+			.root_domain
+			.as_ref()
+			.and_then(|rd| host_to_bucket(domain, rd))
+		{
+			(bname.to_string(), false)
+		} else if let Some(bname) = self
+			.garage
+			.config
+			.s3_web
+			.as_ref()
+			.and_then(|sw| host_to_bucket(domain, sw.root_domain.as_str()))
+		{
+			(bname.to_string(), true)
+		} else {
+			(domain.to_string(), true)
+		};
+
+		let bucket_id = match self
 			.garage
 			.bucket_helper()
-			.resolve_global_bucket_name(&domain)
+			.resolve_global_bucket_name(&bucket_name)
 			.await?
-			.ok_or(HelperError::NoSuchBucket(domain.to_string()))?;
+		{
+			Some(bucket_id) => bucket_id,
+			None => return Ok(false),
+		};
+
+		if !must_check_website {
+			return Ok(true);
+		}
 
 		let bucket = self
 			.garage
@@ -119,16 +161,8 @@ impl AdminApiServer {
 
 		let bucket_website_config = bucket_state.website_config.get();
 
 		match bucket_website_config {
-			Some(_v) => {
-				Ok(Response::builder()
-					.status(StatusCode::OK)
-					.body(Body::from(format!(
-						"Bucket '{domain}' is authorized for website hosting"
-					)))?)
-			}
-			None => Err(Error::bad_request(format!(
-				"Bucket '{domain}' is not authorized for website hosting"
-			))),
+			Some(_v) => Ok(true),
+			None => Ok(false),
 		}
 	}
@@ -229,7 +263,7 @@ impl ApiHandler for AdminApiServer {
 
 		match endpoint {
 			Endpoint::Options => self.handle_options(&req),
-			Endpoint::CheckWebsiteEnabled => self.handle_check_website_enabled(req).await,
+			Endpoint::CheckDomain => self.handle_check_domain(req).await,
 			Endpoint::Health => self.handle_health(),
 			Endpoint::Metrics => self.handle_metrics(),
 			Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs
index e60f07ca..f0a4a9e7 100644
--- a/src/api/admin/bucket.rs
+++ b/src/api/admin/bucket.rs
@@ -183,8 +183,8 @@ async fn bucket_info_results(
 			}
 		}),
 		keys: relevant_keys
-			.into_iter()
-			.map(|(_, key)| {
+			.into_values()
+			.map(|key| {
 				let p = key.state.as_option().unwrap();
 				GetBucketInfoKey {
 					access_key_id: key.key_id,
diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs
index 2bbabb7b..d74ca361 100644
--- a/src/api/admin/key.rs
+++ b/src/api/admin/key.rs
@@ -183,8 +183,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
 			create_bucket: *key_state.allow_create_bucket.get(),
 		},
 		buckets: relevant_buckets
-			.into_iter()
-			.map(|(_, bucket)| {
+			.into_values()
+			.map(|bucket| {
 				let state = bucket.state.as_option().unwrap();
 				KeyInfoBucketResult {
 					id: hex::encode(bucket.id),
diff --git a/src/api/admin/router.rs b/src/api/admin/router.rs
index 0dcb1546..0225f18b 100644
--- a/src/api/admin/router.rs
+++ b/src/api/admin/router.rs
@@ -17,7 +17,7 @@ router_match! {@func
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Endpoint {
 	Options,
-	CheckWebsiteEnabled,
+	CheckDomain,
 	Health,
 	Metrics,
 	GetClusterStatus,
@@ -92,7 +92,7 @@ impl Endpoint {
 
 		let res = router_match!(@gen_path_parser (req.method(), path, query) [
 			OPTIONS _ => Options,
-			GET "/check" => CheckWebsiteEnabled,
+			GET "/check" => CheckDomain,
 			GET "/health" => Health,
 			GET "/metrics" => Metrics,
 			GET "/v0/status" => GetClusterStatus,
@@ -138,7 +138,7 @@ impl Endpoint {
 	pub fn authorization_type(&self) -> Authorization {
 		match self {
 			Self::Health => Authorization::None,
-			Self::CheckWebsiteEnabled => Authorization::None,
+			Self::CheckDomain => Authorization::None,
 			Self::Metrics => Authorization::MetricsToken,
 			_ => Authorization::AdminToken,
 		}
diff --git a/src/api/generic_server.rs b/src/api/generic_server.rs
index d0354d28..757b85ec 100644
--- a/src/api/generic_server.rs
+++ b/src/api/generic_server.rs
@@ -128,7 +128,7 @@ impl<A: ApiHandler> ApiServer<A> {
 		let uri = req.uri().clone();
 
 		if let Ok(forwarded_for_ip_addr) =
-			forwarded_headers::handle_forwarded_for_headers(&req.headers())
+			forwarded_headers::handle_forwarded_for_headers(req.headers())
 		{
 			info!(
 				"{} (via {}) {} {}",
diff --git a/src/api/k2v/batch.rs b/src/api/k2v/batch.rs
index 26d678da..294380ea 100644
--- a/src/api/k2v/batch.rs
+++ b/src/api/k2v/batch.rs
@@ -282,8 +282,8 @@ pub(crate) async fn handle_poll_range(
 	if let Some((items, seen_marker)) = resp {
 		let resp = PollRangeResponse {
 			items: items
-				.into_iter()
-				.map(|(_k, i)| ReadBatchResponseItem::from(i))
+				.into_values()
+				.map(ReadBatchResponseItem::from)
 				.collect::<Vec<_>>(),
 			seen_marker,
 		};
diff --git a/src/api/s3/get.rs b/src/api/s3/get.rs
index 2a99551a..cde7b461 100644
--- a/src/api/s3/get.rs
+++ b/src/api/s3/get.rs
@@ -443,7 +443,7 @@ fn body_from_blocks_range(
 	// block.part_number, which is not the same in the case of a multipart upload)
 	let mut blocks: Vec<(VersionBlock, u64)> = Vec::with_capacity(std::cmp::min(
 		all_blocks.len(),
-		4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size as u64, 1024)) as usize,
+		4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size, 1024)) as usize,
 	));
 	let mut block_offset: u64 = 0;
 	for (_, b) in all_blocks.iter() {
@@ -454,7 +454,7 @@ fn body_from_blocks_range(
 		if block_offset < end && block_offset + b.size > begin {
 			blocks.push((*b, block_offset));
 		}
-		block_offset += b.size as u64;
+		block_offset += b.size;
 	}
 
 	let order_stream = OrderTag::stream();
diff --git a/src/api/s3/post_object.rs b/src/api/s3/post_object.rs
index f2098ab0..542b7a81 100644
--- a/src/api/s3/post_object.rs
+++ b/src/api/s3/post_object.rs
@@ -30,7 +30,7 @@ pub async fn handle_post_object(
 		.get(header::CONTENT_TYPE)
 		.and_then(|ct| ct.to_str().ok())
 		.and_then(|ct| multer::parse_boundary(ct).ok())
-		.ok_or_bad_request("Counld not get multipart boundary")?;
+		.ok_or_bad_request("Could not get multipart boundary")?;
 
 	// 16k seems plenty for a header. 5G is the max size of a single part, so it seems reasonable
 	// for a PostObject
@@ -64,15 +64,13 @@ pub async fn handle_post_object(
 			"tag" => (/* tag need to be reencoded, but we don't support them yet anyway */),
 			"acl" => {
 				if params.insert("x-amz-acl", content).is_some() {
-					return Err(Error::bad_request(
-						"Field 'acl' provided more than one time",
-					));
+					return Err(Error::bad_request("Field 'acl' provided more than once"));
 				}
 			}
 			_ => {
 				if params.insert(&name, content).is_some() {
 					return Err(Error::bad_request(format!(
-						"Field '{}' provided more than one time",
+						"Field '{}' provided more than once",
 						name
 					)));
 				}
@@ -149,7 +147,7 @@ pub async fn handle_post_object(
 		.ok_or_bad_request("Invalid expiration date")?
 		.into();
 	if Utc::now() - expiration > Duration::zero() {
-		return Err(Error::bad_request("Expiration date is in the paste"));
+		return Err(Error::bad_request("Expiration date is in the past"));
 	}
 
 	let mut conditions = decoded_policy.into_conditions()?;
@@ -330,7 +328,7 @@ impl Policy {
 			if map.len() != 1 {
 				return Err(Error::bad_request("Invalid policy item"));
 			}
-			let (mut k, v) = map.into_iter().next().expect("size was verified");
+			let (mut k, v) = map.into_iter().next().expect("Size could not be verified");
 			k.make_ascii_lowercase();
 			params.entry(k).or_default().push(Operation::Equal(v));
 		}
diff --git a/src/api/signature/payload.rs b/src/api/signature/payload.rs
index 4c7934e5..b50fb3bb 100644
--- a/src/api/signature/payload.rs
+++ b/src/api/signature/payload.rs
@@ -19,7 +19,7 @@ use crate::signature::error::*;
 
 pub async fn check_payload_signature(
 	garage: &Garage,
-	service: &str,
+	service: &'static str,
 	request: &Request<Body>,
 ) -> Result<(Option<Key>, Option<Hash>), Error> {
 	let mut headers = HashMap::new();
@@ -51,6 +51,7 @@ pub async fn check_payload_signature(
 	};
 
 	let canonical_request = canonical_request(
+		service,
 		request.method(),
 		request.uri(),
 		&headers,
@@ -184,7 +185,7 @@ fn parse_query_authorization(
 
 	if duration > 7 * 24 * 3600 {
 		return Err(Error::bad_request(
-			"X-Amz-Exprires may not exceed a week".to_string(),
+			"X-Amz-Expires may not exceed a week".to_string(),
 		));
 	}
 
@@ -210,7 +211,7 @@ fn parse_query_authorization(
 fn parse_credential(cred: &str) -> Result<(String, String), Error> {
 	let first_slash = cred
 		.find('/')
-		.ok_or_bad_request("Credentials does not contain / in authorization field")?;
+		.ok_or_bad_request("Credentials does not contain '/' in authorization field")?;
 	let (key_id, scope) = cred.split_at(first_slash);
 	Ok((
 		key_id.to_string(),
@@ -231,15 +232,50 @@ pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_re
 }
 
 pub fn canonical_request(
+	service: &'static str,
 	method: &Method,
 	uri: &hyper::Uri,
 	headers: &HashMap<String, String>,
 	signed_headers: &str,
 	content_sha256: &str,
 ) -> String {
+	// There seems to be evidence that in AWSv4 signatures, the path component is url-encoded
+	// a second time when building the canonical request, as specified in this documentation page:
+	// -> https://docs.aws.amazon.com/rolesanywhere/latest/userguide/authentication-sign-process.html
+	// However this documentation page is for a specific service ("roles anywhere"), and
+	// in the S3 service we know for a fact that there is no double-urlencoding, because all of
+	// the tests we made with external software work without it.
+	//
+	// The theory is that double-urlencoding occurs for all services except S3,
+	// which is what is implemented in rusoto_signature:
+	// -> https://docs.rs/rusoto_signature/latest/src/rusoto_signature/signature.rs.html#464
+	//
+	// Digging into the code of the official AWS Rust SDK, we learn that double-URI-encoding can
+	// be set or unset on a per-request basis (the signature crates, aws-sigv4 and aws-sig-auth,
+	// are agnostic to this). Grepping the codebase confirms that S3 is the only API for which
+	// double_uri_encode is set to false, meaning it is true (its default value) for all other
+	// AWS services. We will therefore implement this behavior in Garage as well.
+	//
+	// Note that this documentation page, which is touted as the "authoritative reference" on
+	// AWSv4 signatures, makes no mention of either single- or double-urlencoding:
+	// -> https://docs.aws.amazon.com/IAM/latest/UserGuide/create-signed-request.html
+	// This page of the S3 documentation does also not mention anything specific:
+	// -> https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+	//
+	// Note that there is also the issue of path normalization, which I hope is unrelated to the
+	// one of URI-encoding. At least in aws-sigv4 both parameters can be set independently,
+	// and rusoto_signature does not seem to do any effective path normalization, even though
+	// it mentions it in the comments (same link to the souce code as above).
+	// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
+	// would make non-normalized paths invalid K2V partition keys, and we don't want that.
+	let path: std::borrow::Cow<str> = if service != "s3" {
+		uri_encode(uri.path(), false).into()
+	} else {
+		uri.path().into()
+	};
 	[
 		method.as_str(),
-		uri.path(),
+		&path,
 		&canonical_query_string(uri),
 		&canonical_header_string(headers, signed_headers),
 		"",
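
To illustrate the double-URI-encoding behavior described in the comment added to canonical_request above: for every service except S3, the request path (which the client already percent-encoded once when building the URI) is percent-encoded a second time before being hashed into the canonical request. Below is a minimal, self-contained sketch using the percent-encoding crate already listed in Cargo.toml; the character set and helper name here are illustrative assumptions, not Garage's actual uri_encode implementation:

use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC};

// Approximation of AWS-style URI encoding: keep unreserved characters
// (ALPHA / DIGIT / '-' / '.' / '_' / '~') and the '/' path separator,
// percent-encode everything else.
const PATH_SET: &AsciiSet = &NON_ALPHANUMERIC
	.remove(b'-')
	.remove(b'.')
	.remove(b'_')
	.remove(b'~')
	.remove(b'/');

fn uri_encode_path(path: &str) -> String {
	utf8_percent_encode(path, PATH_SET).to_string()
}

fn main() {
	// Path as received by the server: already percent-encoded once by the client.
	let received = "/uploads/my%20file.txt";

	// S3: the canonical request uses the received path as-is.
	println!("canonical path (s3):    {received}");

	// Other services: the path is encoded a second time, so the '%' of "%20"
	// itself becomes "%25", yielding "/uploads/my%2520file.txt".
	println!("canonical path (other): {}", uri_encode_path(received));
}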