Diffstat (limited to 'src/api')
-rw-r--r--   src/api/Cargo.toml              |   3
-rw-r--r--   src/api/admin/api_server.rs     |  58
-rw-r--r--   src/api/k2v/api_server.rs       |  15
-rw-r--r--   src/api/s3/api_server.rs        |  15
-rw-r--r--   src/api/s3/copy.rs              |   5
-rw-r--r--   src/api/s3/multipart.rs         |  14
-rw-r--r--   src/api/s3/post_object.rs       |  26
-rw-r--r--   src/api/s3/put.rs               | 223
-rw-r--r--   src/api/signature/mod.rs        |  29
-rw-r--r--   src/api/signature/payload.rs    | 602
-rw-r--r--   src/api/signature/streaming.rs  |  13
11 files changed, 641 insertions(+), 362 deletions(-)
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index bc6b6aa7..40fab769 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_api"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -21,6 +21,7 @@ garage_net.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
+argon2.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 50813d11..265639c4 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::sync::Arc;
+use argon2::password_hash::PasswordHash;
use async_trait::async_trait;
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
@@ -45,14 +46,8 @@ impl AdminApiServer {
#[cfg(feature = "metrics")] exporter: PrometheusExporter,
) -> Self {
let cfg = &garage.config.admin;
- let metrics_token = cfg
- .metrics_token
- .as_ref()
- .map(|tok| format!("Bearer {}", tok));
- let admin_token = cfg
- .admin_token
- .as_ref()
- .map(|tok| format!("Bearer {}", tok));
+ let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
+ let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
Self {
garage,
#[cfg(feature = "metrics")]
@@ -248,11 +243,11 @@ impl ApiHandler for AdminApiServer {
req: Request<IncomingBody>,
endpoint: Endpoint,
) -> Result<Response<ResBody>, Error> {
- let expected_auth_header =
+ let required_auth_hash =
match endpoint.authorization_type() {
Authorization::None => None,
- Authorization::MetricsToken => self.metrics_token.as_ref(),
- Authorization::AdminToken => match &self.admin_token {
+ Authorization::MetricsToken => self.metrics_token.as_deref(),
+ Authorization::AdminToken => match self.admin_token.as_deref() {
None => return Err(Error::forbidden(
"Admin token isn't configured, admin API access is disabled for security.",
)),
@@ -260,14 +255,11 @@ impl ApiHandler for AdminApiServer {
},
};
- if let Some(h) = expected_auth_header {
+ if let Some(password_hash) = required_auth_hash {
match req.headers().get("Authorization") {
None => return Err(Error::forbidden("Authorization token must be provided")),
- Some(v) => {
- let authorized = v.to_str().map(|hv| hv.trim() == h).unwrap_or(false);
- if !authorized {
- return Err(Error::forbidden("Invalid authorization token provided"));
- }
+ Some(authorization) => {
+ verify_bearer_token(&authorization, password_hash)?;
}
}
}
@@ -342,3 +334,35 @@ impl ApiEndpoint for Endpoint {
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
}
+
+fn hash_bearer_token(token: &str) -> String {
+ use argon2::{
+ password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
+ Argon2,
+ };
+
+ let salt = SaltString::generate(&mut OsRng);
+ let argon2 = Argon2::default();
+ argon2
+ .hash_password(token.trim().as_bytes(), &salt)
+ .expect("could not hash API token")
+ .to_string()
+}
+
+fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
+ use argon2::{password_hash::PasswordVerifier, Argon2};
+
+ let parsed_hash = PasswordHash::new(&password_hash).unwrap();
+
+ token
+ .to_str()?
+ .strip_prefix("Bearer ")
+ .and_then(|token| {
+ Argon2::default()
+ .verify_password(token.trim().as_bytes(), &parsed_hash)
+ .ok()
+ })
+ .ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
+
+ Ok(())
+}
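
For reference, a minimal self-contained sketch of the Argon2 round-trip performed by hash_bearer_token and verify_bearer_token above (argon2 crate with default parameters; the token value and `main` wrapper are illustrative):

```rust
use argon2::{
    password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};

fn main() {
    // Hash the configured token once at startup, producing a PHC-format string.
    let token = "my-admin-token";
    let salt = SaltString::generate(&mut OsRng);
    let phc_string = Argon2::default()
        .hash_password(token.as_bytes(), &salt)
        .expect("could not hash token")
        .to_string();

    // On each request, verify the presented token against the stored hash
    // (the real handler first strips the "Bearer " prefix from the header value).
    let parsed = PasswordHash::new(&phc_string).expect("invalid PHC string");
    assert!(Argon2::default()
        .verify_password(token.as_bytes(), &parsed)
        .is_ok());
}
```
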
diff --git a/src/api/k2v/api_server.rs b/src/api/k2v/api_server.rs
index e97da2af..fdb5db4c 100644
--- a/src/api/k2v/api_server.rs
+++ b/src/api/k2v/api_server.rs
@@ -15,8 +15,7 @@ use garage_model::garage::Garage;
use crate::generic_server::*;
use crate::k2v::error::*;
-use crate::signature::payload::check_payload_signature;
-use crate::signature::streaming::*;
+use crate::signature::verify_request;
use crate::helpers::*;
use crate::k2v::batch::*;
@@ -86,17 +85,7 @@ impl ApiHandler for K2VApiServer {
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
}
- let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?;
- let api_key = api_key
- .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
-
- let req = parse_streaming_body(
- &api_key,
- req,
- &mut content_sha256,
- &garage.config.s3_api.s3_region,
- "k2v",
- )?;
+ let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
let bucket_id = garage
.bucket_helper()
diff --git a/src/api/s3/api_server.rs b/src/api/s3/api_server.rs
index 08405923..51f19554 100644
--- a/src/api/s3/api_server.rs
+++ b/src/api/s3/api_server.rs
@@ -17,8 +17,7 @@ use garage_model::key_table::Key;
use crate::generic_server::*;
use crate::s3::error::*;
-use crate::signature::payload::check_payload_signature;
-use crate::signature::streaming::*;
+use crate::signature::verify_request;
use crate::helpers::*;
use crate::s3::bucket::*;
@@ -125,17 +124,7 @@ impl ApiHandler for S3ApiServer {
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
}
- let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?;
- let api_key = api_key
- .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
-
- let req = parse_streaming_body(
- &api_key,
- req,
- &mut content_sha256,
- &garage.config.s3_api.s3_region,
- "s3",
- )?;
+ let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
let bucket_name = match bucket_name {
None => {
diff --git a/src/api/s3/copy.rs b/src/api/s3/copy.rs
index 7eb1fe60..880ce5f4 100644
--- a/src/api/s3/copy.rs
+++ b/src/api/s3/copy.rs
@@ -387,7 +387,10 @@ pub async fn handle_upload_part_copy(
// we need to insert that data as a new block.
async move {
if must_upload {
- garage2.block_manager.rpc_put_block(final_hash, data).await
+ garage2
+ .block_manager
+ .rpc_put_block(final_hash, data, None)
+ .await
} else {
Ok(())
}
diff --git a/src/api/s3/multipart.rs b/src/api/s3/multipart.rs
index b9d15b21..5959bcd6 100644
--- a/src/api/s3/multipart.rs
+++ b/src/api/s3/multipart.rs
@@ -6,7 +6,6 @@ use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*;
-use garage_util::async_hash::*;
use garage_util::data::*;
use garage_model::bucket_table::Bucket;
@@ -135,17 +134,8 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?;
// Copy data to version
- let first_block_hash = async_blake2sum(first_block.clone()).await;
-
- let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
- &garage,
- &version,
- part_number,
- first_block,
- first_block_hash,
- &mut chunker,
- )
- .await?;
+ let (total_size, data_md5sum, data_sha256sum, _) =
+ read_and_put_blocks(&garage, &version, part_number, first_block, &mut chunker).await?;
// Verify that checksums map
ensure_checksum_matches(
diff --git a/src/api/s3/post_object.rs b/src/api/s3/post_object.rs
index bca8d6c6..b542cc1a 100644
--- a/src/api/s3/post_object.rs
+++ b/src/api/s3/post_object.rs
@@ -21,7 +21,7 @@ use crate::s3::cors::*;
use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream};
use crate::s3::xml as s3_xml;
-use crate::signature::payload::{parse_date, verify_v4};
+use crate::signature::payload::{verify_v4, Authorization};
pub async fn handle_post_object(
garage: Arc<Garage>,
@@ -88,22 +88,11 @@ pub async fn handle_post_object(
.get("key")
.ok_or_bad_request("No key was provided")?
.to_str()?;
- let credential = params
- .get("x-amz-credential")
- .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
- .to_str()?;
let policy = params
.get("policy")
.ok_or_bad_request("No policy was provided")?
.to_str()?;
- let signature = params
- .get("x-amz-signature")
- .ok_or_bad_request("No signature was provided")?
- .to_str()?;
- let date = params
- .get("x-amz-date")
- .ok_or_bad_request("No date was provided")?
- .to_str()?;
+ let authorization = Authorization::parse_form(&params)?;
let key = if key.contains("${filename}") {
// if no filename is provided, don't replace. This matches the behavior of AWS.
@@ -116,16 +105,7 @@ pub async fn handle_post_object(
key.to_owned()
};
- let date = parse_date(date)?;
- let api_key = verify_v4(
- &garage,
- "s3",
- credential,
- &date,
- signature,
- policy.as_bytes(),
- )
- .await?;
+ let api_key = verify_v4(&garage, "s3", &authorization, policy.as_bytes()).await?;
let bucket_id = garage
.bucket_helper()
diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs
index fdfa567d..489f1136 100644
--- a/src/api/s3/put.rs
+++ b/src/api/s3/put.rs
@@ -3,10 +3,13 @@ use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*;
+use futures::stream::FuturesOrdered;
use futures::try_join;
use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
use sha2::Sha256;
+use tokio::sync::mpsc;
+
use hyper::body::Bytes;
use hyper::header::{HeaderMap, HeaderValue};
use hyper::{Request, Response};
@@ -17,6 +20,7 @@ use opentelemetry::{
};
use garage_net::bytes_buf::BytesBuf;
+use garage_rpc::rpc_helper::OrderTag;
use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;
@@ -35,6 +39,8 @@ use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
+const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
+
pub async fn handle_put(
garage: Arc<Garage>,
req: Request<ReqBody>,
@@ -168,17 +174,8 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage.version_table.insert(&version).await?;
// Transfer data and verify checksum
- let first_block_hash = async_blake2sum(first_block.clone()).await;
-
- let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
- &garage,
- &version,
- 1,
- first_block,
- first_block_hash,
- &mut chunker,
- )
- .await?;
+ let (total_size, data_md5sum, data_sha256sum, first_block_hash) =
+ read_and_put_blocks(&garage, &version, 1, first_block, &mut chunker).await?;
ensure_checksum_matches(
data_md5sum.as_slice(),
@@ -299,84 +296,164 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
version: &Version,
part_number: u64,
first_block: Bytes,
- first_block_hash: Hash,
chunker: &mut StreamChunker<S>,
-) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash), Error> {
+) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage");
- let md5hasher = AsyncHasher::<Md5>::new();
- let sha256hasher = AsyncHasher::<Sha256>::new();
+ let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
+ let read_blocks = async {
+ block_tx.send(Ok(first_block)).await?;
+ loop {
+ let res = chunker
+ .next()
+ .with_context(Context::current_with_span(
+ tracer.start("Read block from client"),
+ ))
+ .await;
+ match res {
+ Ok(Some(block)) => block_tx.send(Ok(block)).await?,
+ Ok(None) => break,
+ Err(e) => {
+ block_tx.send(Err(e)).await?;
+ break;
+ }
+ }
+ }
+ drop(block_tx);
+ Ok::<_, mpsc::error::SendError<_>>(())
+ };
- futures::future::join(
- md5hasher.update(first_block.clone()),
- sha256hasher.update(first_block.clone()),
- )
- .with_context(Context::current_with_span(
- tracer.start("Hash first block (md5, sha256)"),
- ))
- .await;
+ let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
+ let hash_stream = async {
+ let md5hasher = AsyncHasher::<Md5>::new();
+ let sha256hasher = AsyncHasher::<Sha256>::new();
+ while let Some(next) = block_rx.recv().await {
+ match next {
+ Ok(block) => {
+ block_tx2.send(Ok(block.clone())).await?;
+ futures::future::join(
+ md5hasher.update(block.clone()),
+ sha256hasher.update(block.clone()),
+ )
+ .with_context(Context::current_with_span(
+ tracer.start("Hash block (md5, sha256)"),
+ ))
+ .await;
+ }
+ Err(e) => {
+ block_tx2.send(Err(e)).await?;
+ break;
+ }
+ }
+ }
+ drop(block_tx2);
+ Ok::<_, mpsc::error::SendError<_>>(futures::join!(
+ md5hasher.finalize(),
+ sha256hasher.finalize()
+ ))
+ };
- let mut next_offset = first_block.len();
- let mut put_curr_version_block = put_block_meta(
- garage,
- version,
- part_number,
- 0,
- first_block_hash,
- first_block.len() as u64,
- );
- let mut put_curr_block = garage
- .block_manager
- .rpc_put_block(first_block_hash, first_block);
-
- loop {
- let (_, _, next_block) = futures::try_join!(
- put_curr_block.map_err(Error::from),
- put_curr_version_block.map_err(Error::from),
- chunker.next(),
- )?;
- if let Some(block) = next_block {
- let (_, _, block_hash) = futures::future::join3(
- md5hasher.update(block.clone()),
- sha256hasher.update(block.clone()),
- async_blake2sum(block.clone()),
- )
- .with_context(Context::current_with_span(
- tracer.start("Hash block (md5, sha256, blake2)"),
- ))
- .await;
- let block_len = block.len();
- put_curr_version_block = put_block_meta(
+ let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, Hash), Error>>(1);
+ let hash_blocks = async {
+ let mut first_block_hash = None;
+ while let Some(next) = block_rx2.recv().await {
+ match next {
+ Ok(block) => {
+ let hash = async_blake2sum(block.clone())
+ .with_context(Context::current_with_span(
+ tracer.start("Hash block (blake2)"),
+ ))
+ .await;
+ if first_block_hash.is_none() {
+ first_block_hash = Some(hash);
+ }
+ block_tx3.send(Ok((block, hash))).await?;
+ }
+ Err(e) => {
+ block_tx3.send(Err(e)).await?;
+ break;
+ }
+ }
+ }
+ drop(block_tx3);
+ Ok::<_, mpsc::error::SendError<_>>(first_block_hash.unwrap())
+ };
+
+ let put_blocks = async {
+ // Structure for handling several concurrent writes to storage nodes
+ let order_stream = OrderTag::stream();
+ let mut write_futs = FuturesOrdered::new();
+ let mut written_bytes = 0u64;
+ loop {
+ // Simultaneously write blocks to storage nodes & wait for the next block to be written
+ let currently_running = write_futs.len();
+ let write_futs_next = async {
+ if write_futs.is_empty() {
+ futures::future::pending().await
+ } else {
+ write_futs.next().await.unwrap()
+ }
+ };
+ let recv_next = async {
+ // If the maximum number of writes is already in progress, don't add more for now
+ if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
+ futures::future::pending().await
+ } else {
+ block_rx3.recv().await
+ }
+ };
+ let (block, hash) = tokio::select! {
+ result = write_futs_next => {
+ result?;
+ continue;
+ },
+ recv = recv_next => match recv {
+ Some(next) => next?,
+ None => break,
+ },
+ };
+
+ // For next block to be written: count its size and spawn future to write it
+ let offset = written_bytes;
+ written_bytes += block.len() as u64;
+ write_futs.push_back(put_block_and_meta(
garage,
version,
part_number,
- next_offset as u64,
- block_hash,
- block_len as u64,
- );
- put_curr_block = garage.block_manager.rpc_put_block(block_hash, block);
- next_offset += block_len;
- } else {
- break;
+ offset,
+ hash,
+ block,
+ order_stream.order(written_bytes),
+ ));
}
- }
+ while let Some(res) = write_futs.next().await {
+ res?;
+ }
+ Ok::<_, Error>(written_bytes)
+ };
+
+ let (_, stream_hash_result, block_hash_result, final_result) =
+ futures::join!(read_blocks, hash_stream, hash_blocks, put_blocks);
- let total_size = next_offset as u64;
- let data_md5sum = md5hasher.finalize().await;
+ let total_size = final_result?;
+ // unwrap here is ok: if a hasher failed, it is because something failed
+ // later in the pipeline, which already caused a return at the ? on the previous line
+ let (data_md5sum, data_sha256sum) = stream_hash_result.unwrap();
+ let first_block_hash = block_hash_result.unwrap();
- let data_sha256sum = sha256hasher.finalize().await;
let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap();
- Ok((total_size, data_md5sum, data_sha256sum))
+ Ok((total_size, data_md5sum, data_sha256sum, first_block_hash))
}
-async fn put_block_meta(
+async fn put_block_and_meta(
garage: &Garage,
version: &Version,
part_number: u64,
offset: u64,
hash: Hash,
- size: u64,
+ block: Bytes,
+ order_tag: OrderTag,
) -> Result<(), GarageError> {
let mut version = version.clone();
version.blocks.put(
@@ -384,7 +461,10 @@ async fn put_block_meta(
part_number,
offset,
},
- VersionBlock { hash, size },
+ VersionBlock {
+ hash,
+ size: block.len() as u64,
+ },
);
let block_ref = BlockRef {
@@ -394,6 +474,9 @@ async fn put_block_meta(
};
futures::try_join!(
+ garage
+ .block_manager
+ .rpc_put_block(hash, block, Some(order_tag)),
garage.version_table.insert(&version),
garage.block_ref_table.insert(&block_ref),
)?;
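
The put_blocks future above keeps at most PUT_BLOCKS_MAX_PARALLEL block writes in flight while preserving their order. A stripped-down sketch of that bounded-parallelism pattern with FuturesOrdered (illustrative names only, without the hashing stages and order tags of the actual pipeline):

```rust
use futures::stream::{FuturesOrdered, StreamExt};

const MAX_PARALLEL: usize = 3;

// Stand-in for rpc_put_block: "upload" one block somewhere.
async fn upload_block(_block: Vec<u8>) -> Result<(), String> {
    Ok(())
}

async fn upload_all(blocks: Vec<Vec<u8>>) -> Result<(), String> {
    let mut in_flight = FuturesOrdered::new();
    let mut blocks = blocks.into_iter();
    loop {
        // Top up to MAX_PARALLEL concurrent uploads.
        while in_flight.len() < MAX_PARALLEL {
            match blocks.next() {
                Some(b) => in_flight.push_back(upload_block(b)),
                None => break,
            }
        }
        // Await the oldest in-flight upload; results come back in submission order.
        match in_flight.next().await {
            Some(res) => res?,
            None => break, // no blocks left and nothing in flight
        }
    }
    Ok(())
}
```
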
diff --git a/src/api/signature/mod.rs b/src/api/signature/mod.rs
index 4b8b990f..6514da43 100644
--- a/src/api/signature/mod.rs
+++ b/src/api/signature/mod.rs
@@ -2,19 +2,44 @@ use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;
+use hyper::{body::Incoming as IncomingBody, Request};
+
+use garage_model::garage::Garage;
+use garage_model::key_table::Key;
use garage_util::data::{sha256sum, Hash};
+use error::*;
+
pub mod error;
pub mod payload;
pub mod streaming;
-use error::*;
-
pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
type HmacSha256 = Hmac<Sha256>;
+pub async fn verify_request(
+ garage: &Garage,
+ mut req: Request<IncomingBody>,
+ service: &'static str,
+) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
+ let (api_key, mut content_sha256) =
+ payload::check_payload_signature(&garage, &mut req, service).await?;
+ let api_key =
+ api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
+
+ let req = streaming::parse_streaming_body(
+ &api_key,
+ req,
+ &mut content_sha256,
+ &garage.config.s3_api.s3_region,
+ service,
+ )?;
+
+ Ok((req, api_key, content_sha256))
+}
+
pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
if expected_sha256 != sha256sum(body) {
return Err(Error::bad_request(
diff --git a/src/api/signature/payload.rs b/src/api/signature/payload.rs
index 423aad93..0029716a 100644
--- a/src/api/signature/payload.rs
+++ b/src/api/signature/payload.rs
@@ -1,7 +1,9 @@
use std::collections::HashMap;
+use std::convert::TryFrom;
use chrono::{DateTime, Duration, NaiveDateTime, TimeZone, Utc};
use hmac::Mac;
+use hyper::header::{HeaderMap, HeaderName, HeaderValue, AUTHORIZATION, HOST};
use hyper::{body::Incoming as IncomingBody, Method, Request};
use sha2::{Digest, Sha256};
@@ -17,66 +19,92 @@ use super::{compute_scope, signing_hmac};
use crate::encoding::uri_encode;
use crate::signature::error::*;
+pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
+pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
+pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
+pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
+pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
+pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
+pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
+
+pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
+pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
+pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
+
+pub type QueryMap = HashMap<String, String>;
+
pub async fn check_payload_signature(
garage: &Garage,
+ request: &mut Request<IncomingBody>,
service: &'static str,
- request: &Request<IncomingBody>,
) -> Result<(Option<Key>, Option<Hash>), Error> {
- let mut headers = HashMap::new();
- for (key, val) in request.headers() {
- headers.insert(key.to_string(), val.to_str()?.to_string());
- }
- if let Some(query) = request.uri().query() {
- let query_pairs = url::form_urlencoded::parse(query.as_bytes());
- for (key, val) in query_pairs {
- headers.insert(key.to_lowercase(), val.to_string());
- }
- }
-
- let authorization = if let Some(authorization) = headers.get("authorization") {
- parse_authorization(authorization, &headers)?
- } else if let Some(algorithm) = headers.get("x-amz-algorithm") {
- parse_query_authorization(algorithm, &headers)?
+ let query = parse_query_map(request.uri())?;
+
+ if query.contains_key(X_AMZ_ALGORITHM.as_str()) {
+ // We check for presigned-URL-style authentication first, because
+ // the browser or something else could inject an Authorization header
+ // that is totally unrelated to AWS signatures.
+ check_presigned_signature(garage, service, request, query).await
+ } else if request.headers().contains_key(AUTHORIZATION) {
+ check_standard_signature(garage, service, request, query).await
} else {
- let content_sha256 = headers.get("x-amz-content-sha256");
- if let Some(content_sha256) = content_sha256.filter(|c| "UNSIGNED-PAYLOAD" != c.as_str()) {
+ // Unsigned (anonymous) request
+ let content_sha256 = request
+ .headers()
+ .get("x-amz-content-sha256")
+ .filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
+ if let Some(content_sha256) = content_sha256 {
let sha256 = hex::decode(content_sha256)
.ok()
.and_then(|bytes| Hash::try_from(&bytes))
.ok_or_bad_request("Invalid content sha256 hash")?;
- return Ok((None, Some(sha256)));
+ Ok((None, Some(sha256)))
} else {
- return Ok((None, None));
+ Ok((None, None))
}
- };
+ }
+}
+
+async fn check_standard_signature(
+ garage: &Garage,
+ service: &'static str,
+ request: &Request<IncomingBody>,
+ query: QueryMap,
+) -> Result<(Option<Key>, Option<Hash>), Error> {
+ let authorization = Authorization::parse_header(request.headers())?;
+
+ // Verify that all necessary request headers are included in signed_headers
+ // The following must be included for all signatures:
+ // - the Host header (mandatory)
+ // - all x-amz-* headers used in the request
+ // AWS also indicates that the Content-Type header should be signed if
+ // it is used, but the MinIO client doesn't sign it, so for compatibility we don't check it.
+ let signed_headers = split_signed_headers(&authorization)?;
+ verify_signed_headers(request.headers(), &signed_headers)?;
let canonical_request = canonical_request(
service,
request.method(),
- request.uri(),
- &headers,
- &authorization.signed_headers,
+ request.uri().path(),
+ &query,
+ request.headers(),
+ &signed_headers,
&authorization.content_sha256,
+ )?;
+ let string_to_sign = string_to_sign(
+ &authorization.date,
+ &authorization.scope,
+ &canonical_request,
);
- let (_, scope) = parse_credential(&authorization.credential)?;
- let string_to_sign = string_to_sign(&authorization.date, &scope, &canonical_request);
trace!("canonical request:\n{}", canonical_request);
trace!("string to sign:\n{}", string_to_sign);
- let key = verify_v4(
- garage,
- service,
- &authorization.credential,
- &authorization.date,
- &authorization.signature,
- string_to_sign.as_bytes(),
- )
- .await?;
+ let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
- let content_sha256 = if authorization.content_sha256 == "UNSIGNED-PAYLOAD" {
+ let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
None
- } else if authorization.content_sha256 == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" {
+ } else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
} else {
@@ -88,124 +116,96 @@ pub async fn check_payload_signature(
Ok((Some(key), content_sha256))
}
-struct Authorization {
- credential: String,
- signed_headers: String,
- signature: String,
- content_sha256: String,
- date: DateTime<Utc>,
-}
-
-fn parse_authorization(
- authorization: &str,
- headers: &HashMap<String, String>,
-) -> Result<Authorization, Error> {
- let first_space = authorization
- .find(' ')
- .ok_or_bad_request("Authorization field to short")?;
- let (auth_kind, rest) = authorization.split_at(first_space);
-
- if auth_kind != "AWS4-HMAC-SHA256" {
- return Err(Error::bad_request("Unsupported authorization method"));
- }
-
- let mut auth_params = HashMap::new();
- for auth_part in rest.split(',') {
- let auth_part = auth_part.trim();
- let eq = auth_part
- .find('=')
- .ok_or_bad_request("Field without value in authorization header")?;
- let (key, value) = auth_part.split_at(eq);
- auth_params.insert(key.to_string(), value.trim_start_matches('=').to_string());
- }
-
- let cred = auth_params
- .get("Credential")
- .ok_or_bad_request("Could not find Credential in Authorization field")?;
-
- let content_sha256 = headers
- .get("x-amz-content-sha256")
- .ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;
-
- let date = headers
- .get("x-amz-date")
- .ok_or_bad_request("Missing X-Amz-Date field")
- .map_err(Error::from)
- .and_then(|d| parse_date(d))?;
+async fn check_presigned_signature(
+ garage: &Garage,
+ service: &'static str,
+ request: &mut Request<IncomingBody>,
+ mut query: QueryMap,
+) -> Result<(Option<Key>, Option<Hash>), Error> {
+ let algorithm = query.get(X_AMZ_ALGORITHM.as_str()).unwrap();
+ let authorization = Authorization::parse_presigned(algorithm, &query)?;
+
+ // Verify that all necessary request headers are included in signed_headers
+ // For AWSv4 pre-signed URLs, the following must be included:
+ // - the Host header (mandatory)
+ // - all x-amz-* headers used in the request
+ let signed_headers = split_signed_headers(&authorization)?;
+ verify_signed_headers(request.headers(), &signed_headers)?;
+
+ // The X-Amz-Signature value is passed as a query parameter,
+ // but the signature cannot be computed from a string that contains itself.
+ // AWS specifies that all query params except X-Amz-Signature are included
+ // in the canonical request.
+ query.remove(X_AMZ_SIGNATURE.as_str());
+ let canonical_request = canonical_request(
+ service,
+ request.method(),
+ request.uri().path(),
+ &query,
+ request.headers(),
+ &signed_headers,
+ &authorization.content_sha256,
+ )?;
+ let string_to_sign = string_to_sign(
+ &authorization.date,
+ &authorization.scope,
+ &canonical_request,
+ );
- if Utc::now() - date > Duration::hours(24) {
- return Err(Error::bad_request("Date is too old".to_string()));
+ trace!("canonical request (presigned url):\n{}", canonical_request);
+ trace!("string to sign (presigned url):\n{}", string_to_sign);
+
+ let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
+
+ // In the page on presigned URLs, AWS specifies that if a signed query
+ // parameter and a signed header of the same name have different values,
+ // then an InvalidRequest error is raised.
+ let headers_mut = request.headers_mut();
+ for (name, value) in query.iter() {
+ let name =
+ HeaderName::from_bytes(name.as_bytes()).ok_or_bad_request("Invalid header name")?;
+ if let Some(existing) = headers_mut.get(&name) {
+ if signed_headers.contains(&name) && existing.as_bytes() != value.as_bytes() {
+ return Err(Error::bad_request(format!(
+ "Conflicting values for `{}` in query parameters and request headers",
+ name
+ )));
+ }
+ }
+ if name.as_str().starts_with("x-amz-") {
+ // Query parameters that start with x-amz- are actually intended to stand in for
+ // headers that can't be added at the time the request is made.
+ // What we do is just add them to the Request object as regular headers,
+ // that will be handled downstream as if they were included like in a normal request.
+ // (Here we allow such query parameters to override headers with the same name
+ // that are not signed, though there is little reason for that to happen)
+ headers_mut.insert(
+ name,
+ HeaderValue::from_bytes(value.as_bytes())
+ .ok_or_bad_request("invalid query parameter value")?,
+ );
+ }
}
- let auth = Authorization {
- credential: cred.to_string(),
- signed_headers: auth_params
- .get("SignedHeaders")
- .ok_or_bad_request("Could not find SignedHeaders in Authorization field")?
- .to_string(),
- signature: auth_params
- .get("Signature")
- .ok_or_bad_request("Could not find Signature in Authorization field")?
- .to_string(),
- content_sha256: content_sha256.to_string(),
- date,
- };
- Ok(auth)
+ // Presigned URLs always use UNSIGNED-PAYLOAD,
+ // so there is no sha256 hash to return.
+ Ok((Some(key), None))
}
-fn parse_query_authorization(
- algorithm: &str,
- headers: &HashMap<String, String>,
-) -> Result<Authorization, Error> {
- if algorithm != "AWS4-HMAC-SHA256" {
- return Err(Error::bad_request(
- "Unsupported authorization method".to_string(),
- ));
- }
-
- let cred = headers
- .get("x-amz-credential")
- .ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
- let signed_headers = headers
- .get("x-amz-signedheaders")
- .ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
- let signature = headers
- .get("x-amz-signature")
- .ok_or_bad_request("X-Amz-Signature not found in query parameters")?;
- let content_sha256 = headers
- .get("x-amz-content-sha256")
- .map(|x| x.as_str())
- .unwrap_or("UNSIGNED-PAYLOAD");
-
- let duration = headers
- .get("x-amz-expires")
- .ok_or_bad_request("X-Amz-Expires not found in query parameters")?
- .parse()
- .map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;
-
- if duration > 7 * 24 * 3600 {
- return Err(Error::bad_request(
- "X-Amz-Expires may not exceed a week".to_string(),
- ));
- }
-
- let date = headers
- .get("x-amz-date")
- .ok_or_bad_request("Missing X-Amz-Date field")
- .map_err(Error::from)
- .and_then(|d| parse_date(d))?;
-
- if Utc::now() - date > Duration::seconds(duration) {
- return Err(Error::bad_request("Date is too old".to_string()));
+pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
+ let mut query = QueryMap::new();
+ if let Some(query_str) = uri.query() {
+ let query_pairs = url::form_urlencoded::parse(query_str.as_bytes());
+ for (key, val) in query_pairs {
+ if query.insert(key.to_string(), val.into_owned()).is_some() {
+ return Err(Error::bad_request(format!(
+ "duplicate query parameter: `{}`",
+ key
+ )));
+ }
+ }
}
-
- Ok(Authorization {
- credential: cred.to_string(),
- signed_headers: signed_headers.to_string(),
- signature: signature.to_string(),
- content_sha256: content_sha256.to_string(),
- date,
- })
+ Ok(query)
}
fn parse_credential(cred: &str) -> Result<(String, String), Error> {
@@ -219,11 +219,39 @@ fn parse_credential(cred: &str) -> Result<(String, String), Error> {
))
}
+fn split_signed_headers(authorization: &Authorization) -> Result<Vec<HeaderName>, Error> {
+ let mut signed_headers = authorization
+ .signed_headers
+ .split(';')
+ .map(HeaderName::try_from)
+ .collect::<Result<Vec<HeaderName>, _>>()
+ .ok_or_bad_request("invalid header name")?;
+ signed_headers.sort_by(|h1, h2| h1.as_str().cmp(h2.as_str()));
+ Ok(signed_headers)
+}
+
+fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) -> Result<(), Error> {
+ if !signed_headers.contains(&HOST) {
+ return Err(Error::bad_request("Header `Host` should be signed"));
+ }
+ for (name, _) in headers.iter() {
+ if name.as_str().starts_with("x-amz-") {
+ if !signed_headers.contains(name) {
+ return Err(Error::bad_request(format!(
+ "Header `{}` should be signed",
+ name
+ )));
+ }
+ }
+ }
+ Ok(())
+}
+
pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_req: &str) -> String {
let mut hasher = Sha256::default();
hasher.update(canonical_req.as_bytes());
[
- "AWS4-HMAC-SHA256",
+ AWS4_HMAC_SHA256,
&datetime.format(LONG_DATETIME).to_string(),
scope_string,
&hex::encode(hasher.finalize().as_slice()),
@@ -234,11 +262,12 @@ pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_re
pub fn canonical_request(
service: &'static str,
method: &Method,
- uri: &hyper::Uri,
- headers: &HashMap<String, String>,
- signed_headers: &str,
+ canonical_uri: &str,
+ query: &QueryMap,
+ headers: &HeaderMap,
+ signed_headers: &[HeaderName],
content_sha256: &str,
-) -> String {
+) -> Result<String, Error> {
// There seems to be evidence that in AWSv4 signatures, the path component is url-encoded
// a second time when building the canonical request, as specified in this documentation page:
// -> https://docs.aws.amazon.com/rolesanywhere/latest/userguide/authentication-sign-process.html
@@ -268,49 +297,46 @@ pub fn canonical_request(
// it mentions it in the comments (same link to the souce code as above).
// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
// would make non-normalized paths invalid K2V partition keys, and we don't want that.
- let path: std::borrow::Cow<str> = if service != "s3" {
- uri_encode(uri.path(), false).into()
+ let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
+ uri_encode(canonical_uri, false).into()
} else {
- uri.path().into()
+ canonical_uri.into()
};
- [
- method.as_str(),
- &path,
- &canonical_query_string(uri),
- &canonical_header_string(headers, signed_headers),
- "",
- signed_headers,
- content_sha256,
- ]
- .join("\n")
-}
-
-fn canonical_header_string(headers: &HashMap<String, String>, signed_headers: &str) -> String {
- let signed_headers_vec = signed_headers.split(';').collect::<Vec<_>>();
- let mut items = headers
- .iter()
- .filter(|(key, _)| signed_headers_vec.contains(&key.as_str()))
- .collect::<Vec<_>>();
- items.sort_by(|(k1, _), (k2, _)| k1.cmp(k2));
- items
- .iter()
- .map(|(key, value)| key.to_lowercase() + ":" + value.trim())
- .collect::<Vec<_>>()
- .join("\n")
-}
-fn canonical_query_string(uri: &hyper::Uri) -> String {
- if let Some(query) = uri.query() {
- let query_pairs = url::form_urlencoded::parse(query.as_bytes());
- let mut items = query_pairs
- .filter(|(key, _)| key != "X-Amz-Signature")
- .map(|(key, value)| uri_encode(&key, true) + "=" + &uri_encode(&value, true))
- .collect::<Vec<_>>();
+ // Canonical query string from the passed QueryMap
+ let canonical_query_string = {
+ let mut items = Vec::with_capacity(query.len());
+ for (key, value) in query.iter() {
+ items.push(uri_encode(&key, true) + "=" + &uri_encode(&value, true));
+ }
items.sort();
items.join("&")
- } else {
- "".to_string()
- }
+ };
+
+ // Canonical header string calculated from signed headers
+ let canonical_header_string = signed_headers
+ .iter()
+ .map(|name| {
+ let value = headers
+ .get(name)
+ .ok_or_bad_request(format!("signed header `{}` is not present", name))?
+ .to_str()?;
+ Ok(format!("{}:{}", name.as_str(), value.trim()))
+ })
+ .collect::<Result<Vec<String>, Error>>()?
+ .join("\n");
+ let signed_headers = signed_headers.join(";");
+
+ let list = [
+ method.as_str(),
+ &canonical_uri,
+ &canonical_query_string,
+ &canonical_header_string,
+ "",
+ &signed_headers,
+ content_sha256,
+ ];
+ Ok(list.join("\n"))
}
pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
@@ -322,38 +348,202 @@ pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
pub async fn verify_v4(
garage: &Garage,
service: &str,
- credential: &str,
- date: &DateTime<Utc>,
- signature: &str,
+ auth: &Authorization,
payload: &[u8],
) -> Result<Key, Error> {
- let (key_id, scope) = parse_credential(credential)?;
-
- let scope_expected = compute_scope(date, &garage.config.s3_api.s3_region, service);
- if scope != scope_expected {
- return Err(Error::AuthorizationHeaderMalformed(scope.to_string()));
+ let scope_expected = compute_scope(&auth.date, &garage.config.s3_api.s3_region, service);
+ if auth.scope != scope_expected {
+ return Err(Error::AuthorizationHeaderMalformed(auth.scope.to_string()));
}
let key = garage
.key_table
- .get(&EmptyKey, &key_id)
+ .get(&EmptyKey, &auth.key_id)
.await?
.filter(|k| !k.state.is_deleted())
- .ok_or_else(|| Error::forbidden(format!("No such key: {}", &key_id)))?;
+ .ok_or_else(|| Error::forbidden(format!("No such key: {}", &auth.key_id)))?;
let key_p = key.params().unwrap();
let mut hmac = signing_hmac(
- date,
+ &auth.date,
&key_p.secret_key,
&garage.config.s3_api.s3_region,
service,
)
.ok_or_internal_error("Unable to build signing HMAC")?;
hmac.update(payload);
- let our_signature = hex::encode(hmac.finalize().into_bytes());
- if signature != our_signature {
- return Err(Error::forbidden("Invalid signature".to_string()));
+ let signature =
+ hex::decode(&auth.signature).map_err(|_| Error::forbidden("Invalid signature"))?;
+ if hmac.verify_slice(&signature).is_err() {
+ return Err(Error::forbidden("Invalid signature"));
}
Ok(key)
}
+
+// ============ Authorization header, or X-Amz-* query params =========
+
+pub struct Authorization {
+ key_id: String,
+ scope: String,
+ signed_headers: String,
+ signature: String,
+ content_sha256: String,
+ date: DateTime<Utc>,
+}
+
+impl Authorization {
+ fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
+ let authorization = headers
+ .get(AUTHORIZATION)
+ .ok_or_bad_request("Missing authorization header")?
+ .to_str()?;
+
+ let (auth_kind, rest) = authorization
+ .split_once(' ')
+ .ok_or_bad_request("Authorization field to short")?;
+
+ if auth_kind != AWS4_HMAC_SHA256 {
+ return Err(Error::bad_request("Unsupported authorization method"));
+ }
+
+ let mut auth_params = HashMap::new();
+ for auth_part in rest.split(',') {
+ let auth_part = auth_part.trim();
+ let eq = auth_part
+ .find('=')
+ .ok_or_bad_request("Field without value in authorization header")?;
+ let (key, value) = auth_part.split_at(eq);
+ auth_params.insert(key.to_string(), value.trim_start_matches('=').to_string());
+ }
+
+ let cred = auth_params
+ .get("Credential")
+ .ok_or_bad_request("Could not find Credential in Authorization field")?;
+ let signed_headers = auth_params
+ .get("SignedHeaders")
+ .ok_or_bad_request("Could not find SignedHeaders in Authorization field")?
+ .to_string();
+ let signature = auth_params
+ .get("Signature")
+ .ok_or_bad_request("Could not find Signature in Authorization field")?
+ .to_string();
+
+ let content_sha256 = headers
+ .get(X_AMZ_CONTENT_SH256)
+ .ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;
+
+ let date = headers
+ .get(X_AMZ_DATE)
+ .ok_or_bad_request("Missing X-Amz-Date field")
+ .map_err(Error::from)?
+ .to_str()?;
+ let date = parse_date(date)?;
+
+ if Utc::now() - date > Duration::hours(24) {
+ return Err(Error::bad_request("Date is too old".to_string()));
+ }
+
+ let (key_id, scope) = parse_credential(cred)?;
+ let auth = Authorization {
+ key_id,
+ scope,
+ signed_headers,
+ signature,
+ content_sha256: content_sha256.to_str()?.to_string(),
+ date,
+ };
+ Ok(auth)
+ }
+
+ fn parse_presigned(algorithm: &str, query: &QueryMap) -> Result<Self, Error> {
+ if algorithm != AWS4_HMAC_SHA256 {
+ return Err(Error::bad_request(
+ "Unsupported authorization method".to_string(),
+ ));
+ }
+
+ let cred = query
+ .get(X_AMZ_CREDENTIAL.as_str())
+ .ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
+ let signed_headers = query
+ .get(X_AMZ_SIGNEDHEADERS.as_str())
+ .ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
+ let signature = query
+ .get(X_AMZ_SIGNATURE.as_str())
+ .ok_or_bad_request("X-Amz-Signature not found in query parameters")?;
+
+ let duration = query
+ .get(X_AMZ_EXPIRES.as_str())
+ .ok_or_bad_request("X-Amz-Expires not found in query parameters")?
+ .parse()
+ .map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;
+
+ if duration > 7 * 24 * 3600 {
+ return Err(Error::bad_request(
+ "X-Amz-Expires may not exceed a week".to_string(),
+ ));
+ }
+
+ let date = query
+ .get(X_AMZ_DATE.as_str())
+ .ok_or_bad_request("Missing X-Amz-Date field")?;
+ let date = parse_date(date)?;
+
+ if Utc::now() - date > Duration::seconds(duration) {
+ return Err(Error::bad_request("Date is too old".to_string()));
+ }
+
+ let (key_id, scope) = parse_credential(cred)?;
+ Ok(Authorization {
+ key_id,
+ scope,
+ signed_headers: signed_headers.to_string(),
+ signature: signature.to_string(),
+ content_sha256: UNSIGNED_PAYLOAD.to_string(),
+ date,
+ })
+ }
+
+ pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
+ let algorithm = params
+ .get(X_AMZ_ALGORITHM)
+ .ok_or_bad_request("Missing X-Amz-Algorithm header")?
+ .to_str()?;
+ if algorithm != AWS4_HMAC_SHA256 {
+ return Err(Error::bad_request(
+ "Unsupported authorization method".to_string(),
+ ));
+ }
+
+ let credential = params
+ .get(X_AMZ_CREDENTIAL)
+ .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
+ .to_str()?;
+ let signature = params
+ .get(X_AMZ_SIGNATURE)
+ .ok_or_bad_request("No signature was provided")?
+ .to_str()?
+ .to_string();
+ let date = params
+ .get(X_AMZ_DATE)
+ .ok_or_bad_request("No date was provided")?
+ .to_str()?;
+ let date = parse_date(date)?;
+
+ if Utc::now() - date > Duration::hours(24) {
+ return Err(Error::bad_request("Date is too old".to_string()));
+ }
+
+ let (key_id, scope) = parse_credential(credential)?;
+ let auth = Authorization {
+ key_id,
+ scope,
+ signed_headers: "".to_string(),
+ signature,
+ content_sha256: UNSIGNED_PAYLOAD.to_string(),
+ date,
+ };
+ Ok(auth)
+ }
+}
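
For reference, the canonical request returned by canonical_request() above is just a newline-joined list of fields; a hand-written example of its shape (all values illustrative):

```rust
// Illustrative only: the shape of the string assembled by canonical_request().
fn example_canonical_request() -> String {
    [
        "GET",                                   // HTTP method
        "/my-bucket/my-key",                     // canonical URI (path)
        "x-amz-date=20240101T000000Z",           // canonical query string, sorted & URI-encoded
        "host:garage.example.net\nx-amz-content-sha256:UNSIGNED-PAYLOAD", // canonical headers
        "",                                      // empty separator line
        "host;x-amz-content-sha256",             // signed header names, ';'-joined
        "UNSIGNED-PAYLOAD",                      // content_sha256 field
    ]
    .join("\n")
}
```
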
diff --git a/src/api/signature/streaming.rs b/src/api/signature/streaming.rs
index a2a71f6b..e223d1b1 100644
--- a/src/api/signature/streaming.rs
+++ b/src/api/signature/streaming.rs
@@ -15,6 +15,11 @@ use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
use crate::helpers::*;
use crate::signature::error::*;
+use crate::signature::payload::{
+ STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
+};
+
+pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";
pub type ReqBody = BoxBody<Error>;
@@ -25,8 +30,8 @@ pub fn parse_streaming_body(
region: &str,
service: &str,
) -> Result<Request<ReqBody>, Error> {
- match req.headers().get("x-amz-content-sha256") {
- Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
+ match req.headers().get(X_AMZ_CONTENT_SH256) {
+ Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
let signature = content_sha256
.take()
.ok_or_bad_request("No signature provided")?;
@@ -39,7 +44,7 @@ pub fn parse_streaming_body(
let date = req
.headers()
- .get("x-amz-date")
+ .get(X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")?
.to_str()?;
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
@@ -75,7 +80,7 @@ fn compute_streaming_payload_signature(
content_sha256: Hash,
) -> Result<Hash, Error> {
let string_to_sign = [
- "AWS4-HMAC-SHA256-PAYLOAD",
+ AWS4_HMAC_SHA256_PAYLOAD,
&date.format(LONG_DATETIME).to_string(),
scope,
&hex::encode(previous_signature),
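
For context, the per-chunk string-to-sign used in AWSv4 streaming uploads has six newline-joined fields; the layout below follows the AWS SigV4 chunked-upload documentation, and all values are placeholders rather than output of the code above:

```rust
// Illustrative layout of the AWS4-HMAC-SHA256-PAYLOAD string-to-sign
// (per AWS documentation; all values are placeholders).
fn example_chunk_string_to_sign() -> String {
    [
        "AWS4-HMAC-SHA256-PAYLOAD",
        "20240101T000000Z",                  // request date (LONG_DATETIME format)
        "20240101/garage/s3/aws4_request",   // scope
        "<hex signature of previous chunk>", // the seed signature for the first chunk
        "<hex sha256 of empty string>",
        "<hex sha256 of this chunk's data>",
    ]
    .join("\n")
}
```
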