-rw-r--r--  Cargo.lock | 44
-rw-r--r--  Cargo.nix | 175
-rw-r--r--  Cargo.toml | 19
-rw-r--r--  doc/book/connect/backup.md | 4
-rw-r--r--  doc/book/cookbook/real-world.md | 10
-rw-r--r--  doc/book/cookbook/reverse-proxy.md | 29
-rw-r--r--  doc/book/quick-start/_index.md | 3
-rw-r--r--  doc/book/reference-manual/configuration.md | 10
-rw-r--r--  doc/book/reference-manual/features.md | 22
-rw-r--r--  script/helm/garage/Chart.yaml | 2
-rwxr-xr-x  script/test-smoke.sh | 4
-rw-r--r--  src/api/Cargo.toml | 3
-rw-r--r--  src/api/admin/api_server.rs | 58
-rw-r--r--  src/api/k2v/api_server.rs | 15
-rw-r--r--  src/api/s3/api_server.rs | 15
-rw-r--r--  src/api/s3/copy.rs | 5
-rw-r--r--  src/api/s3/multipart.rs | 14
-rw-r--r--  src/api/s3/post_object.rs | 26
-rw-r--r--  src/api/s3/put.rs | 223
-rw-r--r--  src/api/signature/mod.rs | 29
-rw-r--r--  src/api/signature/payload.rs | 602
-rw-r--r--  src/api/signature/streaming.rs | 13
-rw-r--r--  src/block/Cargo.toml | 2
-rw-r--r--  src/block/manager.rs | 12
-rw-r--r--  src/db/Cargo.toml | 2
-rw-r--r--  src/garage/Cargo.toml | 2
-rw-r--r--  src/garage/tests/common/custom_requester.rs | 83
-rw-r--r--  src/garage/tests/s3/streaming_signature.rs | 2
-rw-r--r--  src/model/Cargo.toml | 2
-rw-r--r--  src/net/Cargo.toml | 4
-rw-r--r--  src/rpc/Cargo.toml | 2
-rw-r--r--  src/table/Cargo.toml | 2
-rw-r--r--  src/util/Cargo.toml | 2
-rw-r--r--  src/web/Cargo.toml | 2
34 files changed, 932 insertions, 510 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 38cc5e1f..b2956559 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -121,6 +121,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"
[[package]]
+name = "argon2"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072"
+dependencies = [
+ "base64ct",
+ "blake2",
+ "cpufeatures",
+ "password-hash",
+]
+
+[[package]]
name = "arrayvec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1265,7 +1277,7 @@ dependencies = [
[[package]]
name = "garage"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"assert-json-diff",
"async-trait",
@@ -1319,8 +1331,9 @@ dependencies = [
[[package]]
name = "garage_api"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
+ "argon2",
"async-trait",
"base64 0.21.7",
"bytes",
@@ -1367,7 +1380,7 @@ dependencies = [
[[package]]
name = "garage_block"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"arc-swap",
"async-compression",
@@ -1394,7 +1407,7 @@ dependencies = [
[[package]]
name = "garage_db"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"err-derive",
"heed",
@@ -1407,7 +1420,7 @@ dependencies = [
[[package]]
name = "garage_model"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"arc-swap",
"async-trait",
@@ -1435,7 +1448,7 @@ dependencies = [
[[package]]
name = "garage_net"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"arc-swap",
"async-trait",
@@ -1461,7 +1474,7 @@ dependencies = [
[[package]]
name = "garage_rpc"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"arc-swap",
"async-trait",
@@ -1496,7 +1509,7 @@ dependencies = [
[[package]]
name = "garage_table"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"arc-swap",
"async-trait",
@@ -1518,7 +1531,7 @@ dependencies = [
[[package]]
name = "garage_util"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"arc-swap",
"async-trait",
@@ -1552,7 +1565,7 @@ dependencies = [
[[package]]
name = "garage_web"
-version = "0.9.1"
+version = "0.9.2"
dependencies = [
"err-derive",
"futures",
@@ -2800,6 +2813,17 @@ dependencies = [
]
[[package]]
+name = "password-hash"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"
+dependencies = [
+ "base64ct",
+ "rand_core",
+ "subtle",
+]
+
+[[package]]
name = "paste"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/Cargo.nix b/Cargo.nix
index dfe01bfe..f976d2e6 100644
--- a/Cargo.nix
+++ b/Cargo.nix
@@ -34,7 +34,7 @@ args@{
ignoreLockHash,
}:
let
- nixifiedLockHash = "9377d18da3b48658f9d8b2070db135db2d9ac6d9c692d6656948b765348498cc";
+ nixifiedLockHash = "3e1e0730302ee7d1f4185a13ad0086392562eb7cb962b1212801847887beaa47";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash
@@ -58,17 +58,17 @@ in
{
cargo2nixVersion = "0.11.0";
workspace = {
- garage_db = rustPackages.unknown.garage_db."0.9.1";
- garage_util = rustPackages.unknown.garage_util."0.9.1";
- garage_net = rustPackages.unknown.garage_net."0.9.1";
- garage_rpc = rustPackages.unknown.garage_rpc."0.9.1";
+ garage_db = rustPackages.unknown.garage_db."0.9.2";
+ garage_util = rustPackages.unknown.garage_util."0.9.2";
+ garage_net = rustPackages.unknown.garage_net."0.9.2";
+ garage_rpc = rustPackages.unknown.garage_rpc."0.9.2";
format_table = rustPackages.unknown.format_table."0.1.1";
- garage_table = rustPackages.unknown.garage_table."0.9.1";
- garage_block = rustPackages.unknown.garage_block."0.9.1";
- garage_model = rustPackages.unknown.garage_model."0.9.1";
- garage_api = rustPackages.unknown.garage_api."0.9.1";
- garage_web = rustPackages.unknown.garage_web."0.9.1";
- garage = rustPackages.unknown.garage."0.9.1";
+ garage_table = rustPackages.unknown.garage_table."0.9.2";
+ garage_block = rustPackages.unknown.garage_block."0.9.2";
+ garage_model = rustPackages.unknown.garage_model."0.9.2";
+ garage_api = rustPackages.unknown.garage_api."0.9.2";
+ garage_web = rustPackages.unknown.garage_web."0.9.2";
+ garage = rustPackages.unknown.garage."0.9.2";
k2v-client = rustPackages.unknown.k2v-client."0.0.4";
};
"registry+https://github.com/rust-lang/crates.io-index".addr2line."0.21.0" = overridableMkRustCrate (profileName: rec {
@@ -235,6 +235,25 @@ in
src = fetchCratesIo { inherit name version; sha256 = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"; };
});
+ "registry+https://github.com/rust-lang/crates.io-index".argon2."0.5.3" = overridableMkRustCrate (profileName: rec {
+ name = "argon2";
+ version = "0.5.3";
+ registry = "registry+https://github.com/rust-lang/crates.io-index";
+ src = fetchCratesIo { inherit name version; sha256 = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072"; };
+ features = builtins.concatLists [
+ [ "alloc" ]
+ [ "default" ]
+ [ "password-hash" ]
+ [ "rand" ]
+ ];
+ dependencies = {
+ base64ct = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64ct."1.6.0" { inherit profileName; }).out;
+ blake2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.10.6" { inherit profileName; }).out;
+ ${ if hostPlatform.parsed.cpu.name == "i686" || hostPlatform.parsed.cpu.name == "x86_64" then "cpufeatures" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".cpufeatures."0.2.12" { inherit profileName; }).out;
+ password_hash = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".password-hash."0.5.0" { inherit profileName; }).out;
+ };
+ });
+
"registry+https://github.com/rust-lang/crates.io-index".arrayvec."0.5.2" = overridableMkRustCrate (profileName: rec {
name = "arrayvec";
version = "0.5.2";
@@ -1852,9 +1871,9 @@ in
};
});
- "unknown".garage."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/garage");
features = builtins.concatLists [
@@ -1881,15 +1900,15 @@ in
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
- garage_api = (rustPackages."unknown".garage_api."0.9.1" { inherit profileName; }).out;
- garage_block = (rustPackages."unknown".garage_block."0.9.1" { inherit profileName; }).out;
- garage_db = (rustPackages."unknown".garage_db."0.9.1" { inherit profileName; }).out;
- garage_model = (rustPackages."unknown".garage_model."0.9.1" { inherit profileName; }).out;
- garage_net = (rustPackages."unknown".garage_net."0.9.1" { inherit profileName; }).out;
- garage_rpc = (rustPackages."unknown".garage_rpc."0.9.1" { inherit profileName; }).out;
- garage_table = (rustPackages."unknown".garage_table."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
- garage_web = (rustPackages."unknown".garage_web."0.9.1" { inherit profileName; }).out;
+ garage_api = (rustPackages."unknown".garage_api."0.9.2" { inherit profileName; }).out;
+ garage_block = (rustPackages."unknown".garage_block."0.9.2" { inherit profileName; }).out;
+ garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
+ garage_model = (rustPackages."unknown".garage_model."0.9.2" { inherit profileName; }).out;
+ garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
+ garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out;
+ garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
+ garage_web = (rustPackages."unknown".garage_web."0.9.2" { inherit profileName; }).out;
git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.9" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
@@ -1927,9 +1946,9 @@ in
};
});
- "unknown".garage_api."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_api."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_api";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/api");
features = builtins.concatLists [
@@ -1939,6 +1958,7 @@ in
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus")
];
dependencies = {
+ argon2 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".argon2."0.5.3" { inherit profileName; }).out;
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.77" { profileName = "__noProfile"; }).out;
base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.21.7" { inherit profileName; }).out;
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
@@ -1948,12 +1968,12 @@ in
form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.2.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
- garage_block = (rustPackages."unknown".garage_block."0.9.1" { inherit profileName; }).out;
- garage_model = (rustPackages."unknown".garage_model."0.9.1" { inherit profileName; }).out;
- garage_net = (rustPackages."unknown".garage_net."0.9.1" { inherit profileName; }).out;
- garage_rpc = (rustPackages."unknown".garage_rpc."0.9.1" { inherit profileName; }).out;
- garage_table = (rustPackages."unknown".garage_table."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
+ garage_block = (rustPackages."unknown".garage_block."0.9.2" { inherit profileName; }).out;
+ garage_model = (rustPackages."unknown".garage_model."0.9.2" { inherit profileName; }).out;
+ garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
+ garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out;
+ garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@@ -1984,9 +2004,9 @@ in
};
});
- "unknown".garage_block."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_block."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_block";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/block");
features = builtins.concatLists [
@@ -2000,11 +2020,11 @@ in
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.3.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
- garage_db = (rustPackages."unknown".garage_db."0.9.1" { inherit profileName; }).out;
- garage_net = (rustPackages."unknown".garage_net."0.9.1" { inherit profileName; }).out;
- garage_rpc = (rustPackages."unknown".garage_rpc."0.9.1" { inherit profileName; }).out;
- garage_table = (rustPackages."unknown".garage_table."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
+ garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
+ garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
+ garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out;
+ garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@@ -2017,9 +2037,9 @@ in
};
});
- "unknown".garage_db."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_db."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_db";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/db");
features = builtins.concatLists [
@@ -2044,9 +2064,9 @@ in
};
});
- "unknown".garage_model."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_model."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_model";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/model");
features = builtins.concatLists [
@@ -2065,12 +2085,12 @@ in
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
- garage_block = (rustPackages."unknown".garage_block."0.9.1" { inherit profileName; }).out;
- garage_db = (rustPackages."unknown".garage_db."0.9.1" { inherit profileName; }).out;
- garage_net = (rustPackages."unknown".garage_net."0.9.1" { inherit profileName; }).out;
- garage_rpc = (rustPackages."unknown".garage_rpc."0.9.1" { inherit profileName; }).out;
- garage_table = (rustPackages."unknown".garage_table."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
+ garage_block = (rustPackages."unknown".garage_block."0.9.2" { inherit profileName; }).out;
+ garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
+ garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
+ garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out;
+ garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
@@ -2082,9 +2102,9 @@ in
};
});
- "unknown".garage_net."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_net."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_net";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/net");
features = builtins.concatLists [
@@ -2119,9 +2139,9 @@ in
};
});
- "unknown".garage_rpc."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_rpc."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_rpc";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/rpc");
features = builtins.concatLists [
@@ -2143,9 +2163,9 @@ in
format_table = (rustPackages."unknown".format_table."0.1.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
- garage_db = (rustPackages."unknown".garage_db."0.9.1" { inherit profileName; }).out;
- garage_net = (rustPackages."unknown".garage_net."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
+ garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
+ garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.4.3" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.12.1" { inherit profileName; }).out;
@@ -2167,9 +2187,9 @@ in
};
});
- "unknown".garage_table."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_table."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_table";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/table");
dependencies = {
@@ -2178,9 +2198,9 @@ in
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.5.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.30" { inherit profileName; }).out;
- garage_db = (rustPackages."unknown".garage_db."0.9.1" { inherit profileName; }).out;
- garage_rpc = (rustPackages."unknown".garage_rpc."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
+ garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
+ garage_rpc = (rustPackages."unknown".garage_rpc."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@@ -2192,9 +2212,9 @@ in
};
});
- "unknown".garage_util."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_util."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_util";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/util");
features = builtins.concatLists [
@@ -2210,8 +2230,8 @@ in
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.7" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
- garage_db = (rustPackages."unknown".garage_db."0.9.1" { inherit profileName; }).out;
- garage_net = (rustPackages."unknown".garage_net."0.9.1" { inherit profileName; }).out;
+ garage_db = (rustPackages."unknown".garage_db."0.9.2" { inherit profileName; }).out;
+ garage_net = (rustPackages."unknown".garage_net."0.9.2" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
@@ -2236,18 +2256,18 @@ in
};
});
- "unknown".garage_web."0.9.1" = overridableMkRustCrate (profileName: rec {
+ "unknown".garage_web."0.9.2" = overridableMkRustCrate (profileName: rec {
name = "garage_web";
- version = "0.9.1";
+ version = "0.9.2";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/web");
dependencies = {
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.30" { inherit profileName; }).out;
- garage_api = (rustPackages."unknown".garage_api."0.9.1" { inherit profileName; }).out;
- garage_model = (rustPackages."unknown".garage_model."0.9.1" { inherit profileName; }).out;
- garage_table = (rustPackages."unknown".garage_table."0.9.1" { inherit profileName; }).out;
- garage_util = (rustPackages."unknown".garage_util."0.9.1" { inherit profileName; }).out;
+ garage_api = (rustPackages."unknown".garage_api."0.9.2" { inherit profileName; }).out;
+ garage_model = (rustPackages."unknown".garage_model."0.9.2" { inherit profileName; }).out;
+ garage_table = (rustPackages."unknown".garage_table."0.9.2" { inherit profileName; }).out;
+ garage_util = (rustPackages."unknown".garage_util."0.9.2" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."1.0.0" { inherit profileName; }).out;
http_body_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-body-util."0.1.0" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."1.1.0" { inherit profileName; }).out;
@@ -3989,6 +4009,23 @@ in
};
});
+ "registry+https://github.com/rust-lang/crates.io-index".password-hash."0.5.0" = overridableMkRustCrate (profileName: rec {
+ name = "password-hash";
+ version = "0.5.0";
+ registry = "registry+https://github.com/rust-lang/crates.io-index";
+ src = fetchCratesIo { inherit name version; sha256 = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"; };
+ features = builtins.concatLists [
+ [ "alloc" ]
+ [ "default" ]
+ [ "rand_core" ]
+ ];
+ dependencies = {
+ base64ct = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64ct."1.6.0" { inherit profileName; }).out;
+ rand_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand_core."0.6.4" { inherit profileName; }).out;
+ subtle = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".subtle."2.5.0" { inherit profileName; }).out;
+ };
+ });
+
"registry+https://github.com/rust-lang/crates.io-index".paste."1.0.14" = overridableMkRustCrate (profileName: rec {
name = "paste";
version = "1.0.14";
diff --git a/Cargo.toml b/Cargo.toml
index b7ffcfc5..59d1c26b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,19 +21,20 @@ default-members = ["src/garage"]
# Internal Garage crates
format_table = { version = "0.1.1", path = "src/format-table" }
-garage_api = { version = "0.9.1", path = "src/api" }
-garage_block = { version = "0.9.1", path = "src/block" }
-garage_db = { version = "0.9.1", path = "src/db", default-features = false }
-garage_model = { version = "0.9.1", path = "src/model", default-features = false }
-garage_net = { version = "0.9.1", path = "src/net" }
-garage_rpc = { version = "0.9.1", path = "src/rpc" }
-garage_table = { version = "0.9.1", path = "src/table" }
-garage_util = { version = "0.9.1", path = "src/util" }
-garage_web = { version = "0.9.1", path = "src/web" }
+garage_api = { version = "0.9.2", path = "src/api" }
+garage_block = { version = "0.9.2", path = "src/block" }
+garage_db = { version = "0.9.2", path = "src/db", default-features = false }
+garage_model = { version = "0.9.2", path = "src/model", default-features = false }
+garage_net = { version = "0.9.2", path = "src/net" }
+garage_rpc = { version = "0.9.2", path = "src/rpc" }
+garage_table = { version = "0.9.2", path = "src/table" }
+garage_util = { version = "0.9.2", path = "src/util" }
+garage_web = { version = "0.9.2", path = "src/web" }
k2v-client = { version = "0.0.4", path = "src/k2v-client" }
# External crates from crates.io
arc-swap = "1.0"
+argon2 = "0.5"
async-trait = "0.1.7"
backtrace = "0.3"
base64 = "0.21"
diff --git a/doc/book/connect/backup.md b/doc/book/connect/backup.md
index 585ec469..f39cc3b6 100644
--- a/doc/book/connect/backup.md
+++ b/doc/book/connect/backup.md
@@ -55,8 +55,8 @@ Create your key and bucket:
```bash
garage key create my-key
-garage bucket create backup
-garage bucket allow backup --read --write --key my-key
+garage bucket create backups
+garage bucket allow backups --read --write --key my-key
```
Then register your Key ID and Secret key in your environment:
diff --git a/doc/book/cookbook/real-world.md b/doc/book/cookbook/real-world.md
index ce0abddd..5c2dd0f1 100644
--- a/doc/book/cookbook/real-world.md
+++ b/doc/book/cookbook/real-world.md
@@ -85,14 +85,14 @@ to store 2 TB of data in total.
## Get a Docker image
Our docker image is currently named `dxflrs/garage` and is stored on the [Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
-We encourage you to use a fixed tag (eg. `v0.9.1`) and not the `latest` tag.
-For this example, we will use the latest published version at the time of the writing which is `v0.9.1` but it's up to you
+We encourage you to use a fixed tag (e.g. `v0.9.2`) and not the `latest` tag.
+For this example, we will use the latest published version at the time of writing, which is `v0.9.2`, but it's up to you
to check [the most recent versions on the Docker Hub](https://hub.docker.com/r/dxflrs/garage/tags?page=1&ordering=last_updated).
For example:
```
-sudo docker pull dxflrs/garage:v0.9.1
+sudo docker pull dxflrs/garage:v0.9.2
```
## Deploying and configuring Garage
@@ -157,7 +157,7 @@ docker run \
-v /etc/garage.toml:/etc/garage.toml \
-v /var/lib/garage/meta:/var/lib/garage/meta \
-v /var/lib/garage/data:/var/lib/garage/data \
- dxflrs/garage:v0.9.1
+ dxflrs/garage:v0.9.2
```
With this command line, Garage should be started automatically at each boot.
@@ -171,7 +171,7 @@ If you want to use `docker-compose`, you may use the following `docker-compose.y
version: "3"
services:
garage:
- image: dxflrs/garage:v0.9.1
+ image: dxflrs/garage:v0.9.2
network_mode: "host"
restart: unless-stopped
volumes:
diff --git a/doc/book/cookbook/reverse-proxy.md b/doc/book/cookbook/reverse-proxy.md
index b715193e..bdc1c549 100644
--- a/doc/book/cookbook/reverse-proxy.md
+++ b/doc/book/cookbook/reverse-proxy.md
@@ -472,3 +472,32 @@ https:// {
More information on how this endpoint is implemented in Garage is available
in the [Admin API Reference](@/documentation/reference-manual/admin-api.md) page.
+
+### Fileserver browser
+
+Caddy's built-in
+[file_server](https://caddyserver.com/docs/caddyfile/directives/file_server)
+browser functionality can be extended with the
+[caddy-fs-s3](https://github.com/sagikazarmark/caddy-fs-s3) module.
+
+This can be configured to use Garage as a backend with the following
+configuration:
+
+```caddy
+browse.garage.tld {
+ file_server {
+ fs s3 {
+ bucket test-bucket
+ region garage
+
+ endpoint https://s3.garage.tld
+ use_path_style
+ }
+
+ browse
+ }
+}
+```
+
+Caddy must also be configured with the required `AWS_ACCESS_KEY_ID` and
+`AWS_SECRET_ACCESS_KEY` environment variables to access the bucket.
diff --git a/doc/book/quick-start/_index.md b/doc/book/quick-start/_index.md
index acfefb07..f359843d 100644
--- a/doc/book/quick-start/_index.md
+++ b/doc/book/quick-start/_index.md
@@ -79,8 +79,9 @@ index = "index.html"
api_bind_addr = "[::]:3904"
[admin]
-api_bind_addr = "0.0.0.0:3903"
+api_bind_addr = "[::]:3903"
admin_token = "$(openssl rand -base64 32)"
+metrics_token = "$(openssl rand -base64 32)"
EOF
```
diff --git a/doc/book/reference-manual/configuration.md b/doc/book/reference-manual/configuration.md
index f1474613..af7690f4 100644
--- a/doc/book/reference-manual/configuration.md
+++ b/doc/book/reference-manual/configuration.md
@@ -69,8 +69,8 @@ root_domain = ".web.garage"
[admin]
api_bind_addr = "0.0.0.0:3903"
-metrics_token = "cacce0b2de4bc2d9f5b5fdff551e01ac1496055aed248202d415398987e35f81"
-admin_token = "ae8cb40ea7368bbdbb6430af11cca7da833d3458a5f52086f4e805a570fb5c2a"
+metrics_token = "BCAdFjoa9G0KJR0WXnHHm7fs1ZAbfpI8iIZ+Z/a2NgI="
+admin_token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4="
trace_sink = "http://localhost:4317"
```
@@ -417,7 +417,7 @@ the node, even in the case of a NAT: the NAT should be configured to forward the
port number to the same internal port number. This means that if you have several nodes running
behind a NAT, they should each use a different RPC port number.
-#### `rpc_bind_outgoing` {#rpc_bind_outgoing} (since v0.9.2)
+#### `rpc_bind_outgoing` (since v0.9.2) {#rpc_bind_outgoing}
If enabled, pre-bind all sockets for outgoing connections to the same IP address
used for listening (the IP address specified in `rpc_bind_addr`) before
@@ -612,7 +612,7 @@ the socket will have 0220 mode. Make sure to set user and group permissions acco
The token for accessing the Metrics endpoint. If this token is not set, the
Metrics endpoint can be accessed without access control.
-You can use any random string for this value. We recommend generating a random token with `openssl rand -hex 32`.
+You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
`metrics_token` was introduced in Garage `v0.7.2`.
`metrics_token_file` and the `GARAGE_METRICS_TOKEN` environment variable are supported since Garage `v0.8.2`.
@@ -624,7 +624,7 @@ You can use any random string for this value. We recommend generating a random t
The token for accessing all of the other administration endpoints. If this
token is not set, access to these endpoints is disabled entirely.
-You can use any random string for this value. We recommend generating a random token with `openssl rand -hex 32`.
+You can use any random string for this value. We recommend generating a random token with `openssl rand -base64 32`.
`admin_token` was introduced in Garage `v0.7.2`.
`admin_token_file` and the `GARAGE_ADMIN_TOKEN` environment variable are supported since Garage `v0.8.2`.
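The `rpc_bind_outgoing` option documented in this file (new in v0.9.2) asks Garage to bind outgoing RPC sockets to the `rpc_bind_addr` address before connecting. The snippet below is a minimal, Garage-independent sketch of that bind-before-connect pattern using tokio's `TcpSocket`; the addresses and the `connect_from` helper are made up for the example and are not Garage's actual networking code.

```rust
use std::net::SocketAddr;

use tokio::net::{TcpSocket, TcpStream};

/// Open an outgoing connection whose source address is pinned to `bind_addr`,
/// similar in spirit to what `rpc_bind_outgoing = true` asks Garage to do.
async fn connect_from(bind_addr: SocketAddr, remote: SocketAddr) -> std::io::Result<TcpStream> {
    let socket = if bind_addr.is_ipv6() {
        TcpSocket::new_v6()?
    } else {
        TcpSocket::new_v4()?
    };
    // Bind the local side before connecting, so the peer sees the expected source IP.
    socket.bind(bind_addr)?;
    socket.connect(remote).await
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Hypothetical addresses: the local RPC bind IP with an ephemeral port, and a peer node.
    // Connecting will of course fail unless something actually listens at `peer`.
    let local: SocketAddr = "192.0.2.10:0".parse().unwrap();
    let peer: SocketAddr = "192.0.2.20:3901".parse().unwrap();
    let _stream = connect_from(local, peer).await?;
    Ok(())
}
```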
diff --git a/doc/book/reference-manual/features.md b/doc/book/reference-manual/features.md
index e8ba9510..f7014b26 100644
--- a/doc/book/reference-manual/features.md
+++ b/doc/book/reference-manual/features.md
@@ -37,6 +37,21 @@ A Garage cluster can very easily evolve over time, as storage nodes are added or
Garage will automatically rebalance data between nodes as needed to ensure the desired number of copies.
Read about cluster layout management [here](@/documentation/operations/layout.md).
+### Several replication modes
+
+Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data,
+and with various levels of consistency, in order to adapt to a variety of usage scenarios.
+Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode)
+to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want).
+
+### Compression and deduplication
+
+All data stored in Garage is deduplicated, and optionally compressed using
+Zstd. Objects uploaded to Garage are chunked in blocks of constant sizes (see
+[`block_size`](@/documentation/reference-manual/configuration.md#block_size)),
+and the hashes of individual blocks are used to dispatch them to storage nodes
+and to deduplicate them.
+
### No RAFT slowing you down
It might seem strange to tout the absence of something as a desirable feature,
@@ -48,13 +63,6 @@ As a consequence, requests can be handled much faster, even in cases where laten
between cluster nodes is important (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
This is particularly useful when nodes are far from one another and talk to one another through standard Internet connections.
-### Several replication modes
-
-Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data,
-and with various levels of consistency, in order to adapt to a variety of usage scenarios.
-Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode)
-to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want).
-
### Web server for static websites
A storage bucket can easily be configured to be served directly by Garage as a static web site.
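The "Compression and deduplication" paragraph added to this page describes fixed-size chunking and content addressing by block hash. The sketch below is a rough, Garage-independent illustration of that idea, assuming the `blake2` crate; `dedup_blocks` and `BLOCK_SIZE` are invented for the example. It splits a buffer into fixed-size blocks, hashes each one, and skips blocks whose hash has already been seen.

```rust
use std::collections::HashSet;

use blake2::{Blake2b512, Digest};

const BLOCK_SIZE: usize = 1024 * 1024; // 1 MiB, an arbitrary value for this sketch

/// Split `data` into fixed-size blocks and keep only blocks not seen before,
/// keyed by their BLAKE2 digest (content addressing).
fn dedup_blocks(data: &[u8], seen: &mut HashSet<Vec<u8>>) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut new_blocks = Vec::new();
    for block in data.chunks(BLOCK_SIZE) {
        let digest = Blake2b512::digest(block).to_vec();
        // Only store a block the first time its hash is encountered.
        if seen.insert(digest.clone()) {
            new_blocks.push((digest, block.to_vec()));
        }
    }
    new_blocks
}

fn main() {
    let mut seen = HashSet::new();
    // Two uploads of identical content: the second one stores no new blocks.
    let object = vec![0u8; 3 * BLOCK_SIZE];
    println!("first upload: {} new blocks", dedup_blocks(&object, &mut seen).len());
    println!("second upload: {} new blocks", dedup_blocks(&object, &mut seen).len());
}
```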
diff --git a/script/helm/garage/Chart.yaml b/script/helm/garage/Chart.yaml
index 31b75c1f..9a21aa8e 100644
--- a/script/helm/garage/Chart.yaml
+++ b/script/helm/garage/Chart.yaml
@@ -21,4 +21,4 @@ version: 0.4.1
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "v0.9.1"
+appVersion: "v0.9.2"
diff --git a/script/test-smoke.sh b/script/test-smoke.sh
index 6965c0f3..9f9ea50c 100755
--- a/script/test-smoke.sh
+++ b/script/test-smoke.sh
@@ -81,11 +81,9 @@ if [ -z "$SKIP_AWS" ]; then
echo "Invalid multipart upload"
exit 1
fi
+ aws s3api delete-object --bucket eprouvette --key upload
fi
-echo "OK!!"
-exit 0
-
# S3CMD
if [ -z "$SKIP_S3CMD" ]; then
echo "🛠️ Testing with s3cmd"
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index bc6b6aa7..40fab769 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_api"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -21,6 +21,7 @@ garage_net.workspace = true
garage_util.workspace = true
garage_rpc.workspace = true
+argon2.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 50813d11..265639c4 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::sync::Arc;
+use argon2::password_hash::PasswordHash;
use async_trait::async_trait;
use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
@@ -45,14 +46,8 @@ impl AdminApiServer {
#[cfg(feature = "metrics")] exporter: PrometheusExporter,
) -> Self {
let cfg = &garage.config.admin;
- let metrics_token = cfg
- .metrics_token
- .as_ref()
- .map(|tok| format!("Bearer {}", tok));
- let admin_token = cfg
- .admin_token
- .as_ref()
- .map(|tok| format!("Bearer {}", tok));
+ let metrics_token = cfg.metrics_token.as_deref().map(hash_bearer_token);
+ let admin_token = cfg.admin_token.as_deref().map(hash_bearer_token);
Self {
garage,
#[cfg(feature = "metrics")]
@@ -248,11 +243,11 @@ impl ApiHandler for AdminApiServer {
req: Request<IncomingBody>,
endpoint: Endpoint,
) -> Result<Response<ResBody>, Error> {
- let expected_auth_header =
+ let required_auth_hash =
match endpoint.authorization_type() {
Authorization::None => None,
- Authorization::MetricsToken => self.metrics_token.as_ref(),
- Authorization::AdminToken => match &self.admin_token {
+ Authorization::MetricsToken => self.metrics_token.as_deref(),
+ Authorization::AdminToken => match self.admin_token.as_deref() {
None => return Err(Error::forbidden(
"Admin token isn't configured, admin API access is disabled for security.",
)),
@@ -260,14 +255,11 @@ impl ApiHandler for AdminApiServer {
},
};
- if let Some(h) = expected_auth_header {
+ if let Some(password_hash) = required_auth_hash {
match req.headers().get("Authorization") {
None => return Err(Error::forbidden("Authorization token must be provided")),
- Some(v) => {
- let authorized = v.to_str().map(|hv| hv.trim() == h).unwrap_or(false);
- if !authorized {
- return Err(Error::forbidden("Invalid authorization token provided"));
- }
+ Some(authorization) => {
+ verify_bearer_token(&authorization, password_hash)?;
}
}
}
@@ -342,3 +334,35 @@ impl ApiEndpoint for Endpoint {
fn add_span_attributes(&self, _span: SpanRef<'_>) {}
}
+
+fn hash_bearer_token(token: &str) -> String {
+ use argon2::{
+ password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
+ Argon2,
+ };
+
+ let salt = SaltString::generate(&mut OsRng);
+ let argon2 = Argon2::default();
+ argon2
+ .hash_password(token.trim().as_bytes(), &salt)
+ .expect("could not hash API token")
+ .to_string()
+}
+
+fn verify_bearer_token(token: &hyper::http::HeaderValue, password_hash: &str) -> Result<(), Error> {
+ use argon2::{password_hash::PasswordVerifier, Argon2};
+
+ let parsed_hash = PasswordHash::new(&password_hash).unwrap();
+
+ token
+ .to_str()?
+ .strip_prefix("Bearer ")
+ .and_then(|token| {
+ Argon2::default()
+ .verify_password(token.trim().as_bytes(), &parsed_hash)
+ .ok()
+ })
+ .ok_or_else(|| Error::forbidden("Invalid authorization token"))?;
+
+ Ok(())
+}
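The new `hash_bearer_token` / `verify_bearer_token` helpers above follow the standard `argon2` + `password-hash` hash-then-verify flow. The standalone sketch below shows that round trip outside of Garage; the token literal is just the sample value from the configuration documentation, and the `main` wrapper is for illustration only.

```rust
use argon2::{
    password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};

fn main() {
    // Hypothetical admin token; in Garage this comes from the [admin] config section.
    let token = "UkLeGWEvHnXBqnueR3ISEMWpOnm40jH2tM2HnnL/0F4=";

    // Hash once at startup, with a random salt (produces a PHC-format string).
    let salt = SaltString::generate(&mut OsRng);
    let hash = Argon2::default()
        .hash_password(token.as_bytes(), &salt)
        .expect("could not hash token")
        .to_string();

    // Verify on each request against the stored hash string.
    let parsed = PasswordHash::new(&hash).expect("invalid hash string");
    let ok = Argon2::default()
        .verify_password(token.as_bytes(), &parsed)
        .is_ok();
    assert!(ok);
    println!("token verified: {}", ok);
}
```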
diff --git a/src/api/k2v/api_server.rs b/src/api/k2v/api_server.rs
index e97da2af..fdb5db4c 100644
--- a/src/api/k2v/api_server.rs
+++ b/src/api/k2v/api_server.rs
@@ -15,8 +15,7 @@ use garage_model::garage::Garage;
use crate::generic_server::*;
use crate::k2v::error::*;
-use crate::signature::payload::check_payload_signature;
-use crate::signature::streaming::*;
+use crate::signature::verify_request;
use crate::helpers::*;
use crate::k2v::batch::*;
@@ -86,17 +85,7 @@ impl ApiHandler for K2VApiServer {
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
}
- let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?;
- let api_key = api_key
- .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
-
- let req = parse_streaming_body(
- &api_key,
- req,
- &mut content_sha256,
- &garage.config.s3_api.s3_region,
- "k2v",
- )?;
+ let (req, api_key, _content_sha256) = verify_request(&garage, req, "k2v").await?;
let bucket_id = garage
.bucket_helper()
diff --git a/src/api/s3/api_server.rs b/src/api/s3/api_server.rs
index 08405923..51f19554 100644
--- a/src/api/s3/api_server.rs
+++ b/src/api/s3/api_server.rs
@@ -17,8 +17,7 @@ use garage_model::key_table::Key;
use crate::generic_server::*;
use crate::s3::error::*;
-use crate::signature::payload::check_payload_signature;
-use crate::signature::streaming::*;
+use crate::signature::verify_request;
use crate::helpers::*;
use crate::s3::bucket::*;
@@ -125,17 +124,7 @@ impl ApiHandler for S3ApiServer {
return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
}
- let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?;
- let api_key = api_key
- .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
-
- let req = parse_streaming_body(
- &api_key,
- req,
- &mut content_sha256,
- &garage.config.s3_api.s3_region,
- "s3",
- )?;
+ let (req, api_key, content_sha256) = verify_request(&garage, req, "s3").await?;
let bucket_name = match bucket_name {
None => {
diff --git a/src/api/s3/copy.rs b/src/api/s3/copy.rs
index 7eb1fe60..880ce5f4 100644
--- a/src/api/s3/copy.rs
+++ b/src/api/s3/copy.rs
@@ -387,7 +387,10 @@ pub async fn handle_upload_part_copy(
// we need to insert that data as a new block.
async move {
if must_upload {
- garage2.block_manager.rpc_put_block(final_hash, data).await
+ garage2
+ .block_manager
+ .rpc_put_block(final_hash, data, None)
+ .await
} else {
Ok(())
}
diff --git a/src/api/s3/multipart.rs b/src/api/s3/multipart.rs
index b9d15b21..5959bcd6 100644
--- a/src/api/s3/multipart.rs
+++ b/src/api/s3/multipart.rs
@@ -6,7 +6,6 @@ use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};
use garage_table::*;
-use garage_util::async_hash::*;
use garage_util::data::*;
use garage_model::bucket_table::Bucket;
@@ -135,17 +134,8 @@ pub async fn handle_put_part(
garage.version_table.insert(&version).await?;
// Copy data to version
- let first_block_hash = async_blake2sum(first_block.clone()).await;
-
- let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
- &garage,
- &version,
- part_number,
- first_block,
- first_block_hash,
- &mut chunker,
- )
- .await?;
+ let (total_size, data_md5sum, data_sha256sum, _) =
+ read_and_put_blocks(&garage, &version, part_number, first_block, &mut chunker).await?;
// Verify that checksums map
ensure_checksum_matches(
diff --git a/src/api/s3/post_object.rs b/src/api/s3/post_object.rs
index bca8d6c6..b542cc1a 100644
--- a/src/api/s3/post_object.rs
+++ b/src/api/s3/post_object.rs
@@ -21,7 +21,7 @@ use crate::s3::cors::*;
use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream};
use crate::s3::xml as s3_xml;
-use crate::signature::payload::{parse_date, verify_v4};
+use crate::signature::payload::{verify_v4, Authorization};
pub async fn handle_post_object(
garage: Arc<Garage>,
@@ -88,22 +88,11 @@ pub async fn handle_post_object(
.get("key")
.ok_or_bad_request("No key was provided")?
.to_str()?;
- let credential = params
- .get("x-amz-credential")
- .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
- .to_str()?;
let policy = params
.get("policy")
.ok_or_bad_request("No policy was provided")?
.to_str()?;
- let signature = params
- .get("x-amz-signature")
- .ok_or_bad_request("No signature was provided")?
- .to_str()?;
- let date = params
- .get("x-amz-date")
- .ok_or_bad_request("No date was provided")?
- .to_str()?;
+ let authorization = Authorization::parse_form(&params)?;
let key = if key.contains("${filename}") {
// if no filename is provided, don't replace. This matches the behavior of AWS.
@@ -116,16 +105,7 @@ pub async fn handle_post_object(
key.to_owned()
};
- let date = parse_date(date)?;
- let api_key = verify_v4(
- &garage,
- "s3",
- credential,
- &date,
- signature,
- policy.as_bytes(),
- )
- .await?;
+ let api_key = verify_v4(&garage, "s3", &authorization, policy.as_bytes()).await?;
let bucket_id = garage
.bucket_helper()
diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs
index fdfa567d..489f1136 100644
--- a/src/api/s3/put.rs
+++ b/src/api/s3/put.rs
@@ -3,10 +3,13 @@ use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*;
+use futures::stream::FuturesOrdered;
use futures::try_join;
use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
use sha2::Sha256;
+use tokio::sync::mpsc;
+
use hyper::body::Bytes;
use hyper::header::{HeaderMap, HeaderValue};
use hyper::{Request, Response};
@@ -17,6 +20,7 @@ use opentelemetry::{
};
use garage_net::bytes_buf::BytesBuf;
+use garage_rpc::rpc_helper::OrderTag;
use garage_table::*;
use garage_util::async_hash::*;
use garage_util::data::*;
@@ -35,6 +39,8 @@ use crate::helpers::*;
use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
+const PUT_BLOCKS_MAX_PARALLEL: usize = 3;
+
pub async fn handle_put(
garage: Arc<Garage>,
req: Request<ReqBody>,
@@ -168,17 +174,8 @@ pub(crate) async fn save_stream<S: Stream<Item = Result<Bytes, Error>> + Unpin>(
garage.version_table.insert(&version).await?;
// Transfer data and verify checksum
- let first_block_hash = async_blake2sum(first_block.clone()).await;
-
- let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks(
- &garage,
- &version,
- 1,
- first_block,
- first_block_hash,
- &mut chunker,
- )
- .await?;
+ let (total_size, data_md5sum, data_sha256sum, first_block_hash) =
+ read_and_put_blocks(&garage, &version, 1, first_block, &mut chunker).await?;
ensure_checksum_matches(
data_md5sum.as_slice(),
@@ -299,84 +296,164 @@ pub(crate) async fn read_and_put_blocks<S: Stream<Item = Result<Bytes, Error>> +
version: &Version,
part_number: u64,
first_block: Bytes,
- first_block_hash: Hash,
chunker: &mut StreamChunker<S>,
-) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash), Error> {
+) -> Result<(u64, GenericArray<u8, typenum::U16>, Hash, Hash), Error> {
let tracer = opentelemetry::global::tracer("garage");
- let md5hasher = AsyncHasher::<Md5>::new();
- let sha256hasher = AsyncHasher::<Sha256>::new();
+ let (block_tx, mut block_rx) = mpsc::channel::<Result<Bytes, Error>>(2);
+ let read_blocks = async {
+ block_tx.send(Ok(first_block)).await?;
+ loop {
+ let res = chunker
+ .next()
+ .with_context(Context::current_with_span(
+ tracer.start("Read block from client"),
+ ))
+ .await;
+ match res {
+ Ok(Some(block)) => block_tx.send(Ok(block)).await?,
+ Ok(None) => break,
+ Err(e) => {
+ block_tx.send(Err(e)).await?;
+ break;
+ }
+ }
+ }
+ drop(block_tx);
+ Ok::<_, mpsc::error::SendError<_>>(())
+ };
- futures::future::join(
- md5hasher.update(first_block.clone()),
- sha256hasher.update(first_block.clone()),
- )
- .with_context(Context::current_with_span(
- tracer.start("Hash first block (md5, sha256)"),
- ))
- .await;
+ let (block_tx2, mut block_rx2) = mpsc::channel::<Result<Bytes, Error>>(1);
+ let hash_stream = async {
+ let md5hasher = AsyncHasher::<Md5>::new();
+ let sha256hasher = AsyncHasher::<Sha256>::new();
+ while let Some(next) = block_rx.recv().await {
+ match next {
+ Ok(block) => {
+ block_tx2.send(Ok(block.clone())).await?;
+ futures::future::join(
+ md5hasher.update(block.clone()),
+ sha256hasher.update(block.clone()),
+ )
+ .with_context(Context::current_with_span(
+ tracer.start("Hash block (md5, sha256)"),
+ ))
+ .await;
+ }
+ Err(e) => {
+ block_tx2.send(Err(e)).await?;
+ break;
+ }
+ }
+ }
+ drop(block_tx2);
+ Ok::<_, mpsc::error::SendError<_>>(futures::join!(
+ md5hasher.finalize(),
+ sha256hasher.finalize()
+ ))
+ };
- let mut next_offset = first_block.len();
- let mut put_curr_version_block = put_block_meta(
- garage,
- version,
- part_number,
- 0,
- first_block_hash,
- first_block.len() as u64,
- );
- let mut put_curr_block = garage
- .block_manager
- .rpc_put_block(first_block_hash, first_block);
-
- loop {
- let (_, _, next_block) = futures::try_join!(
- put_curr_block.map_err(Error::from),
- put_curr_version_block.map_err(Error::from),
- chunker.next(),
- )?;
- if let Some(block) = next_block {
- let (_, _, block_hash) = futures::future::join3(
- md5hasher.update(block.clone()),
- sha256hasher.update(block.clone()),
- async_blake2sum(block.clone()),
- )
- .with_context(Context::current_with_span(
- tracer.start("Hash block (md5, sha256, blake2)"),
- ))
- .await;
- let block_len = block.len();
- put_curr_version_block = put_block_meta(
+ let (block_tx3, mut block_rx3) = mpsc::channel::<Result<(Bytes, Hash), Error>>(1);
+ let hash_blocks = async {
+ let mut first_block_hash = None;
+ while let Some(next) = block_rx2.recv().await {
+ match next {
+ Ok(block) => {
+ let hash = async_blake2sum(block.clone())
+ .with_context(Context::current_with_span(
+ tracer.start("Hash block (blake2)"),
+ ))
+ .await;
+ if first_block_hash.is_none() {
+ first_block_hash = Some(hash);
+ }
+ block_tx3.send(Ok((block, hash))).await?;
+ }
+ Err(e) => {
+ block_tx3.send(Err(e)).await?;
+ break;
+ }
+ }
+ }
+ drop(block_tx3);
+ Ok::<_, mpsc::error::SendError<_>>(first_block_hash.unwrap())
+ };
+
+ let put_blocks = async {
+ // Structure for handling several concurrent writes to storage nodes
+ let order_stream = OrderTag::stream();
+ let mut write_futs = FuturesOrdered::new();
+ let mut written_bytes = 0u64;
+ loop {
+ // Simultaneously write blocks to storage nodes & wait for the next block to be written
+ let currently_running = write_futs.len();
+ let write_futs_next = async {
+ if write_futs.is_empty() {
+ futures::future::pending().await
+ } else {
+ write_futs.next().await.unwrap()
+ }
+ };
+ let recv_next = async {
+ // If more than a maximum number of writes are in progress, don't add more for now
+ if currently_running >= PUT_BLOCKS_MAX_PARALLEL {
+ futures::future::pending().await
+ } else {
+ block_rx3.recv().await
+ }
+ };
+ let (block, hash) = tokio::select! {
+ result = write_futs_next => {
+ result?;
+ continue;
+ },
+ recv = recv_next => match recv {
+ Some(next) => next?,
+ None => break,
+ },
+ };
+
+ // For next block to be written: count its size and spawn future to write it
+ let offset = written_bytes;
+ written_bytes += block.len() as u64;
+ write_futs.push_back(put_block_and_meta(
garage,
version,
part_number,
- next_offset as u64,
- block_hash,
- block_len as u64,
- );
- put_curr_block = garage.block_manager.rpc_put_block(block_hash, block);
- next_offset += block_len;
- } else {
- break;
+ offset,
+ hash,
+ block,
+ order_stream.order(written_bytes),
+ ));
}
- }
+ while let Some(res) = write_futs.next().await {
+ res?;
+ }
+ Ok::<_, Error>(written_bytes)
+ };
+
+ let (_, stream_hash_result, block_hash_result, final_result) =
+ futures::join!(read_blocks, hash_stream, hash_blocks, put_blocks);
- let total_size = next_offset as u64;
- let data_md5sum = md5hasher.finalize().await;
+ let total_size = final_result?;
+ // unwrap here is ok, because if hasher failed, it is because something failed
+ // later in the pipeline which already caused a return at the ? on previous line
+ let (data_md5sum, data_sha256sum) = stream_hash_result.unwrap();
+ let first_block_hash = block_hash_result.unwrap();
- let data_sha256sum = sha256hasher.finalize().await;
let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap();
- Ok((total_size, data_md5sum, data_sha256sum))
+ Ok((total_size, data_md5sum, data_sha256sum, first_block_hash))
}
-async fn put_block_meta(
+async fn put_block_and_meta(
garage: &Garage,
version: &Version,
part_number: u64,
offset: u64,
hash: Hash,
- size: u64,
+ block: Bytes,
+ order_tag: OrderTag,
) -> Result<(), GarageError> {
let mut version = version.clone();
version.blocks.put(
@@ -384,7 +461,10 @@ async fn put_block_meta(
part_number,
offset,
},
- VersionBlock { hash, size },
+ VersionBlock {
+ hash,
+ size: block.len() as u64,
+ },
);
let block_ref = BlockRef {
@@ -394,6 +474,9 @@ async fn put_block_meta(
};
futures::try_join!(
+ garage
+ .block_manager
+ .rpc_put_block(hash, block, Some(order_tag)),
garage.version_table.insert(&version),
garage.block_ref_table.insert(&block_ref),
)?;
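The reworked `read_and_put_blocks` above splits the upload into a read/hash/write pipeline and caps concurrent block writes at `PUT_BLOCKS_MAX_PARALLEL` with a `FuturesOrdered` driven by `tokio::select!`. The standalone sketch below reproduces only that backpressure pattern, assuming the `tokio` and `futures` crates; `store_block`, `write_blocks` and the channel sizes are made up for the example and stand in for the real block-manager RPC.

```rust
use futures::stream::{FuturesOrdered, StreamExt};
use tokio::sync::mpsc;

const MAX_PARALLEL: usize = 3;

// Stand-in for an RPC that writes one block to storage nodes.
async fn store_block(idx: usize, block: Vec<u8>) -> Result<(), String> {
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    println!("stored block {} ({} bytes)", idx, block.len());
    Ok(())
}

// Consume blocks from `rx`, keeping at most MAX_PARALLEL writes in flight.
async fn write_blocks(mut rx: mpsc::Receiver<(usize, Vec<u8>)>) -> Result<u64, String> {
    let mut in_flight = FuturesOrdered::new();
    let mut written = 0u64;
    loop {
        let running = in_flight.len();
        // Wait for the oldest in-flight write to finish (or never, if none are running).
        let next_done = async {
            if in_flight.is_empty() {
                futures::future::pending().await
            } else {
                in_flight.next().await.unwrap()
            }
        };
        // Accept a new block only while below the parallelism limit.
        let next_block = async {
            if running >= MAX_PARALLEL {
                futures::future::pending().await
            } else {
                rx.recv().await
            }
        };
        let (idx, block) = tokio::select! {
            res = next_done => { res?; continue; },
            recv = next_block => match recv {
                Some(item) => item,
                None => break,
            },
        };
        written += block.len() as u64;
        in_flight.push_back(store_block(idx, block));
    }
    // Drain the remaining writes once the input channel is closed.
    while let Some(res) = in_flight.next().await {
        res?;
    }
    Ok(written)
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let (tx, rx) = mpsc::channel(2);
    let producer = async move {
        for i in 0..10usize {
            tx.send((i, vec![0u8; 1024])).await.map_err(|e| e.to_string())?;
        }
        Ok::<_, String>(())
    };
    let (sent, written) = tokio::join!(producer, write_blocks(rx));
    sent?;
    println!("wrote {} bytes total", written?);
    Ok(())
}
```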
diff --git a/src/api/signature/mod.rs b/src/api/signature/mod.rs
index 4b8b990f..6514da43 100644
--- a/src/api/signature/mod.rs
+++ b/src/api/signature/mod.rs
@@ -2,19 +2,44 @@ use chrono::{DateTime, Utc};
use hmac::{Hmac, Mac};
use sha2::Sha256;
+use hyper::{body::Incoming as IncomingBody, Request};
+
+use garage_model::garage::Garage;
+use garage_model::key_table::Key;
use garage_util::data::{sha256sum, Hash};
+use error::*;
+
pub mod error;
pub mod payload;
pub mod streaming;
-use error::*;
-
pub const SHORT_DATE: &str = "%Y%m%d";
pub const LONG_DATETIME: &str = "%Y%m%dT%H%M%SZ";
type HmacSha256 = Hmac<Sha256>;
+pub async fn verify_request(
+ garage: &Garage,
+ mut req: Request<IncomingBody>,
+ service: &'static str,
+) -> Result<(Request<streaming::ReqBody>, Key, Option<Hash>), Error> {
+ let (api_key, mut content_sha256) =
+ payload::check_payload_signature(&garage, &mut req, service).await?;
+ let api_key =
+ api_key.ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?;
+
+ let req = streaming::parse_streaming_body(
+ &api_key,
+ req,
+ &mut content_sha256,
+ &garage.config.s3_api.s3_region,
+ service,
+ )?;
+
+ Ok((req, api_key, content_sha256))
+}
+
pub fn verify_signed_content(expected_sha256: Hash, body: &[u8]) -> Result<(), Error> {
if expected_sha256 != sha256sum(body) {
return Err(Error::bad_request(
diff --git a/src/api/signature/payload.rs b/src/api/signature/payload.rs
index 423aad93..0029716a 100644
--- a/src/api/signature/payload.rs
+++ b/src/api/signature/payload.rs
@@ -1,7 +1,9 @@
use std::collections::HashMap;
+use std::convert::TryFrom;
use chrono::{DateTime, Duration, NaiveDateTime, TimeZone, Utc};
use hmac::Mac;
+use hyper::header::{HeaderMap, HeaderName, HeaderValue, AUTHORIZATION, HOST};
use hyper::{body::Incoming as IncomingBody, Method, Request};
use sha2::{Digest, Sha256};
@@ -17,66 +19,92 @@ use super::{compute_scope, signing_hmac};
use crate::encoding::uri_encode;
use crate::signature::error::*;
+pub const X_AMZ_ALGORITHM: HeaderName = HeaderName::from_static("x-amz-algorithm");
+pub const X_AMZ_CREDENTIAL: HeaderName = HeaderName::from_static("x-amz-credential");
+pub const X_AMZ_DATE: HeaderName = HeaderName::from_static("x-amz-date");
+pub const X_AMZ_EXPIRES: HeaderName = HeaderName::from_static("x-amz-expires");
+pub const X_AMZ_SIGNEDHEADERS: HeaderName = HeaderName::from_static("x-amz-signedheaders");
+pub const X_AMZ_SIGNATURE: HeaderName = HeaderName::from_static("x-amz-signature");
+pub const X_AMZ_CONTENT_SH256: HeaderName = HeaderName::from_static("x-amz-content-sha256");
+
+pub const AWS4_HMAC_SHA256: &str = "AWS4-HMAC-SHA256";
+pub const UNSIGNED_PAYLOAD: &str = "UNSIGNED-PAYLOAD";
+pub const STREAMING_AWS4_HMAC_SHA256_PAYLOAD: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
+
+pub type QueryMap = HashMap<String, String>;
+
pub async fn check_payload_signature(
garage: &Garage,
+ request: &mut Request<IncomingBody>,
service: &'static str,
- request: &Request<IncomingBody>,
) -> Result<(Option<Key>, Option<Hash>), Error> {
- let mut headers = HashMap::new();
- for (key, val) in request.headers() {
- headers.insert(key.to_string(), val.to_str()?.to_string());
- }
- if let Some(query) = request.uri().query() {
- let query_pairs = url::form_urlencoded::parse(query.as_bytes());
- for (key, val) in query_pairs {
- headers.insert(key.to_lowercase(), val.to_string());
- }
- }
-
- let authorization = if let Some(authorization) = headers.get("authorization") {
- parse_authorization(authorization, &headers)?
- } else if let Some(algorithm) = headers.get("x-amz-algorithm") {
- parse_query_authorization(algorithm, &headers)?
+ let query = parse_query_map(request.uri())?;
+
+ if query.contains_key(X_AMZ_ALGORITHM.as_str()) {
+ // We check for presigned-URL-style authentication first, because
+ // the browser or something else could inject an Authorization header
+ // that is totally unrelated to AWS signatures.
+ check_presigned_signature(garage, service, request, query).await
+ } else if request.headers().contains_key(AUTHORIZATION) {
+ check_standard_signature(garage, service, request, query).await
} else {
- let content_sha256 = headers.get("x-amz-content-sha256");
- if let Some(content_sha256) = content_sha256.filter(|c| "UNSIGNED-PAYLOAD" != c.as_str()) {
+ // Unsigned (anonymous) request
+ let content_sha256 = request
+ .headers()
+ .get("x-amz-content-sha256")
+ .filter(|c| c.as_bytes() != UNSIGNED_PAYLOAD.as_bytes());
+ if let Some(content_sha256) = content_sha256 {
let sha256 = hex::decode(content_sha256)
.ok()
.and_then(|bytes| Hash::try_from(&bytes))
.ok_or_bad_request("Invalid content sha256 hash")?;
- return Ok((None, Some(sha256)));
+ Ok((None, Some(sha256)))
} else {
- return Ok((None, None));
+ Ok((None, None))
}
- };
+ }
+}
+
+async fn check_standard_signature(
+ garage: &Garage,
+ service: &'static str,
+ request: &Request<IncomingBody>,
+ query: QueryMap,
+) -> Result<(Option<Key>, Option<Hash>), Error> {
+ let authorization = Authorization::parse_header(request.headers())?;
+
+ // Verify that all necessary request headers are included in signed_headers
+ // The following must be included for all signatures:
+ // - the Host header (mandatory)
+ // - all x-amz-* headers used in the request
+ // AWS also indicates that the Content-Type header should be signed if
+ // it is used, but the Minio client doesn't sign it, so for compatibility we don't require it.
+ let signed_headers = split_signed_headers(&authorization)?;
+ verify_signed_headers(request.headers(), &signed_headers)?;
let canonical_request = canonical_request(
service,
request.method(),
- request.uri(),
- &headers,
- &authorization.signed_headers,
+ request.uri().path(),
+ &query,
+ request.headers(),
+ &signed_headers,
&authorization.content_sha256,
+ )?;
+ let string_to_sign = string_to_sign(
+ &authorization.date,
+ &authorization.scope,
+ &canonical_request,
);
- let (_, scope) = parse_credential(&authorization.credential)?;
- let string_to_sign = string_to_sign(&authorization.date, &scope, &canonical_request);
trace!("canonical request:\n{}", canonical_request);
trace!("string to sign:\n{}", string_to_sign);
- let key = verify_v4(
- garage,
- service,
- &authorization.credential,
- &authorization.date,
- &authorization.signature,
- string_to_sign.as_bytes(),
- )
- .await?;
+ let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
- let content_sha256 = if authorization.content_sha256 == "UNSIGNED-PAYLOAD" {
+ let content_sha256 = if authorization.content_sha256 == UNSIGNED_PAYLOAD {
None
- } else if authorization.content_sha256 == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" {
+ } else if authorization.content_sha256 == STREAMING_AWS4_HMAC_SHA256_PAYLOAD {
let bytes = hex::decode(authorization.signature).ok_or_bad_request("Invalid signature")?;
Some(Hash::try_from(&bytes).ok_or_bad_request("Invalid signature")?)
} else {
@@ -88,124 +116,96 @@ pub async fn check_payload_signature(
Ok((Some(key), content_sha256))
}
-struct Authorization {
- credential: String,
- signed_headers: String,
- signature: String,
- content_sha256: String,
- date: DateTime<Utc>,
-}
-
-fn parse_authorization(
- authorization: &str,
- headers: &HashMap<String, String>,
-) -> Result<Authorization, Error> {
- let first_space = authorization
- .find(' ')
- .ok_or_bad_request("Authorization field to short")?;
- let (auth_kind, rest) = authorization.split_at(first_space);
-
- if auth_kind != "AWS4-HMAC-SHA256" {
- return Err(Error::bad_request("Unsupported authorization method"));
- }
-
- let mut auth_params = HashMap::new();
- for auth_part in rest.split(',') {
- let auth_part = auth_part.trim();
- let eq = auth_part
- .find('=')
- .ok_or_bad_request("Field without value in authorization header")?;
- let (key, value) = auth_part.split_at(eq);
- auth_params.insert(key.to_string(), value.trim_start_matches('=').to_string());
- }
-
- let cred = auth_params
- .get("Credential")
- .ok_or_bad_request("Could not find Credential in Authorization field")?;
-
- let content_sha256 = headers
- .get("x-amz-content-sha256")
- .ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;
-
- let date = headers
- .get("x-amz-date")
- .ok_or_bad_request("Missing X-Amz-Date field")
- .map_err(Error::from)
- .and_then(|d| parse_date(d))?;
+async fn check_presigned_signature(
+ garage: &Garage,
+ service: &'static str,
+ request: &mut Request<IncomingBody>,
+ mut query: QueryMap,
+) -> Result<(Option<Key>, Option<Hash>), Error> {
+ let algorithm = query.get(X_AMZ_ALGORITHM.as_str()).unwrap();
+ let authorization = Authorization::parse_presigned(algorithm, &query)?;
+
+ // Verify that all necessary request headers are included in signed_headers
+ // For AWSv4 pre-signed URLs, the following must be included:
+ // - the Host header (mandatory)
+ // - all x-amz-* headers used in the request
+ let signed_headers = split_signed_headers(&authorization)?;
+ verify_signed_headers(request.headers(), &signed_headers)?;
+
+ // The X-Amz-Signature value is passed as a query parameter,
+ // but the signature cannot be computed from a string that contains itself.
+ // AWS specifies that all query params except X-Amz-Signature are included
+ // in the canonical request.
+ query.remove(X_AMZ_SIGNATURE.as_str());
+ let canonical_request = canonical_request(
+ service,
+ request.method(),
+ request.uri().path(),
+ &query,
+ request.headers(),
+ &signed_headers,
+ &authorization.content_sha256,
+ )?;
+ let string_to_sign = string_to_sign(
+ &authorization.date,
+ &authorization.scope,
+ &canonical_request,
+ );
- if Utc::now() - date > Duration::hours(24) {
- return Err(Error::bad_request("Date is too old".to_string()));
+ trace!("canonical request (presigned url):\n{}", canonical_request);
+ trace!("string to sign (presigned url):\n{}", string_to_sign);
+
+ let key = verify_v4(garage, service, &authorization, string_to_sign.as_bytes()).await?;
+
+ // In its documentation on presigned URLs, AWS specifies that if a signed query
+ // parameter and a signed header of the same name have different values,
+ // then an InvalidRequest error is raised.
+ let headers_mut = request.headers_mut();
+ for (name, value) in query.iter() {
+ let name =
+ HeaderName::from_bytes(name.as_bytes()).ok_or_bad_request("Invalid header name")?;
+ if let Some(existing) = headers_mut.get(&name) {
+ if signed_headers.contains(&name) && existing.as_bytes() != value.as_bytes() {
+ return Err(Error::bad_request(format!(
+ "Conflicting values for `{}` in query parameters and request headers",
+ name
+ )));
+ }
+ }
+ if name.as_str().starts_with("x-amz-") {
+ // Query parameters that start with x-amz- are actually intended to stand in for
+ // headers that can't be added at the time the request is made.
+ // What we do is just add them to the Request object as regular headers,
+ // which will be handled downstream as if they had been included in a normal request.
+ // (Here we allow such query parameters to override headers with the same name
+ // that are not signed; however, there is not much reason for this to happen.)
+ headers_mut.insert(
+ name,
+ HeaderValue::from_bytes(value.as_bytes())
+ .ok_or_bad_request("invalid query parameter value")?,
+ );
+ }
}
- let auth = Authorization {
- credential: cred.to_string(),
- signed_headers: auth_params
- .get("SignedHeaders")
- .ok_or_bad_request("Could not find SignedHeaders in Authorization field")?
- .to_string(),
- signature: auth_params
- .get("Signature")
- .ok_or_bad_request("Could not find Signature in Authorization field")?
- .to_string(),
- content_sha256: content_sha256.to_string(),
- date,
- };
- Ok(auth)
+ // Presigned URLs always use UNSIGNED-PAYLOAD,
+ // so there is no sha256 hash to return.
+ Ok((Some(key), None))
}
-fn parse_query_authorization(
- algorithm: &str,
- headers: &HashMap<String, String>,
-) -> Result<Authorization, Error> {
- if algorithm != "AWS4-HMAC-SHA256" {
- return Err(Error::bad_request(
- "Unsupported authorization method".to_string(),
- ));
- }
-
- let cred = headers
- .get("x-amz-credential")
- .ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
- let signed_headers = headers
- .get("x-amz-signedheaders")
- .ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
- let signature = headers
- .get("x-amz-signature")
- .ok_or_bad_request("X-Amz-Signature not found in query parameters")?;
- let content_sha256 = headers
- .get("x-amz-content-sha256")
- .map(|x| x.as_str())
- .unwrap_or("UNSIGNED-PAYLOAD");
-
- let duration = headers
- .get("x-amz-expires")
- .ok_or_bad_request("X-Amz-Expires not found in query parameters")?
- .parse()
- .map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;
-
- if duration > 7 * 24 * 3600 {
- return Err(Error::bad_request(
- "X-Amz-Expires may not exceed a week".to_string(),
- ));
- }
-
- let date = headers
- .get("x-amz-date")
- .ok_or_bad_request("Missing X-Amz-Date field")
- .map_err(Error::from)
- .and_then(|d| parse_date(d))?;
-
- if Utc::now() - date > Duration::seconds(duration) {
- return Err(Error::bad_request("Date is too old".to_string()));
+pub fn parse_query_map(uri: &http::uri::Uri) -> Result<QueryMap, Error> {
+ let mut query = QueryMap::new();
+ if let Some(query_str) = uri.query() {
+ let query_pairs = url::form_urlencoded::parse(query_str.as_bytes());
+ for (key, val) in query_pairs {
+ if query.insert(key.to_string(), val.into_owned()).is_some() {
+ return Err(Error::bad_request(format!(
+ "duplicate query parameter: `{}`",
+ key
+ )));
+ }
+ }
}
-
- Ok(Authorization {
- credential: cred.to_string(),
- signed_headers: signed_headers.to_string(),
- signature: signature.to_string(),
- content_sha256: content_sha256.to_string(),
- date,
- })
+ Ok(query)
}
fn parse_credential(cred: &str) -> Result<(String, String), Error> {
@@ -219,11 +219,39 @@ fn parse_credential(cred: &str) -> Result<(String, String), Error> {
))
}
+fn split_signed_headers(authorization: &Authorization) -> Result<Vec<HeaderName>, Error> {
+ let mut signed_headers = authorization
+ .signed_headers
+ .split(';')
+ .map(HeaderName::try_from)
+ .collect::<Result<Vec<HeaderName>, _>>()
+ .ok_or_bad_request("invalid header name")?;
+ signed_headers.sort_by(|h1, h2| h1.as_str().cmp(h2.as_str()));
+ Ok(signed_headers)
+}
+
+fn verify_signed_headers(headers: &HeaderMap, signed_headers: &[HeaderName]) -> Result<(), Error> {
+ if !signed_headers.contains(&HOST) {
+ return Err(Error::bad_request("Header `Host` should be signed"));
+ }
+ for (name, _) in headers.iter() {
+ if name.as_str().starts_with("x-amz-") {
+ if !signed_headers.contains(name) {
+ return Err(Error::bad_request(format!(
+ "Header `{}` should be signed",
+ name
+ )));
+ }
+ }
+ }
+ Ok(())
+}
+
pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_req: &str) -> String {
let mut hasher = Sha256::default();
hasher.update(canonical_req.as_bytes());
[
- "AWS4-HMAC-SHA256",
+ AWS4_HMAC_SHA256,
&datetime.format(LONG_DATETIME).to_string(),
scope_string,
&hex::encode(hasher.finalize().as_slice()),
@@ -234,11 +262,12 @@ pub fn string_to_sign(datetime: &DateTime<Utc>, scope_string: &str, canonical_re
pub fn canonical_request(
service: &'static str,
method: &Method,
- uri: &hyper::Uri,
- headers: &HashMap<String, String>,
- signed_headers: &str,
+ canonical_uri: &str,
+ query: &QueryMap,
+ headers: &HeaderMap,
+ signed_headers: &[HeaderName],
content_sha256: &str,
-) -> String {
+) -> Result<String, Error> {
// There seems to be evidence that in AWSv4 signatures, the path component is url-encoded
// a second time when building the canonical request, as specified in this documentation page:
// -> https://docs.aws.amazon.com/rolesanywhere/latest/userguide/authentication-sign-process.html
@@ -268,49 +297,46 @@ pub fn canonical_request(
// it mentions it in the comments (same link to the source code as above).
// We make the explicit choice of NOT normalizing paths in the K2V API because doing so
// would make non-normalized paths invalid K2V partition keys, and we don't want that.
- let path: std::borrow::Cow<str> = if service != "s3" {
- uri_encode(uri.path(), false).into()
+ let canonical_uri: std::borrow::Cow<str> = if service != "s3" {
+ uri_encode(canonical_uri, false).into()
} else {
- uri.path().into()
+ canonical_uri.into()
};
- [
- method.as_str(),
- &path,
- &canonical_query_string(uri),
- &canonical_header_string(headers, signed_headers),
- "",
- signed_headers,
- content_sha256,
- ]
- .join("\n")
-}
-
-fn canonical_header_string(headers: &HashMap<String, String>, signed_headers: &str) -> String {
- let signed_headers_vec = signed_headers.split(';').collect::<Vec<_>>();
- let mut items = headers
- .iter()
- .filter(|(key, _)| signed_headers_vec.contains(&key.as_str()))
- .collect::<Vec<_>>();
- items.sort_by(|(k1, _), (k2, _)| k1.cmp(k2));
- items
- .iter()
- .map(|(key, value)| key.to_lowercase() + ":" + value.trim())
- .collect::<Vec<_>>()
- .join("\n")
-}
-fn canonical_query_string(uri: &hyper::Uri) -> String {
- if let Some(query) = uri.query() {
- let query_pairs = url::form_urlencoded::parse(query.as_bytes());
- let mut items = query_pairs
- .filter(|(key, _)| key != "X-Amz-Signature")
- .map(|(key, value)| uri_encode(&key, true) + "=" + &uri_encode(&value, true))
- .collect::<Vec<_>>();
+ // Canonical query string calculated from the passed QueryMap
+ let canonical_query_string = {
+ let mut items = Vec::with_capacity(query.len());
+ for (key, value) in query.iter() {
+ items.push(uri_encode(&key, true) + "=" + &uri_encode(&value, true));
+ }
items.sort();
items.join("&")
- } else {
- "".to_string()
- }
+ };
+
+ // Canonical header string calculated from signed headers
+ let canonical_header_string = signed_headers
+ .iter()
+ .map(|name| {
+ let value = headers
+ .get(name)
+ .ok_or_bad_request(format!("signed header `{}` is not present", name))?
+ .to_str()?;
+ Ok(format!("{}:{}", name.as_str(), value.trim()))
+ })
+ .collect::<Result<Vec<String>, Error>>()?
+ .join("\n");
+ let signed_headers = signed_headers.join(";");
+
+ let list = [
+ method.as_str(),
+ &canonical_uri,
+ &canonical_query_string,
+ &canonical_header_string,
+ "",
+ &signed_headers,
+ content_sha256,
+ ];
+ Ok(list.join("\n"))
}
pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
@@ -322,38 +348,202 @@ pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
pub async fn verify_v4(
garage: &Garage,
service: &str,
- credential: &str,
- date: &DateTime<Utc>,
- signature: &str,
+ auth: &Authorization,
payload: &[u8],
) -> Result<Key, Error> {
- let (key_id, scope) = parse_credential(credential)?;
-
- let scope_expected = compute_scope(date, &garage.config.s3_api.s3_region, service);
- if scope != scope_expected {
- return Err(Error::AuthorizationHeaderMalformed(scope.to_string()));
+ let scope_expected = compute_scope(&auth.date, &garage.config.s3_api.s3_region, service);
+ if auth.scope != scope_expected {
+ return Err(Error::AuthorizationHeaderMalformed(auth.scope.to_string()));
}
let key = garage
.key_table
- .get(&EmptyKey, &key_id)
+ .get(&EmptyKey, &auth.key_id)
.await?
.filter(|k| !k.state.is_deleted())
- .ok_or_else(|| Error::forbidden(format!("No such key: {}", &key_id)))?;
+ .ok_or_else(|| Error::forbidden(format!("No such key: {}", &auth.key_id)))?;
let key_p = key.params().unwrap();
let mut hmac = signing_hmac(
- date,
+ &auth.date,
&key_p.secret_key,
&garage.config.s3_api.s3_region,
service,
)
.ok_or_internal_error("Unable to build signing HMAC")?;
hmac.update(payload);
- let our_signature = hex::encode(hmac.finalize().into_bytes());
- if signature != our_signature {
- return Err(Error::forbidden("Invalid signature".to_string()));
+ let signature =
+ hex::decode(&auth.signature).map_err(|_| Error::forbidden("Invalid signature"))?;
+ if hmac.verify_slice(&signature).is_err() {
+ return Err(Error::forbidden("Invalid signature"));
}
Ok(key)
}
+
+// ============ Authorization header, or X-Amz-* query params =========
+
+pub struct Authorization {
+ key_id: String,
+ scope: String,
+ signed_headers: String,
+ signature: String,
+ content_sha256: String,
+ date: DateTime<Utc>,
+}
+
+impl Authorization {
+ fn parse_header(headers: &HeaderMap) -> Result<Self, Error> {
+ let authorization = headers
+ .get(AUTHORIZATION)
+ .ok_or_bad_request("Missing authorization header")?
+ .to_str()?;
+
+ let (auth_kind, rest) = authorization
+ .split_once(' ')
+ .ok_or_bad_request("Authorization field to short")?;
+
+ if auth_kind != AWS4_HMAC_SHA256 {
+ return Err(Error::bad_request("Unsupported authorization method"));
+ }
+
+ let mut auth_params = HashMap::new();
+ for auth_part in rest.split(',') {
+ let auth_part = auth_part.trim();
+ let eq = auth_part
+ .find('=')
+ .ok_or_bad_request("Field without value in authorization header")?;
+ let (key, value) = auth_part.split_at(eq);
+ auth_params.insert(key.to_string(), value.trim_start_matches('=').to_string());
+ }
+
+ let cred = auth_params
+ .get("Credential")
+ .ok_or_bad_request("Could not find Credential in Authorization field")?;
+ let signed_headers = auth_params
+ .get("SignedHeaders")
+ .ok_or_bad_request("Could not find SignedHeaders in Authorization field")?
+ .to_string();
+ let signature = auth_params
+ .get("Signature")
+ .ok_or_bad_request("Could not find Signature in Authorization field")?
+ .to_string();
+
+ let content_sha256 = headers
+ .get(X_AMZ_CONTENT_SH256)
+ .ok_or_bad_request("Missing X-Amz-Content-Sha256 field")?;
+
+ let date = headers
+ .get(X_AMZ_DATE)
+ .ok_or_bad_request("Missing X-Amz-Date field")
+ .map_err(Error::from)?
+ .to_str()?;
+ let date = parse_date(date)?;
+
+ if Utc::now() - date > Duration::hours(24) {
+ return Err(Error::bad_request("Date is too old".to_string()));
+ }
+
+ let (key_id, scope) = parse_credential(cred)?;
+ let auth = Authorization {
+ key_id,
+ scope,
+ signed_headers,
+ signature,
+ content_sha256: content_sha256.to_str()?.to_string(),
+ date,
+ };
+ Ok(auth)
+ }
+
+ fn parse_presigned(algorithm: &str, query: &QueryMap) -> Result<Self, Error> {
+ if algorithm != AWS4_HMAC_SHA256 {
+ return Err(Error::bad_request(
+ "Unsupported authorization method".to_string(),
+ ));
+ }
+
+ let cred = query
+ .get(X_AMZ_CREDENTIAL.as_str())
+ .ok_or_bad_request("X-Amz-Credential not found in query parameters")?;
+ let signed_headers = query
+ .get(X_AMZ_SIGNEDHEADERS.as_str())
+ .ok_or_bad_request("X-Amz-SignedHeaders not found in query parameters")?;
+ let signature = query
+ .get(X_AMZ_SIGNATURE.as_str())
+ .ok_or_bad_request("X-Amz-Signature not found in query parameters")?;
+
+ let duration = query
+ .get(X_AMZ_EXPIRES.as_str())
+ .ok_or_bad_request("X-Amz-Expires not found in query parameters")?
+ .parse()
+ .map_err(|_| Error::bad_request("X-Amz-Expires is not a number".to_string()))?;
+
+ if duration > 7 * 24 * 3600 {
+ return Err(Error::bad_request(
+ "X-Amz-Expires may not exceed a week".to_string(),
+ ));
+ }
+
+ let date = query
+ .get(X_AMZ_DATE.as_str())
+ .ok_or_bad_request("Missing X-Amz-Date field")?;
+ let date = parse_date(date)?;
+
+ if Utc::now() - date > Duration::seconds(duration) {
+ return Err(Error::bad_request("Date is too old".to_string()));
+ }
+
+ let (key_id, scope) = parse_credential(cred)?;
+ Ok(Authorization {
+ key_id,
+ scope,
+ signed_headers: signed_headers.to_string(),
+ signature: signature.to_string(),
+ content_sha256: UNSIGNED_PAYLOAD.to_string(),
+ date,
+ })
+ }
+
+ pub(crate) fn parse_form(params: &HeaderMap) -> Result<Self, Error> {
+ let algorithm = params
+ .get(X_AMZ_ALGORITHM)
+ .ok_or_bad_request("Missing X-Amz-Algorithm header")?
+ .to_str()?;
+ if algorithm != AWS4_HMAC_SHA256 {
+ return Err(Error::bad_request(
+ "Unsupported authorization method".to_string(),
+ ));
+ }
+
+ let credential = params
+ .get(X_AMZ_CREDENTIAL)
+ .ok_or_else(|| Error::forbidden("Garage does not support anonymous access yet"))?
+ .to_str()?;
+ let signature = params
+ .get(X_AMZ_SIGNATURE)
+ .ok_or_bad_request("No signature was provided")?
+ .to_str()?
+ .to_string();
+ let date = params
+ .get(X_AMZ_DATE)
+ .ok_or_bad_request("No date was provided")?
+ .to_str()?;
+ let date = parse_date(date)?;
+
+ if Utc::now() - date > Duration::hours(24) {
+ return Err(Error::bad_request("Date is too old".to_string()));
+ }
+
+ let (key_id, scope) = parse_credential(credential)?;
+ let auth = Authorization {
+ key_id,
+ scope,
+ signed_headers: "".to_string(),
+ signature,
+ content_sha256: UNSIGNED_PAYLOAD.to_string(),
+ date,
+ };
+ Ok(auth)
+ }
+}
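The canonical_request() and string_to_sign() functions reworked above follow the standard AWS Signature Version 4 layout. As a rough illustration of the strings they produce (all values below are made up, and the scope uses a hypothetical "garage" region):

// Illustration only, not part of this patch: the SigV4 layout that
// canonical_request() and string_to_sign() reproduce.
use sha2::{Digest, Sha256};

fn example_string_to_sign() -> String {
    let canonical_request = [
        "GET",                                    // HTTP method
        "/my-bucket/my-key",                      // canonical URI (path only)
        "x-amz-expires=3600",                     // canonical query string, sorted and URI-encoded
        "host:garage.example.com\nx-amz-date:20240101T000000Z", // canonical headers, one per line
        "",                                       // blank line terminating the header list
        "host;x-amz-date",                        // signed header names, sorted, ';'-separated
        "UNSIGNED-PAYLOAD",                       // content sha256, or a hex digest of the body
    ]
    .join("\n");

    [
        "AWS4-HMAC-SHA256",
        "20240101T000000Z",                       // request timestamp (LONG_DATETIME format)
        "20240101/garage/s3/aws4_request",        // scope: date/region/service/aws4_request
        &hex::encode(Sha256::digest(canonical_request.as_bytes())),
    ]
    .join("\n")
}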
diff --git a/src/api/signature/streaming.rs b/src/api/signature/streaming.rs
index a2a71f6b..e223d1b1 100644
--- a/src/api/signature/streaming.rs
+++ b/src/api/signature/streaming.rs
@@ -15,6 +15,11 @@ use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
use crate::helpers::*;
use crate::signature::error::*;
+use crate::signature::payload::{
+ STREAMING_AWS4_HMAC_SHA256_PAYLOAD, X_AMZ_CONTENT_SH256, X_AMZ_DATE,
+};
+
+pub const AWS4_HMAC_SHA256_PAYLOAD: &str = "AWS4-HMAC-SHA256-PAYLOAD";
pub type ReqBody = BoxBody<Error>;
@@ -25,8 +30,8 @@ pub fn parse_streaming_body(
region: &str,
service: &str,
) -> Result<Request<ReqBody>, Error> {
- match req.headers().get("x-amz-content-sha256") {
- Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
+ match req.headers().get(X_AMZ_CONTENT_SH256) {
+ Some(header) if header == STREAMING_AWS4_HMAC_SHA256_PAYLOAD => {
let signature = content_sha256
.take()
.ok_or_bad_request("No signature provided")?;
@@ -39,7 +44,7 @@ pub fn parse_streaming_body(
let date = req
.headers()
- .get("x-amz-date")
+ .get(X_AMZ_DATE)
.ok_or_bad_request("Missing X-Amz-Date field")?
.to_str()?;
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
@@ -75,7 +80,7 @@ fn compute_streaming_payload_signature(
content_sha256: Hash,
) -> Result<Hash, Error> {
let string_to_sign = [
- "AWS4-HMAC-SHA256-PAYLOAD",
+ AWS4_HMAC_SHA256_PAYLOAD,
&date.format(LONG_DATETIME).to_string(),
scope,
&hex::encode(previous_signature),
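For reference, the streaming (aws-chunked) signature that compute_streaming_payload_signature() verifies chains each chunk onto the previous one, per the AWS streaming-payload specification. A minimal sketch, not part of this patch, of the per-chunk string to sign:

// Illustration only: how each aws-chunked frame's signature chains onto the
// previous one (the seed signature comes from the request's Authorization).
use sha2::{Digest, Sha256};

fn chunk_string_to_sign(date: &str, scope: &str, previous_sig_hex: &str, chunk: &[u8]) -> String {
    let empty_hash = hex::encode(Sha256::digest(b""));   // sha256 of an empty string, per the spec
    let chunk_hash = hex::encode(Sha256::digest(chunk)); // sha256 of this chunk's payload
    [
        "AWS4-HMAC-SHA256-PAYLOAD",
        date,              // e.g. "20240101T000000Z"
        scope,             // e.g. "20240101/garage/s3/aws4_request"
        previous_sig_hex,  // hex signature of the previous chunk
        &empty_hash,
        &chunk_hash,
    ]
    .join("\n")
}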
diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml
index d2666b10..54093e4e 100644
--- a/src/block/Cargo.toml
+++ b/src/block/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_block"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/block/manager.rs b/src/block/manager.rs
index 817866f6..890ea8b7 100644
--- a/src/block/manager.rs
+++ b/src/block/manager.rs
@@ -346,7 +346,12 @@ impl BlockManager {
}
/// Send block to nodes that should have it
- pub async fn rpc_put_block(&self, hash: Hash, data: Bytes) -> Result<(), Error> {
+ pub async fn rpc_put_block(
+ &self,
+ hash: Hash,
+ data: Bytes,
+ order_tag: Option<OrderTag>,
+ ) -> Result<(), Error> {
let who = self.replication.write_nodes(&hash);
let (header, bytes) = DataBlock::from_buffer(data, self.compression_level)
@@ -354,6 +359,11 @@ impl BlockManager {
.into_parts();
let put_block_rpc =
Req::new(BlockRpc::PutBlock { hash, header })?.with_stream_from_buffer(bytes);
+ let put_block_rpc = if let Some(tag) = order_tag {
+ put_block_rpc.with_order_tag(tag)
+ } else {
+ put_block_rpc
+ };
self.system
.rpc
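The new order_tag parameter lets callers that upload many blocks of a single object ask the RPC layer to deliver them in order. A hypothetical call site (not from this patch; the wrapper name is made up) might look like:

// Hypothetical caller of the extended rpc_put_block(); passing None keeps the
// previous behaviour (no ordering constraint on the RPC stream).
async fn store_block(
    mgr: &BlockManager,
    hash: Hash,
    data: Bytes,
    order_tag: Option<OrderTag>, // produced by the caller's ordering scheme, if any
) -> Result<(), Error> {
    mgr.rpc_put_block(hash, data, order_tag).await
}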
diff --git a/src/db/Cargo.toml b/src/db/Cargo.toml
index 9a925136..bb72d5cd 100644
--- a/src/db/Cargo.toml
+++ b/src/db/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_db"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml
index 65135530..2c8ea3f9 100644
--- a/src/garage/Cargo.toml
+++ b/src/garage/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/garage/tests/common/custom_requester.rs b/src/garage/tests/common/custom_requester.rs
index e5f4cca1..2cac5cd5 100644
--- a/src/garage/tests/common/custom_requester.rs
+++ b/src/garage/tests/common/custom_requester.rs
@@ -1,12 +1,15 @@
#![allow(dead_code)]
use std::collections::HashMap;
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
use chrono::{offset::Utc, DateTime};
use hmac::{Hmac, Mac};
use http_body_util::BodyExt;
use http_body_util::Full as FullBody;
+use hyper::header::{
+ HeaderMap, HeaderName, HeaderValue, AUTHORIZATION, CONTENT_ENCODING, CONTENT_LENGTH, HOST,
+};
use hyper::{Method, Request, Response, Uri};
use hyper_util::client::legacy::{connect::HttpConnector, Client};
use hyper_util::rt::TokioExecutor;
@@ -173,54 +176,85 @@ impl<'a> RequestBuilder<'a> {
.unwrap();
let streaming_signer = signer.clone();
- let mut all_headers = self.signed_headers.clone();
+ let mut all_headers = self
+ .signed_headers
+ .iter()
+ .map(|(k, v)| {
+ (
+ HeaderName::try_from(k).expect("invalid header name"),
+ HeaderValue::try_from(v).expect("invalid header value"),
+ )
+ })
+ .collect::<HeaderMap>();
let date = now.format(signature::LONG_DATETIME).to_string();
- all_headers.insert("x-amz-date".to_owned(), date);
- all_headers.insert("host".to_owned(), host);
+ all_headers.insert(
+ signature::payload::X_AMZ_DATE,
+ HeaderValue::from_str(&date).unwrap(),
+ );
+ all_headers.insert(HOST, HeaderValue::from_str(&host).unwrap());
let body_sha = match self.body_signature {
BodySignature::Unsigned => "UNSIGNED-PAYLOAD".to_owned(),
BodySignature::Classic => hex::encode(garage_util::data::sha256sum(&self.body)),
BodySignature::Streaming(size) => {
- all_headers.insert("content-encoding".to_owned(), "aws-chunked".to_owned());
all_headers.insert(
- "x-amz-decoded-content-length".to_owned(),
- self.body.len().to_string(),
+ CONTENT_ENCODING,
+ HeaderValue::from_str("aws-chunked").unwrap(),
+ );
+ all_headers.insert(
+ HeaderName::from_static("x-amz-decoded-content-length"),
+ HeaderValue::from_str(&self.body.len().to_string()).unwrap(),
);
// Get length of the body by doing the conversion to a streaming body with an
// invalid signature (we don't know the seed) just to get its length. This
// is a pretty lazy and inefficient way to do it, but it's enough for test
// code.
all_headers.insert(
- "content-length".to_owned(),
+ CONTENT_LENGTH,
to_streaming_body(&self.body, size, String::new(), signer.clone(), now, "")
.len()
- .to_string(),
+ .to_string()
+ .try_into()
+ .unwrap(),
);
"STREAMING-AWS4-HMAC-SHA256-PAYLOAD".to_owned()
}
};
- all_headers.insert("x-amz-content-sha256".to_owned(), body_sha.clone());
+ all_headers.insert(
+ signature::payload::X_AMZ_CONTENT_SH256,
+ HeaderValue::from_str(&body_sha).unwrap(),
+ );
+
+ let mut signed_headers = all_headers.keys().cloned().collect::<Vec<_>>();
+ signed_headers.sort_by(|h1, h2| h1.as_str().cmp(h2.as_str()));
+ let signed_headers_str = signed_headers
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(";");
- let mut signed_headers = all_headers
- .keys()
- .map(|k| k.as_ref())
- .collect::<Vec<&str>>();
- signed_headers.sort();
- let signed_headers = signed_headers.join(";");
+ all_headers.extend(self.unsigned_headers.iter().map(|(k, v)| {
+ (
+ HeaderName::try_from(k).expect("invalid header name"),
+ HeaderValue::try_from(v).expect("invalid header value"),
+ )
+ }));
- all_headers.extend(self.unsigned_headers.clone());
+ let uri = Uri::try_from(&uri).unwrap();
+ let query = signature::payload::parse_query_map(&uri).unwrap();
let canonical_request = signature::payload::canonical_request(
self.service,
&self.method,
- &Uri::try_from(&uri).unwrap(),
+ uri.path(),
+ &query,
&all_headers,
&signed_headers,
&body_sha,
- );
+ )
+ .unwrap();
let string_to_sign = signature::payload::string_to_sign(&now, &scope, &canonical_request);
@@ -228,14 +262,15 @@ impl<'a> RequestBuilder<'a> {
let signature = hex::encode(signer.finalize().into_bytes());
let authorization = format!(
"AWS4-HMAC-SHA256 Credential={}/{},SignedHeaders={},Signature={}",
- self.requester.key.id, scope, signed_headers, signature
+ self.requester.key.id, scope, signed_headers_str, signature
+ );
+ all_headers.insert(
+ AUTHORIZATION,
+ HeaderValue::from_str(&authorization).unwrap(),
);
- all_headers.insert("authorization".to_owned(), authorization);
let mut request = Request::builder();
- for (k, v) in all_headers {
- request = request.header(k, v);
- }
+ *request.headers_mut().unwrap() = all_headers;
let body = if let BodySignature::Streaming(size) = self.body_signature {
to_streaming_body(&self.body, size, signature, streaming_signer, now, &scope)
diff --git a/src/garage/tests/s3/streaming_signature.rs b/src/garage/tests/s3/streaming_signature.rs
index 224b9ed5..351aa422 100644
--- a/src/garage/tests/s3/streaming_signature.rs
+++ b/src/garage/tests/s3/streaming_signature.rs
@@ -26,7 +26,7 @@ async fn test_putobject_streaming() {
.builder(bucket.clone())
.method(Method::PUT)
.path(STD_KEY.to_owned())
- .unsigned_headers(headers)
+ .signed_headers(headers)
.vhost_style(true)
.body(vec![])
.body_signature(BodySignature::Streaming(10))
diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml
index ce0ccff0..3060c133 100644
--- a/src/model/Cargo.toml
+++ b/src/model/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_model"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/net/Cargo.toml b/src/net/Cargo.toml
index a2674498..af7b4cbe 100644
--- a/src/net/Cargo.toml
+++ b/src/net/Cargo.toml
@@ -1,9 +1,9 @@
[package]
name = "garage_net"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
-license-file = "AGPL-3.0"
+license = "AGPL-3.0"
description = "Networking library for Garage RPC communication, forked from Netapp"
repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml
index 5d5750c4..de0e9e17 100644
--- a/src/rpc/Cargo.toml
+++ b/src/rpc/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_rpc"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 4f2aed7a..4cf21f9f 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_table"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml
index b4d8477c..fb7b632d 100644
--- a/src/util/Cargo.toml
+++ b/src/util/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_util"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml
index 3add5200..12bec5a7 100644
--- a/src/web/Cargo.toml
+++ b/src/web/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_web"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018"
license = "AGPL-3.0"