From 5768bf362262f78376af14517c4921941986192e Mon Sep 17 00:00:00 2001
From: Alex
Date: Tue, 10 May 2022 13:16:57 +0200
Subject: First implementation of K2V (#293)

**Specification:**

View spec at [this URL](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md)

- [x] Specify the structure of K2V triples
- [x] Specify the DVVS format used for causality detection
- [x] Specify the K2V index (just a counter of number of values per partition key)
- [x] Specify single-item endpoints: ReadItem, InsertItem, DeleteItem
- [x] Specify index endpoint: ReadIndex
- [x] Specify multi-item endpoints: InsertBatch, ReadBatch, DeleteBatch
- [x] Move to JSON objects instead of tuples
- [x] Specify endpoints for polling for updates on single values (PollItem)

**Implementation:**

- [x] Table for K2V items, causal contexts
- [x] Indexing mechanism and table for K2V index
- [x] Make API handlers a bit more generic
- [x] K2V API endpoint
- [x] K2V API router
- [x] ReadItem
- [x] InsertItem
- [x] DeleteItem
- [x] PollItem
- [x] ReadIndex
- [x] InsertBatch
- [x] ReadBatch
- [x] DeleteBatch

**Testing:**

- [x] Just a simple Python script that does some requests to check visually that things are going right (does not contain parsing of results or assertions on returned values)
- [x] Actual tests:
  - [x] Adapt testing framework
  - [x] Simple test with InsertItem + ReadItem
  - [x] Test with several Insert/Read/DeleteItem + ReadIndex
  - [x] Test all combinations of return formats for ReadItem
  - [x] Test with ReadBatch, InsertBatch, DeleteBatch
  - [x] Test with PollItem
  - [x] Test error codes
- [ ] Fix most broken stuff
  - [x] test PollItem broken randomly
  - [x] when invalid causality tokens are given, errors should be 4xx not 5xx

**Improvements:**

- [x] Descending range queries
  - [x] Specify
  - [x] Implement
  - [x] Add test
- [x] Batch updates to index counter
- [x] Put K2V behind `k2v` feature flag

Co-authored-by: Alex Auvolat
Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/293
Co-authored-by: Alex
Co-committed-by: Alex
---
 Cargo.lock | 17 +
 Cargo.nix | 203 ++-
 Makefile | 2 +-
 doc/drafts/k2v-spec.md | 680 +++++++++++++
 k2v_test.py | 158 +++
 src/api/Cargo.toml | 5 +
 src/api/api_server.rs | 645 -------------
 src/api/error.rs | 7 +-
 src/api/generic_server.rs | 202 ++++
 src/api/helpers.rs | 188 +++-
 src/api/k2v/api_server.rs | 195 ++++
 src/api/k2v/batch.rs | 368 +++++++
 src/api/k2v/index.rs | 100 ++
 src/api/k2v/item.rs | 230 +++++
 src/api/k2v/mod.rs | 8 +
 src/api/k2v/range.rs | 96 ++
 src/api/k2v/router.rs | 252 +++++
 src/api/lib.rs | 22 +-
 src/api/router_macros.rs | 190 ++++
 src/api/s3/api_server.rs | 401 ++++++++
 src/api/s3/bucket.rs | 358 +++++++
 src/api/s3/copy.rs | 660 +++++++++++++
 src/api/s3/cors.rs | 442 +++++++++
 src/api/s3/delete.rs | 170 ++++
 src/api/s3/get.rs | 461 +++++++++
 src/api/s3/list.rs | 1337 ++++++++++++++++++++++++++
 src/api/s3/mod.rs | 14 +
 src/api/s3/post_object.rs | 507 ++++++++++
 src/api/s3/put.rs | 753 +++++++++++++++
 src/api/s3/router.rs | 1080 +++++++++++++++++++++
 src/api/s3/website.rs | 369 +++++++
 src/api/s3/xml.rs | 844 ++++++++++++++++
 src/api/s3_bucket.rs | 352 -------
 src/api/s3_copy.rs | 660 -------------
 src/api/s3_cors.rs | 442 ---------
 src/api/s3_delete.rs | 170 ----
 src/api/s3_get.rs | 461 ---------
 src/api/s3_list.rs | 1383 ---------------------------
 src/api/s3_post_object.rs | 499 ----------
 src/api/s3_put.rs | 753 ---------------
 src/api/s3_router.rs | 1278 ------------------------
 src/api/s3_website.rs | 369 -------
 src/api/s3_xml.rs | 844 ----------------
 src/api/signature/mod.rs | 9 +-
 src/api/signature/payload.rs | 15 +-
 src/api/signature/streaming.rs | 61 +-
 src/block/manager.rs | 2 +-
 src/garage/Cargo.toml | 8 +
 src/garage/admin.rs | 19 +-
 src/garage/cli/cmd.rs | 7 +-
 src/garage/repair.rs | 6 +-
 src/garage/server.rs | 26 +-
 src/garage/tests/common/client.rs | 2 +-
 src/garage/tests/common/custom_requester.rs | 55 +-
 src/garage/tests/common/garage.rs | 34 +-
 src/garage/tests/common/mod.rs | 11 +-
 src/garage/tests/k2v/batch.rs | 525 ++++++++++
 src/garage/tests/k2v/errorcodes.rs | 141 +++
 src/garage/tests/k2v/item.rs | 719 ++++++++++++++
 src/garage/tests/k2v/mod.rs | 18 +
 src/garage/tests/k2v/poll.rs | 98 ++
 src/garage/tests/k2v/simple.rs | 40 +
 src/garage/tests/lib.rs | 8 +-
 src/garage/tests/list.rs | 615 ------------
 src/garage/tests/multipart.rs | 415 --------
 src/garage/tests/objects.rs | 266 ------
 src/garage/tests/s3/list.rs | 615 ++++++++++++
 src/garage/tests/s3/mod.rs | 6 +
 src/garage/tests/s3/multipart.rs | 415 ++++++++
 src/garage/tests/s3/objects.rs | 266 ++++++
 src/garage/tests/s3/simple.rs | 31 +
 src/garage/tests/s3/streaming_signature.rs | 185 ++++
 src/garage/tests/s3/website.rs | 324 +++++++
 src/garage/tests/simple.rs | 31 -
 src/garage/tests/streaming_signature.rs | 185 ----
 src/garage/tests/website.rs | 342 -------
 src/model/Cargo.toml | 5 +
 src/model/block_ref_table.rs | 74 --
 src/model/garage.rs | 97 +-
 src/model/helper/bucket.rs | 3 +-
 src/model/index_counter.rs | 305 ++++++
 src/model/k2v/causality.rs | 96 ++
 src/model/k2v/counter_table.rs | 20 +
 src/model/k2v/item_table.rs | 291 ++++++
 src/model/k2v/mod.rs | 7 +
 src/model/k2v/poll.rs | 50 +
 src/model/k2v/rpc.rs | 343 +++++++
 src/model/lib.rs | 9 +-
 src/model/object_table.rs | 334 -------
 src/model/s3/block_ref_table.rs | 74 ++
 src/model/s3/mod.rs | 3 +
 src/model/s3/object_table.rs | 337 +++++++
 src/model/s3/version_table.rs | 207 ++++
 src/model/version_table.rs | 204 ----
 src/rpc/Cargo.toml | 1 +
 src/table/data.rs | 98 +-
 src/table/schema.rs | 2 +-
 src/table/table.rs | 126 ++-
 src/table/util.rs | 18 +-
 src/util/Cargo.toml | 3 +
 src/util/config.rs | 16 +-
 src/util/error.rs | 3 +
 src/web/web_server.rs | 4 +-
 103 files changed, 15835 insertions(+), 10570 deletions(-)
 create mode 100644 doc/drafts/k2v-spec.md
 create mode 100755 k2v_test.py
 delete mode 100644 src/api/api_server.rs
 create mode 100644 src/api/generic_server.rs
 create mode 100644 src/api/k2v/api_server.rs
 create mode 100644 src/api/k2v/batch.rs
 create mode 100644 src/api/k2v/index.rs
 create mode 100644 src/api/k2v/item.rs
 create mode 100644 src/api/k2v/mod.rs
 create mode 100644 src/api/k2v/range.rs
 create mode 100644 src/api/k2v/router.rs
 create mode 100644 src/api/router_macros.rs
 create mode 100644 src/api/s3/api_server.rs
 create mode 100644 src/api/s3/bucket.rs
 create mode 100644 src/api/s3/copy.rs
 create mode 100644 src/api/s3/cors.rs
 create mode 100644 src/api/s3/delete.rs
 create mode 100644 src/api/s3/get.rs
 create mode 100644 src/api/s3/list.rs
 create mode 100644 src/api/s3/mod.rs
 create mode 100644 src/api/s3/post_object.rs
 create mode 100644 src/api/s3/put.rs
 create mode 100644 src/api/s3/router.rs
 create mode 100644 src/api/s3/website.rs
 create mode 100644 src/api/s3/xml.rs
 delete mode 100644 src/api/s3_bucket.rs
 delete mode 100644 src/api/s3_copy.rs
 delete mode 100644 src/api/s3_cors.rs
 delete mode 100644 src/api/s3_delete.rs
 delete mode 100644 src/api/s3_get.rs
 delete mode 100644 src/api/s3_list.rs
 delete mode 100644 src/api/s3_post_object.rs
 delete mode 100644 src/api/s3_put.rs
 delete mode 100644 src/api/s3_router.rs
 delete mode 100644 src/api/s3_website.rs
 delete mode 100644 src/api/s3_xml.rs
 create mode 100644 src/garage/tests/k2v/batch.rs
 create mode 100644 src/garage/tests/k2v/errorcodes.rs
 create mode 100644 src/garage/tests/k2v/item.rs
 create mode 100644 src/garage/tests/k2v/mod.rs
 create mode 100644 src/garage/tests/k2v/poll.rs
 create mode 100644 src/garage/tests/k2v/simple.rs
 delete mode 100644 src/garage/tests/list.rs
 delete mode 100644 src/garage/tests/multipart.rs
 delete mode 100644 src/garage/tests/objects.rs
 create mode 100644 src/garage/tests/s3/list.rs
 create mode 100644 src/garage/tests/s3/mod.rs
 create mode 100644 src/garage/tests/s3/multipart.rs
 create mode 100644 src/garage/tests/s3/objects.rs
 create mode 100644 src/garage/tests/s3/simple.rs
 create mode 100644 src/garage/tests/s3/streaming_signature.rs
 create mode 100644 src/garage/tests/s3/website.rs
 delete mode 100644 src/garage/tests/simple.rs
 delete mode 100644 src/garage/tests/streaming_signature.rs
 delete mode 100644 src/garage/tests/website.rs
 delete mode 100644 src/model/block_ref_table.rs
 create mode 100644 src/model/index_counter.rs
 create mode 100644 src/model/k2v/causality.rs
 create mode 100644 src/model/k2v/counter_table.rs
 create mode 100644 src/model/k2v/item_table.rs
 create mode 100644 src/model/k2v/mod.rs
 create mode 100644 src/model/k2v/poll.rs
 create mode 100644 src/model/k2v/rpc.rs
 delete mode 100644 src/model/object_table.rs
 create mode 100644 src/model/s3/block_ref_table.rs
 create mode 100644 src/model/s3/mod.rs
 create mode 100644 src/model/s3/object_table.rs
 create mode 100644 src/model/s3/version_table.rs
 delete mode 100644 src/model/version_table.rs

diff --git a/Cargo.lock b/Cargo.lock
index 1469b37b..de1ae5cd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -29,6 +29,16 @@
 version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
+[[package]]
+name = "assert-json-diff"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50f1c3703dd33532d7f0ca049168930e9099ecac238e23cf932f3a69c42f06da"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "async-stream"
 version = "0.3.3"
@@ -821,8 +831,10 @@ dependencies = [
 name = "garage"
 version = "0.7.0"
 dependencies = [
+ "assert-json-diff",
 "async-trait",
 "aws-sdk-s3",
+ "base64",
 "bytes 1.1.0",
 "chrono",
 "futures",
@@ -846,6 +858,7 @@
 "rmp-serde 0.15.5",
 "serde",
 "serde_bytes",
+ "serde_json",
 "sha2",
 "sled",
 "static_init",
@@ -876,6 +889,7 @@ dependencies = [
 name = "garage_api"
 version = "0.7.0"
 dependencies = [
+ "async-trait",
 "base64",
 "bytes 1.1.0",
 "chrono",
@@ -886,6 +900,7 @@
 "futures-util",
 "garage_block",
 "garage_model 0.7.0",
+ "garage_rpc 0.7.0",
 "garage_table 0.7.0",
 "garage_util 0.7.0",
 "hex",
@@ -966,6 +981,8 @@ version = "0.7.0"
 dependencies = [
 "arc-swap",
 "async-trait",
+ "base64",
+ "blake2",
 "err-derive 0.3.1",
 "futures",
 "futures-util",
diff --git a/Cargo.nix b/Cargo.nix
index 49f0a3d0..39f409b6 100644
--- a/Cargo.nix
+++ b/Cargo.nix
@@ -98,6 +98,17 @@ in
     ];
   });
+  "registry+https://github.com/rust-lang/crates.io-index".assert-json-diff."2.0.1" = overridableMkRustCrate (profileName: rec {
+    name = "assert-json-diff";
+    version = "2.0.1";
+    registry = "registry+https://github.com/rust-lang/crates.io-index";
+    src = fetchCratesIo { inherit name version; sha256 =
"50f1c3703dd33532d7f0ca049168930e9099ecac238e23cf932f3a69c42f06da"; }; + dependencies = { + serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; + serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.79" { inherit profileName; }; + }; + }); + "registry+https://github.com/rust-lang/crates.io-index".async-stream."0.3.3" = overridableMkRustCrate (profileName: rec { name = "async-stream"; version = "0.3.3"; @@ -554,7 +565,7 @@ in [ "default" ] [ "libc" ] [ "oldtime" ] - (lib.optional (rootFeatures' ? "garage_rpc") "serde") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "serde") [ "std" ] [ "time" ] [ "winapi" ] @@ -563,7 +574,7 @@ in libc = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; num_integer = rustPackages."registry+https://github.com/rust-lang/crates.io-index".num-integer."0.1.44" { inherit profileName; }; num_traits = rustPackages."registry+https://github.com/rust-lang/crates.io-index".num-traits."0.2.14" { inherit profileName; }; - ${ if rootFeatures' ? "garage_rpc" then "serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; time = rustPackages."registry+https://github.com/rust-lang/crates.io-index".time."0.1.44" { inherit profileName; }; ${ if hostPlatform.isWindows then "winapi" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".winapi."0.3.9" { inherit profileName; }; }; @@ -619,7 +630,7 @@ in registry = "registry+https://github.com/rust-lang/crates.io-index"; src = fetchCratesIo { inherit name version; sha256 = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b"; }; dependencies = { - ${ if hostPlatform.config == "aarch64-linux-android" || hostPlatform.parsed.cpu.name == "aarch64" && hostPlatform.parsed.kernel.name == "linux" || hostPlatform.config == "aarch64-apple-darwin" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; + ${ if hostPlatform.config == "aarch64-linux-android" || hostPlatform.config == "aarch64-apple-darwin" || hostPlatform.parsed.cpu.name == "aarch64" && hostPlatform.parsed.kernel.name == "linux" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; }; }); @@ -1178,6 +1189,10 @@ in version = "0.7.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/garage"); + features = builtins.concatLists [ + [ "k2v" ] + [ "kubernetes-discovery" ] + ]; dependencies = { async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }; bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; @@ -1206,11 +1221,14 @@ in tracing = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }; }; devDependencies = { + assert_json_diff = rustPackages."registry+https://github.com/rust-lang/crates.io-index".assert-json-diff."2.0.1" { inherit profileName; }; aws_sdk_s3 = 
rustPackages."registry+https://github.com/rust-lang/crates.io-index".aws-sdk-s3."0.8.0" { inherit profileName; }; + base64 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }; chrono = rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.19" { inherit profileName; }; hmac = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.10.1" { inherit profileName; }; http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.6" { inherit profileName; }; hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }; + serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.79" { inherit profileName; }; sha2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.9" { inherit profileName; }; static_init = rustPackages."registry+https://github.com/rust-lang/crates.io-index".static_init."1.0.2" { inherit profileName; }; }; @@ -1241,41 +1259,46 @@ in version = "0.7.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/api"); - dependencies = { - base64 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }; - bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; - chrono = rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.19" { inherit profileName; }; - crypto_mac = rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-mac."0.10.1" { inherit profileName; }; - err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }; - form_urlencoded = rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }; - futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }; - futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }; - garage_block = rustPackages."unknown".garage_block."0.7.0" { inherit profileName; }; - garage_model = rustPackages."unknown".garage_model."0.7.0" { inherit profileName; }; - garage_table = rustPackages."unknown".garage_table."0.7.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.7.0" { inherit profileName; }; - hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; - hmac = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.10.1" { inherit profileName; }; - http = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.6" { inherit profileName; }; - http_range = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-range."0.1.5" { inherit profileName; }; - httpdate = rustPackages."registry+https://github.com/rust-lang/crates.io-index".httpdate."0.3.2" { inherit profileName; }; - hyper = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }; - idna = rustPackages."registry+https://github.com/rust-lang/crates.io-index".idna."0.2.3" { inherit profileName; }; - md5 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".md-5."0.9.1" { inherit profileName; 
}; - multer = rustPackages."registry+https://github.com/rust-lang/crates.io-index".multer."2.0.2" { inherit profileName; }; - nom = rustPackages."registry+https://github.com/rust-lang/crates.io-index".nom."7.1.1" { inherit profileName; }; - opentelemetry = rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }; - percent_encoding = rustPackages."registry+https://github.com/rust-lang/crates.io-index".percent-encoding."2.1.0" { inherit profileName; }; - pin_project = rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project."1.0.10" { inherit profileName; }; - quick_xml = rustPackages."registry+https://github.com/rust-lang/crates.io-index".quick-xml."0.21.0" { inherit profileName; }; - roxmltree = rustPackages."registry+https://github.com/rust-lang/crates.io-index".roxmltree."0.14.1" { inherit profileName; }; - serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; - serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; - serde_json = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.79" { inherit profileName; }; - sha2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.9" { inherit profileName; }; - tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }; - tracing = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }; - url = rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.2.2" { inherit profileName; }; + features = builtins.concatLists [ + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_api") "k2v") + ]; + dependencies = { + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "async_trait" else null } = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "base64" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "bytes" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "chrono" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.19" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "crypto_mac" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".crypto-mac."0.10.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "err_derive" else null } = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? 
"garage_web" then "form_urlencoded" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "futures" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "futures_util" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "garage_block" else null } = rustPackages."unknown".garage_block."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "garage_model" else null } = rustPackages."unknown".garage_model."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "garage_rpc" else null } = rustPackages."unknown".garage_rpc."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "garage_table" else null } = rustPackages."unknown".garage_table."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "garage_util" else null } = rustPackages."unknown".garage_util."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "hex" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "hmac" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.10.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "http" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.6" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "http_range" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".http-range."0.1.5" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "httpdate" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".httpdate."0.3.2" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "hyper" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "idna" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".idna."0.2.3" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "md5" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".md-5."0.9.1" { inherit profileName; }; + ${ if rootFeatures' ? 
"garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "multer" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".multer."2.0.2" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "nom" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".nom."7.1.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "opentelemetry" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "percent_encoding" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".percent-encoding."2.1.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "pin_project" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project."1.0.10" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "quick_xml" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".quick-xml."0.21.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "roxmltree" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".roxmltree."0.14.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "serde_bytes" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "serde_json" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.79" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "sha2" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sha2."0.9.9" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "tokio" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_web" then "tracing" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? 
"garage_web" then "url" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".url."2.2.2" { inherit profileName; }; }; }); @@ -1336,28 +1359,33 @@ in version = "0.7.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/model"); - dependencies = { - arc_swap = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }; - async_trait = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }; - err_derive = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }; - futures = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }; - futures_util = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }; - garage_block = rustPackages."unknown".garage_block."0.7.0" { inherit profileName; }; - garage_model_050 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" { inherit profileName; }; - garage_rpc = rustPackages."unknown".garage_rpc."0.7.0" { inherit profileName; }; - garage_table = rustPackages."unknown".garage_table."0.7.0" { inherit profileName; }; - garage_util = rustPackages."unknown".garage_util."0.7.0" { inherit profileName; }; - hex = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; - netapp = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.4.4" { inherit profileName; }; - opentelemetry = rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }; - rand = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }; - rmp_serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; - serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; - serde_bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; - sled = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }; - tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }; - tracing = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }; - zstd = rustPackages."registry+https://github.com/rust-lang/crates.io-index".zstd."0.9.2+zstd.1.5.1" { inherit profileName; }; + features = builtins.concatLists [ + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model") "k2v") + ]; + dependencies = { + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "arc_swap" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? 
"garage_web" then "async_trait" else null } = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "base64" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "blake2" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.9.2" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "err_derive" else null } = buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "futures" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "futures_util" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "garage_block" else null } = rustPackages."unknown".garage_block."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "garage_model_050" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".garage_model."0.5.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "garage_rpc" else null } = rustPackages."unknown".garage_rpc."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "garage_table" else null } = rustPackages."unknown".garage_table."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "garage_util" else null } = rustPackages."unknown".garage_util."0.7.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "hex" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "netapp" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.4.4" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? 
"garage_web" then "opentelemetry" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "rand" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "rmp_serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "serde_bytes" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "sled" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".sled."0.34.7" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "tokio" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "tracing" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tracing."0.1.32" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_web" then "zstd" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".zstd."0.9.2+zstd.1.5.1" { inherit profileName; }; }; }); @@ -1395,11 +1423,11 @@ in registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/rpc"); features = builtins.concatLists [ - (lib.optional (rootFeatures' ? "garage_rpc") "k8s-openapi") - (lib.optional (rootFeatures' ? "garage_rpc") "kube") - (lib.optional (rootFeatures' ? "garage_rpc") "kubernetes-discovery") - (lib.optional (rootFeatures' ? "garage_rpc") "openssl") - (lib.optional (rootFeatures' ? "garage_rpc") "schemars") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "k8s-openapi") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "kube") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "kubernetes-discovery") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "openssl") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "schemars") ]; dependencies = { ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? 
"garage_web" then "arc_swap" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }; @@ -1412,16 +1440,16 @@ in ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "gethostname" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "hex" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "hyper" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }; - ${ if rootFeatures' ? "garage_rpc" then "k8s_openapi" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.13.1" { inherit profileName; }; - ${ if rootFeatures' ? "garage_rpc" then "kube" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.62.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "k8s_openapi" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.13.1" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "kube" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kube."0.62.0" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "sodiumoxide" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "netapp" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.4.4" { inherit profileName; }; - ${ if rootFeatures' ? "garage_rpc" then "openssl" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".openssl."0.10.38" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "openssl" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".openssl."0.10.38" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? 
"garage_web" then "opentelemetry" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "pnet_datalink" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.28.0" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "rand" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "rmp_serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }; - ${ if rootFeatures' ? "garage_rpc" then "schemars" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "schemars" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "serde" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "serde_bytes" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }; ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web" then "serde_json" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_json."1.0.79" { inherit profileName; }; @@ -1510,6 +1538,9 @@ in version = "0.7.0"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/util"); + features = builtins.concatLists [ + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_model" || rootFeatures' ? 
"garage_util") "k2v") + ]; dependencies = { blake2 = rustPackages."registry+https://github.com/rust-lang/crates.io-index".blake2."0.9.2" { inherit profileName; }; chrono = rustPackages."registry+https://github.com/rust-lang/crates.io-index".chrono."0.4.19" { inherit profileName; }; @@ -2361,7 +2392,7 @@ in [ "os-poll" ] ]; dependencies = { - ${ if hostPlatform.parsed.kernel.name == "wasi" || hostPlatform.isUnix then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; + ${ if hostPlatform.isUnix || hostPlatform.parsed.kernel.name == "wasi" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }; ${ if hostPlatform.isWindows then "miow" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".miow."0.3.7" { inherit profileName; }; ${ if hostPlatform.isWindows then "ntapi" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".ntapi."0.3.7" { inherit profileName; }; @@ -3342,7 +3373,7 @@ in ]; dependencies = { ${ if hostPlatform.parsed.kernel.name == "android" || hostPlatform.parsed.kernel.name == "linux" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; - ${ if hostPlatform.parsed.kernel.name == "android" || hostPlatform.parsed.kernel.name == "linux" || hostPlatform.parsed.kernel.name == "dragonfly" || hostPlatform.parsed.kernel.name == "freebsd" || hostPlatform.parsed.kernel.name == "illumos" || hostPlatform.parsed.kernel.name == "netbsd" || hostPlatform.parsed.kernel.name == "openbsd" || hostPlatform.parsed.kernel.name == "solaris" then "once_cell" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.10.0" { inherit profileName; }; + ${ if hostPlatform.parsed.kernel.name == "dragonfly" || hostPlatform.parsed.kernel.name == "freebsd" || hostPlatform.parsed.kernel.name == "illumos" || hostPlatform.parsed.kernel.name == "netbsd" || hostPlatform.parsed.kernel.name == "openbsd" || hostPlatform.parsed.kernel.name == "solaris" || hostPlatform.parsed.kernel.name == "android" || hostPlatform.parsed.kernel.name == "linux" then "once_cell" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".once_cell."1.10.0" { inherit profileName; }; ${ if hostPlatform.parsed.cpu.name == "i686" || hostPlatform.parsed.cpu.name == "x86_64" || (hostPlatform.parsed.cpu.name == "aarch64" || hostPlatform.parsed.cpu.name == "armv6l" || hostPlatform.parsed.cpu.name == "armv7l") && (hostPlatform.parsed.kernel.name == "android" || hostPlatform.parsed.kernel.name == "fuchsia" || hostPlatform.parsed.kernel.name == "linux") then "spin" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".spin."0.5.2" { inherit profileName; }; untrusted = rustPackages."registry+https://github.com/rust-lang/crates.io-index".untrusted."0.7.1" { inherit profileName; }; ${ if hostPlatform.parsed.cpu.name == "wasm32" && hostPlatform.parsed.vendor.name == "unknown" && hostPlatform.parsed.kernel.name == "unknown" && hostPlatform.parsed.abi.name == "" then "web_sys" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".web-sys."0.3.56" { inherit profileName; }; @@ -3556,12 +3587,12 @@ in registry = 
"registry+https://github.com/rust-lang/crates.io-index"; src = fetchCratesIo { inherit name version; sha256 = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556"; }; features = builtins.concatLists [ - (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "OSX_10_9") - (lib.optional (rootFeatures' ? "garage_rpc") "default") + [ "OSX_10_9" ] + [ "default" ] ]; dependencies = { - ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "core_foundation_sys" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".core-foundation-sys."0.8.3" { inherit profileName; }; - ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "libc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; + core_foundation_sys = rustPackages."registry+https://github.com/rust-lang/crates.io-index".core-foundation-sys."0.8.3" { inherit profileName; }; + libc = rustPackages."registry+https://github.com/rust-lang/crates.io-index".libc."0.2.121" { inherit profileName; }; }; }); @@ -3652,12 +3683,12 @@ in src = fetchCratesIo { inherit name version; sha256 = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95"; }; features = builtins.concatLists [ [ "default" ] - (lib.optional (rootFeatures' ? "garage_rpc") "indexmap") - (lib.optional (rootFeatures' ? "garage_rpc") "preserve_order") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "indexmap") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "preserve_order") [ "std" ] ]; dependencies = { - ${ if rootFeatures' ? "garage_rpc" then "indexmap" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".indexmap."1.8.0" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "indexmap" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".indexmap."1.8.0" { inherit profileName; }; itoa = rustPackages."registry+https://github.com/rust-lang/crates.io-index".itoa."1.0.1" { inherit profileName; }; ryu = rustPackages."registry+https://github.com/rust-lang/crates.io-index".ryu."1.0.9" { inherit profileName; }; serde = rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.136" { inherit profileName; }; @@ -4157,8 +4188,8 @@ in (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_admin" || rootFeatures' ? "garage_api" || rootFeatures' ? "garage_block" || rootFeatures' ? "garage_model" || rootFeatures' ? "garage_rpc" || rootFeatures' ? "garage_table" || rootFeatures' ? "garage_web") "default") [ "futures-io" ] (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "io") - (lib.optional (rootFeatures' ? "garage_rpc") "slab") - (lib.optional (rootFeatures' ? "garage_rpc") "time") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "slab") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? 
"garage_rpc") "time") ]; dependencies = { bytes = rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.1.0" { inherit profileName; }; @@ -4167,7 +4198,7 @@ in futures_sink = rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-sink."0.3.21" { inherit profileName; }; log = rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }; pin_project_lite = rustPackages."registry+https://github.com/rust-lang/crates.io-index".pin-project-lite."0.2.8" { inherit profileName; }; - ${ if rootFeatures' ? "garage_rpc" then "slab" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".slab."0.4.5" { inherit profileName; }; + ${ if rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc" then "slab" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".slab."0.4.5" { inherit profileName; }; tokio = rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }; }; }); @@ -4708,7 +4739,7 @@ in [ "in6addr" ] [ "inaddr" ] [ "ioapiset" ] - (lib.optional (rootFeatures' ? "garage_rpc") "knownfolders") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "knownfolders") (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "lmcons") (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "minschannel") [ "minwinbase" ] @@ -4718,13 +4749,13 @@ in [ "ntdef" ] [ "ntsecapi" ] [ "ntstatus" ] - (lib.optional (rootFeatures' ? "garage_rpc") "objbase") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "objbase") [ "processenv" ] [ "processthreadsapi" ] [ "profileapi" ] (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "schannel") (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "securitybaseapi") - (lib.optional (rootFeatures' ? "garage_rpc") "shlobj") + (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? "garage_rpc") "shlobj") (lib.optional (rootFeatures' ? "garage" || rootFeatures' ? 
"garage_rpc") "sspi") [ "std" ] [ "synchapi" ] @@ -4792,8 +4823,8 @@ in ${ if hostPlatform.config == "aarch64-pc-windows-msvc" || hostPlatform.config == "aarch64-uwp-windows-msvc" then "windows_aarch64_msvc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_aarch64_msvc."0.32.0" { inherit profileName; }; ${ if hostPlatform.config == "i686-uwp-windows-gnu" || hostPlatform.config == "i686-pc-windows-gnu" then "windows_i686_gnu" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_i686_gnu."0.32.0" { inherit profileName; }; ${ if hostPlatform.config == "i686-pc-windows-msvc" || hostPlatform.config == "i686-uwp-windows-msvc" then "windows_i686_msvc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_i686_msvc."0.32.0" { inherit profileName; }; - ${ if hostPlatform.config == "x86_64-uwp-windows-gnu" || hostPlatform.config == "x86_64-pc-windows-gnu" then "windows_x86_64_gnu" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_x86_64_gnu."0.32.0" { inherit profileName; }; - ${ if hostPlatform.config == "x86_64-uwp-windows-msvc" || hostPlatform.config == "x86_64-pc-windows-msvc" then "windows_x86_64_msvc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_x86_64_msvc."0.32.0" { inherit profileName; }; + ${ if hostPlatform.config == "x86_64-pc-windows-gnu" || hostPlatform.config == "x86_64-uwp-windows-gnu" then "windows_x86_64_gnu" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_x86_64_gnu."0.32.0" { inherit profileName; }; + ${ if hostPlatform.config == "x86_64-pc-windows-msvc" || hostPlatform.config == "x86_64-uwp-windows-msvc" then "windows_x86_64_msvc" else null } = rustPackages."registry+https://github.com/rust-lang/crates.io-index".windows_x86_64_msvc."0.32.0" { inherit profileName; }; }; }); diff --git a/Makefile b/Makefile index c0ebc075..c70be9da 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ .PHONY: doc all release shell all: - clear; cargo build + clear; cargo build --features k2v doc: cd doc/book; mdbook build diff --git a/doc/drafts/k2v-spec.md b/doc/drafts/k2v-spec.md new file mode 100644 index 00000000..08809069 --- /dev/null +++ b/doc/drafts/k2v-spec.md @@ -0,0 +1,680 @@ +# Specification of the Garage K2V API (K2V = Key/Key/Value) + +- We are storing triplets of the form `(partition key, sort key, value)` -> no + user-defined fields, the client is responsible of writing whatever he wants + in the value (typically an encrypted blob). Values are binary blobs, which + are always represented as their base64 encoding in the JSON API. Partition + keys and sort keys are utf8 strings. + +- Triplets are stored in buckets; each bucket stores a separate set of triplets + +- Bucket names and access keys are the same as for accessing the S3 API + +- K2V triplets exist separately from S3 objects. K2V triplets don't exist for + the S3 API, and S3 objects don't exist for the K2V API. + +- Values stored for triplets have associated causality information, that enables + Garage to detect concurrent writes. In case of concurrent writes, Garage + keeps the concurrent values until a further write supersedes the concurrent + values. This is the same method as Riak KV implements. 
The method used is + based on DVVS (dotted version vector sets), described in the paper "Scalable + and Accurate Causality Tracking for Eventually Consistent Data Stores", as + well as [here](https://github.com/ricardobcl/Dotted-Version-Vectors) + + +## Data format + +### Triple format + +Triples in K2V are constituted of three fields: + +- a partition key (`pk`), an utf8 string that defines in what partition the + triplet is stored; triplets in different partitions cannot be listed together + in a ReadBatch command, or deleted together in a DeleteBatch command: a + separate command must be included in the ReadBatch/DeleteBatch call for each + partition key in which the client wants to read/delete lists of items + +- a sort key (`sk`), an utf8 string that defines the index of the triplet inside its + partition; triplets are uniquely idendified by their partition key + sort key + +- a value (`v`), an opaque binary blob associated to the partition key + sort key; + they are transmitted as binary when possible but in most case in the JSON API + they will be represented as strings using base64 encoding; a value can also + be `null` to indicate a deleted triplet (a `null` value is called a tombstone) + +### Causality information + +K2V supports storing several concurrent values associated to a pk+sk, in the +case where insertion or deletion operations are detected to be concurrent (i.e. +there is not one that was aware of the other, they are not causally dependant +one on the other). In practice, it even looks more like the opposite: to +overwrite a previously existing value, the client must give a "causality token" +that "proves" (not in a cryptographic sense) that it had seen a previous value. +Otherwise, the value written will not overwrite an existing value, it will just +create a new concurrent value. + +The causality token is a binary/b64-encoded representation of a context, +specified below. + +A set of concurrent values looks like this: + +``` +(node1, tdiscard1, (v1, t1), (v2, t2)) ; tdiscard1 < t1 < t2 +(node2, tdiscard2, (v3, t3) ; tdiscard2 < t3 +``` + +`tdiscard` for a node `i` means that all values inserted by node `i` with times +`<= tdiscard` are obsoleted, i.e. have been read by a client that overwrote it +afterwards. + +The associated context would be the following: `[(node1, t2), (node2, t3)]`, +i.e. if a node reads this set of values and inserts a new values, we will now +have `tdiscard1 = t2` and `tdiscard2 = t3`, to indicate that values v1, v2 and v3 +are obsoleted by the new write. + +**Basic insertion.** To insert a new value `v4` with context `[(node1, t2), (node2, t3)]`, in a +simple case where there was no insertion in-between reading the value +mentionned above and writing `v4`, and supposing that node2 receives the +InsertItem query: + +- `node2` generates a timestamp `t4` such that `t4 > t3`. +- the new state is as follows: + +``` +(node1, tdiscard1', ()) ; tdiscard1' = t2 +(node2, tdiscard2', (v4, t4)) ; tdiscard2' = t3 +``` + +**A more complex insertion example.** In the general case, other intermediate values could have +been written before `v4` with context `[(node1, t2), (node2, t3)]` is sent to the system. +For instance, here is a possible sequence of events: + +1. First we have the set of values v1, v2 and v3 described above. + A node reads it, it obtains values v1, v2 and v3 with context `[(node1, t2), (node2, t3)]`. + +2. A node writes a value `v5` with context `[(node1, t1)]`, i.e. `v5` is only a + successor of v1 but not of v2 or v3. 
+
+**A more complex insertion example.** In the general case, other intermediate values could have
+been written before `v4` with context `[(node1, t2), (node2, t3)]` is sent to the system.
+For instance, here is a possible sequence of events:
+
+1. First we have the set of values v1, v2 and v3 described above.
+   A node reads it and obtains values v1, v2 and v3 with context `[(node1, t2), (node2, t3)]`.
+
+2. A node writes a value `v5` with context `[(node1, t1)]`, i.e. `v5` is only a
+   successor of v1 but not of v2 or v3. Suppose node1 receives the write; it
+   will generate a new timestamp `t5` larger than all of the timestamps it
+   knows of, i.e. `t5 > t2`. We will now have:
+
+```
+(node1, tdiscard1'', (v2, t2), (v5, t5)) ; tdiscard1'' = t1 < t2 < t5
+(node2, tdiscard2, (v3, t3)) ; tdiscard2 < t3
+```
+
+3. Now `v4` is written with context `[(node1, t2), (node2, t3)]`, and node2
+   processes the query. It will generate `t4 > t3` and the state will become:
+
+```
+(node1, tdiscard1', (v5, t5)) ; tdiscard1' = t2 < t5
+(node2, tdiscard2', (v4, t4)) ; tdiscard2' = t3
+```
+
+**Generic algorithm for handling insertions:** a certain node n handles the
+InsertItem and is responsible for the correctness of this procedure.
+
+1. Lock the key (or the whole table?) at this node to prevent concurrent
+   updates of the value that would mess things up
+2. Read the current set of values
+3. Generate a new timestamp that is larger than the largest timestamp for node n
+4. Add the inserted value to the list of values of node n
+5. Update the discard times to be the times set in the context, and accordingly
+   discard overwritten values
+6. Release the lock
+7. Propagate the updated value to other nodes
+8. Return to the user when propagation has reached the write quorum
+   (propagation to other nodes continues asynchronously)
+
+**Encoding of contexts:**
+
+Contexts consist of a list of (node id, timestamp) pairs.
+They are encoded in binary as follows:
+
+```
+checksum: u64, [ node: u64, timestamp: u64 ]*
+```
+
+The checksum is just the XOR of all of the node IDs and timestamps.
+
+Once encoded in binary, contexts are written and transmitted in base64.
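+
+As an illustration, the following sketch encodes and decodes such a context.
+This draft does not specify a byte order or a base64 variant, so the sketch
+assumes little-endian integers and standard base64; treat both as assumptions:
+
+```python
+import base64
+import struct
+
+def encode_context(pairs):
+    """pairs: list of (node_id, timestamp), both u64."""
+    checksum = 0
+    for node, ts in pairs:
+        checksum ^= node ^ ts
+    raw = struct.pack("<Q", checksum)
+    for node, ts in pairs:
+        raw += struct.pack("<QQ", node, ts)
+    return base64.b64encode(raw).decode()
+
+def decode_context(token):
+    raw = base64.b64decode(token)
+    (checksum,) = struct.unpack_from("<Q", raw, 0)
+    pairs = [struct.unpack_from("<QQ", raw, 8 + 16 * i)
+             for i in range((len(raw) - 8) // 16)]
+    actual = 0
+    for node, ts in pairs:
+        actual ^= node ^ ts
+    assert actual == checksum, "invalid causality token"
+    return pairs
+
+token = encode_context([(1, 2), (2, 3)])  # [(node1, t2), (node2, t3)]
+assert decode_context(token) == [(1, 2), (2, 3)]
+```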
+
+
+### Indexing
+
+K2V keeps an index, a secondary data structure that is updated asynchronously
+and that keeps track of the number of triplets stored for each partition key.
+This allows easy listing of all of the partition keys for which triplets exist
+in a bucket, as the partition key becomes the sort key in the index.
+
+How indexing works:
+
+- Each node keeps a local count of how many items it stores for each partition,
+  in a local Sled tree that is updated atomically when an item is modified.
+- These local counters are asynchronously stored in the index table, which is
+  a regular Garage table spread over the network. Counters are stored as LWW values,
+  so the final table will have the following structure:
+
+```
+- pk: bucket
+- sk: partition key for which we are counting
+- v: lwwmap (node id -> number of items)
+```
+
+The final number of items present in the partition can be estimated by taking
+the maximum of the values (i.e. the value for the node that announces having
+the most items for that partition). In most cases the values for different node
+IDs should all be the same; more precisely, three node IDs should map to the
+same non-zero value, and all other node IDs that are present are tombstones
+that map to zeroes. Note that we need to filter out values from nodes that are
+no longer part of the cluster layout, as when nodes are removed they won't
+necessarily have had the time to set their counters to zero.
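+
+In other words, the estimation rule is the following (a sketch; node ids are
+shown as strings for readability):
+
+```python
+def estimate_partition_count(counters, layout_nodes):
+    """counters: LWW map (node id -> item count) from the index table.
+    layout_nodes: ids of the nodes currently in the cluster layout.
+    Counters from departed nodes are ignored, since those nodes may not
+    have had the time to zero them before leaving."""
+    live = [n for node, n in counters.items() if node in layout_nodes]
+    return max(live, default=0)
+
+# Three replicas agree; a departed node left a stale counter behind:
+counters = {"node1": 42, "node2": 42, "node3": 42, "old_node": 7}
+assert estimate_partition_count(counters, {"node1", "node2", "node3"}) == 42
+```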
+
+
+## Important details
+
+**THIS SECTION CONTAINS A FEW WARNINGS ON THE K2V API WHICH ARE IMPORTANT
+TO UNDERSTAND IN ORDER TO USE IT CORRECTLY.**
+
+- **Internal server errors on updates do not mean that the update isn't stored.**
+  K2V will return an internal server error when it cannot reach a quorum of nodes on
+  which to save an updated value. However, the value may still be stored on just one
+  node, which will then propagate it to other nodes asynchronously via anti-entropy.
+
+- **Batch operations are not transactions.** When calling InsertBatch or DeleteBatch,
+  items may appear partially inserted/deleted while the operation is being processed.
+  More importantly, if InsertBatch or DeleteBatch returns an internal server error,
+  some of the items to be inserted/deleted might end up inserted/deleted on the server,
+  while others may still have their old value.
+
+- **Concurrent values are deduplicated.** When inserting a value for a key,
+  Garage might internally end up storing the value several times if there are
+  network errors. These values will end up as concurrent values for a key,
+  with the same byte string (or `null` for a deletion).
+  Garage fixes this by deduplicating concurrent values when they are returned to the
+  user on read operations. Importantly, *Garage does not differentiate between duplicate
+  concurrent values due to the user making the same call twice, and duplicates due to
+  Garage doing an internal retry*. This means that all duplicate concurrent values are
+  deduplicated when an item is read: if the user concurrently inserts the same value
+  twice, they will only read it once.
+
+## API Endpoints
+
+### Operations on single items
+
+**ReadItem: `GET /<bucket>/<partition key>?sort_key=<sort key>`**
+
+
+Query parameters:
+
+| name | default value | meaning |
+| - | - | - |
+| `sort_key` | **mandatory** | The sort key of the item to read |
+
+Returns the item with the specified partition key and sort key. Values can be
+returned in either of two ways:
+
+1. a JSON array of base64-encoded values, or `null`'s for tombstones, with
+   header `Content-Type: application/json`
+
+2. in the case where there are no concurrent values, the single present value
+   can be returned directly as the response body (or an HTTP 204 NO CONTENT for
+   a tombstone), with header `Content-Type: application/octet-stream`
+
+The choice between return formats 1 and 2 is directed by the `Accept` HTTP header:
+
+- if the `Accept` header is not present, format 1 is always used
+
+- if `Accept` contains `application/json` but not `application/octet-stream`,
+  format 1 is always used
+
+- if `Accept` contains `application/octet-stream` but not `application/json`,
+  format 2 is used when there is a single value, and an HTTP error 409 (HTTP
+  409 CONFLICT) is returned in the case of multiple concurrent values
+  (including concurrent tombstones)
+
+- if `Accept` contains both, format 2 is used when there is a single value, and
+  format 1 is used as a fallback in case of concurrent values
+
+- if `Accept` is present but contains neither, HTTP 406 NOT ACCEPTABLE is raised
+
+Example query:
+
+```
+GET /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
+```
+
+Example response:
+
+```json
+HTTP/1.1 200 OK
+X-Garage-Causality-Token: opaquetoken123
+Content-Type: application/json
+
+[
+  "b64cryptoblob123",
+  "b64cryptoblob'123"
+]
+```
+
+Example response in case the item is a tombstone:
+
+```
+HTTP/1.1 200 OK
+X-Garage-Causality-Token: opaquetoken999
+Content-Type: application/json
+
+[
+  null
+]
+```
+
+Example query 2:
+
+```
+GET /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
+Accept: application/octet-stream
+```
+
+Example response if multiple concurrent versions exist:
+
+```
+HTTP/1.1 409 CONFLICT
+X-Garage-Causality-Token: opaquetoken123
+Content-Type: application/octet-stream
+```
+
+Example response in case of a single value:
+
+```
+HTTP/1.1 200 OK
+X-Garage-Causality-Token: opaquetoken123
+Content-Type: application/octet-stream
+
+cryptoblob123
+```
+
+Example response in case of a single value that is a tombstone:
+
+```
+HTTP/1.1 204 NO CONTENT
+X-Garage-Causality-Token: opaquetoken123
+Content-Type: application/octet-stream
+```
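+
+The following sketch shows how a client could call ReadItem from Python. The
+endpoint, bucket and credentials are placeholders; `AWSRequestsAuth` (from the
+aws-requests-auth package) is one possible SigV4 signer, used here with the
+custom `k2v` service name:
+
+```python
+import base64
+import requests
+from aws_requests_auth.aws_auth import AWSRequestsAuth
+
+auth = AWSRequestsAuth(aws_access_key='GK...',       # placeholder
+                       aws_secret_access_key='...',  # placeholder
+                       aws_host='localhost:3812',
+                       aws_region='us-east-1',
+                       aws_service='k2v')
+
+resp = requests.get('http://localhost:3812/my_bucket/mailboxes',
+                    params={'sort_key': 'INBOX'},
+                    headers={'Accept': 'application/json'},  # force format 1
+                    auth=auth)
+resp.raise_for_status()
+causality_token = resp.headers['X-Garage-Causality-Token']
+# Format 1 is a JSON list; null entries are tombstones.
+values = [None if v is None else base64.b64decode(v) for v in resp.json()]
+```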
+
+
+**PollItem: `GET /<bucket>/<partition key>?sort_key=<sort key>&causality_token=<causality token>`**
+
+This endpoint will block until a new value is written to a key.
+
+The GET parameter `causality_token` should be set to the causality
+token returned with the last read of the key, so that K2V knows
+which values are concurrent with, or newer than, the ones that the
+client previously knew.
+
+This endpoint returns the new value in the same format as ReadItem.
+If no new value is written and the timeout elapses,
+an HTTP 304 NOT MODIFIED is returned.
+
+Query parameters:
+
+| name | default value | meaning |
+| - | - | - |
+| `sort_key` | **mandatory** | The sort key of the item to read |
+| `causality_token` | **mandatory** | The causality token of the last known value or set of values |
+| `timeout` | 300 | The timeout before 304 NOT MODIFIED is returned if the value isn't updated |
+
+The timeout can be set to any number of seconds, with a maximum of 600 seconds (10 minutes).
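+
+A typical client for this endpoint is a watch loop, sketched below (same
+placeholder endpoint as above; the initial token comes from a previous read):
+
+```python
+import requests
+
+auth = None  # replace with a SigV4 signer, as in the ReadItem sketch above
+token = 'opaquetoken123'  # placeholder: token from a previous ReadItem
+
+while True:
+    resp = requests.get('http://localhost:3812/my_bucket/mailboxes',
+                        params={'sort_key': 'INBOX',
+                                'causality_token': token,
+                                'timeout': '60'},
+                        headers={'Accept': 'application/json'},
+                        auth=auth)
+    if resp.status_code == 304:
+        continue  # timeout elapsed with no new value: poll again
+    resp.raise_for_status()
+    token = resp.headers['X-Garage-Causality-Token']
+    print('new value(s):', resp.json())
+```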
+
+
+**InsertItem: `PUT /<bucket>/<partition key>?sort_key=<sort key>`**
+
+Inserts a single item. This request does not use JSON: the body is sent directly as a binary blob.
+
+To supersede previous values, the HTTP header `X-Garage-Causality-Token` should
+be set to the causality token returned by a previous read on this key. This
+header can be omitted for the first writes to the key.
+
+Example query:
+
+```
+PUT /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
+X-Garage-Causality-Token: opaquetoken123
+
+myblobblahblahblah
+```
+
+Example response:
+
+```
+HTTP/1.1 200 OK
+```
+
+**DeleteItem: `DELETE /<bucket>/<partition key>?sort_key=<sort key>`**
+
+Deletes a single item. The HTTP header `X-Garage-Causality-Token` must be set
+to the causality token returned by a previous read on this key, to indicate
+which versions of the value should be deleted. The request will be rejected if
+`X-Garage-Causality-Token` is not set.
+
+Example query:
+
+```
+DELETE /my_bucket/mailboxes?sort_key=INBOX HTTP/1.1
+X-Garage-Causality-Token: opaquetoken123
+```
+
+Example response:
+
+```
+HTTP/1.1 204 NO CONTENT
+```
+
+### Operations on index
+
+**ReadIndex: `GET /<bucket>?start=<start>&end=<end>&limit=<limit>`**
+
+Lists all partition keys in the bucket for which some triplets exist, and gives
+for each the number of triplets (or an approximation thereof: this value is
+updated asynchronously, and is thus eventually consistent).
+
+Query parameters:
+
+| name | default value | meaning |
+| - | - | - |
+| `prefix` | `null` | Restrict listing to partition keys that start with this prefix |
+| `start` | `null` | First partition key to list, in lexicographical order |
+| `end` | `null` | Last partition key to list (excluded) |
+| `limit` | `null` | Maximum number of partition keys to list |
+| `reverse` | `false` | Iterate in reverse lexicographical order |
+
+The response consists of a JSON object that repeats the parameters of the query and gives the result (see below).
+
+The listing starts at partition key `start`, or if not specified at the
+smallest partition key that exists. It returns partition keys in increasing
+order, or decreasing order if `reverse` is set to `true`,
+and stops when either of the following conditions is met:
+
+1. if `end` is specified, the partition key `end` is reached or surpassed (if it
+   is reached exactly, it is not included in the result)
+
+2. if `limit` is specified, `limit` partition keys have been listed
+
+3. no more partition keys are available to list
+
+In case 2, and if there are more partition keys to list before condition 1
+triggers, then in the result `more` is set to `true` and `nextStart` is set to
+the first partition key that couldn't be listed due to the limit. In the first
+case (if the listing stopped because of the `end` parameter), `more` is not set
+and the `nextStart` key is not specified.
+
+Note that if `reverse` is set to `true`, `start` is the highest key
+(in lexicographical order) for which values are returned.
+This means that if an `end` is specified, it must be smaller than `start`,
+otherwise no values will be returned.
+
+Example query:
+
+```
+GET /my_bucket HTTP/1.1
+```
+
+Example response:
+
+```json
+HTTP/1.1 200 OK
+
+{
+  prefix: null,
+  start: null,
+  end: null,
+  limit: null,
+  reverse: false,
+  partitionKeys: [
+    { pk: "keys", n: 3043 },
+    { pk: "mailbox:INBOX", n: 42 },
+    { pk: "mailbox:Junk", n: 2991 },
+    { pk: "mailbox:Trash", n: 10 },
+    { pk: "mailboxes", n: 3 },
+  ],
+  more: false,
+  nextStart: null,
+}
+```
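+
+Using `more` and `nextStart`, a client can paginate through the whole index
+(a sketch, with the same placeholder endpoint and `auth` as above):
+
+```python
+import requests
+
+auth = None  # replace with a SigV4 signer, as in the ReadItem sketch above
+
+start = None
+while True:
+    params = {'limit': '100'}
+    if start is not None:
+        params['start'] = start
+    page = requests.get('http://localhost:3812/my_bucket',
+                        params=params, auth=auth).json()
+    for entry in page['partitionKeys']:
+        print(entry['pk'], entry['n'])
+    if not page['more']:
+        break
+    start = page['nextStart']  # first key that didn't fit in this page
+```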
+
+
+### Operations on batches of items
+
+**InsertBatch: `POST /<bucket>`**
+
+Simple insertion and deletion of triplets. The body is just a list of items to
+insert in the following format:
+`{ pk: "<partition key>", sk: "<sort key>", ct: "<causality token>"|null, v: "<value>"|null }`.
+
+The causality token should be the one returned in a previous read request (e.g.
+by ReadItem or ReadBatch), to indicate that this write takes into account the
+values that were returned from these reads, and supersedes them causally. If
+the triplet is inserted for the first time, the causality token should be set to
+`null`.
+
+The value is expected to be a base64-encoded binary blob. The value `null` can
+also be used to delete the triplet while preserving causality information: this
+makes it possible to know if a delete has happened concurrently with an insert, in which
+case both are preserved and returned on reads (see below).
+
+Partition keys and sort keys are utf8 strings which are stored sorted by
+lexicographical ordering of their binary representation.
+
+Example query:
+
+```json
+POST /my_bucket HTTP/1.1
+
+[
+  { pk: "mailbox:INBOX", sk: "001892831", ct: "opaquetoken321", v: "b64cryptoblob321updated" },
+  { pk: "mailbox:INBOX", sk: "001892912", ct: null, v: "b64cryptoblob444" },
+  { pk: "mailbox:INBOX", sk: "001892932", ct: "opaquetoken654", v: null },
+]
+```
+
+Example response:
+
+```
+HTTP/1.1 200 OK
+```
+
+
+**ReadBatch: `POST /<bucket>?search`**, or alternatively
+**ReadBatch: `SEARCH /<bucket>`**
+
+Batch read of triplets in a bucket.
+
+The request body is a JSON list of searches, each of which specifies a range of
+items to get (to get single items, set `singleItem` to `true`). A search is a
+JSON struct with the following fields:
+
+| name | default value | meaning |
+| - | - | - |
+| `partitionKey` | **mandatory** | The partition key in which to search |
+| `prefix` | `null` | Restrict items to list to those whose sort keys start with this prefix |
+| `start` | `null` | The sort key of the first item to read |
+| `end` | `null` | The sort key of the last item to read (excluded) |
+| `limit` | `null` | The maximum number of items to return |
+| `reverse` | `false` | Iterate in reverse lexicographical order on sort keys |
+| `singleItem` | `false` | Whether to return only the item with sort key `start` |
+| `conflictsOnly` | `false` | Whether to return only items that have several concurrent values |
+| `tombstones` | `false` | Whether or not to return tombstone lines to indicate the presence of old deleted items |
+
+
+For each of the searches, triplets are listed and returned separately. The
+semantics of `prefix`, `start`, `end`, `limit` and `reverse` are the same as for ReadIndex. The
+additional parameter `singleItem` allows getting a single item, whose sort key
+is the one given in `start`. Parameters `conflictsOnly` and `tombstones`
+control additional filters on the items that are returned.
+
+The result is a list with one entry per search: each entry is a JSON object
+specified similarly to the result of ReadIndex, but that lists triplets within
+a partition key.
+
+The format of returned tuples is as follows: `{ sk: "<sort key>", ct: "<causality token>", v: ["<value1>", ...] }`, with the following fields:
+
+- `sk` (sort key): any unicode string used as a sort key
+
+- `ct` (causality token): an opaque token served by the server (generally
+  base64-encoded) to be used in subsequent writes to this key
+
+- `v` (list of values): each value is a binary blob, always base64-encoded;
+  the list contains multiple items when concurrent values exist
+
+- in case of concurrent update and deletion, a `null` is added to the list of concurrent values
+
+- if the `tombstones` query parameter is set to `true`, tombstones are returned
+  for items that have been deleted (this can be useful for inserting after an
+  item that has been deleted, so that the insert is not considered
+  concurrent with the delete). Tombstones are returned as tuples in the
+  same format with only `null` values
+
+Example query:
+
+```json
+POST /my_bucket?search HTTP/1.1
+
+[
+  {
+    partitionKey: "mailboxes",
+  },
+  {
+    partitionKey: "mailbox:INBOX",
+    start: "001892831",
+    limit: 3,
+  },
+  {
+    partitionKey: "keys",
+    start: "0",
+    singleItem: true,
+  },
+]
+```
+
+Example associated response body:
+
+```json
+HTTP/1.1 200 OK
+
+[
+  {
+    partitionKey: "mailboxes",
+    prefix: null,
+    start: null,
+    end: null,
+    limit: null,
+    reverse: false,
+    conflictsOnly: false,
+    tombstones: false,
+    singleItem: false,
+    items: [
+      { sk: "INBOX", ct: "opaquetoken123", v: ["b64cryptoblob123", "b64cryptoblob'123"] },
+      { sk: "Trash", ct: "opaquetoken456", v: ["b64cryptoblob456"] },
+      { sk: "Junk", ct: "opaquetoken789", v: ["b64cryptoblob789"] },
+    ],
+    more: false,
+    nextStart: null,
+  },
+  {
+    partitionKey: "mailbox:INBOX",
+    prefix: null,
+    start: "001892831",
+    end: null,
+    limit: 3,
+    reverse: false,
+    conflictsOnly: false,
+    tombstones: false,
+    singleItem: false,
+    items: [
+      { sk: "001892831", ct: "opaquetoken321", v: ["b64cryptoblob321"] },
+      { sk: "001892832", ct: "opaquetoken654", v: ["b64cryptoblob654"] },
+      { sk: "001892874", ct: "opaquetoken987", v: ["b64cryptoblob987"] },
+    ],
+    more: true,
+    nextStart: "001892898",
+  },
+  {
+    partitionKey: "keys",
+    prefix: null,
+    start: "0",
+    end: null,
+    conflictsOnly: false,
+    tombstones: false,
+    limit: null,
+    reverse: false,
+    singleItem: true,
+    items: [
+      { sk: "0", ct: "opaquetoken999", v: ["b64binarystuff999"] },
+    ],
+    more: false,
+    nextStart: null,
+  },
+]
+```
+
+
+
+**DeleteBatch: `POST /<bucket>?delete`**
+
+Batch deletion of triplets. The request format is the same as for `POST
+/<bucket>?search` to indicate items or ranges of items, except that here they
+are deleted instead of returned, and only the fields `partitionKey`, `prefix`, `start`,
+`end`, and `singleItem` are supported. Causality information is not given by
+the user: this request will internally list all triplets and write deletion
+markers that supersede all of the versions that have been read.
+
+This request returns, for each series of items to be deleted, the number of
+matching items that have been found and deleted.
+
+Example query:
+
+```json
+POST /my_bucket?delete HTTP/1.1
+
+[
+  {
+    partitionKey: "mailbox:OldMailbox",
+  },
+  {
+    partitionKey: "mailbox:INBOX",
+    start: "0018928321",
+    singleItem: true,
+  },
+]
+```
+
+Example response:
+
+```
+HTTP/1.1 200 OK
+
+[
+  {
+    partitionKey: "mailbox:OldMailbox",
+    prefix: null,
+    start: null,
+    end: null,
+    singleItem: false,
+    deletedItems: 35,
+  },
+  {
+    partitionKey: "mailbox:INBOX",
+    prefix: null,
+    start: "0018928321",
+    end: null,
+    singleItem: true,
+    deletedItems: 1,
+  },
+]
+```
+
+
+## Internals: causality tokens
+
+The method used is based on DVVS (dotted version vector sets). See:
+
+- the paper "Scalable and Accurate Causality Tracking for Eventually Consistent Data Stores"
+- <https://github.com/ricardobcl/Dotted-Version-Vectors>
+
+For DVVS to work, write operations (at each node) must take a lock on the data table.
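+
+A sketch of what this lock requirement means in practice: the read-modify-write
+of steps 2-6 of the generic insertion algorithm must be atomic per key, otherwise
+two concurrent InsertItem handlers could both read the same old value set and one
+of the two updates would be lost. For example (illustrative only, with an
+in-memory table):
+
+```python
+import threading
+from collections import defaultdict
+
+_locks = defaultdict(threading.Lock)  # one lock per (partition key, sort key)
+_store = {}                           # node-local table: (pk, sk) -> value set
+
+def locked_update(pk, sk, update_fn):
+    """Run `update_fn` (steps 2-5) under the per-key lock (steps 1 and 6)."""
+    with _locks[(pk, sk)]:
+        new_state = update_fn(_store.get((pk, sk)))
+        _store[(pk, sk)] = new_state
+    return new_state  # propagation to other nodes (steps 7-8) happens after
+```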
diff --git a/k2v_test.py b/k2v_test.py new file mode 100755 index 00000000..3219056e --- /dev/null +++ b/k2v_test.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python + +import os +import requests +from datetime import datetime + +# let's talk to our AWS Elasticsearch cluster +#from requests_aws4auth import AWS4Auth +#auth = AWS4Auth('GK31c2f218a2e44f485b94239e', +# 'b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835', +# 'us-east-1', +# 's3') + +from aws_requests_auth.aws_auth import AWSRequestsAuth +auth = AWSRequestsAuth(aws_access_key='GK31c2f218a2e44f485b94239e', + aws_secret_access_key='b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835', + aws_host='localhost:3812', + aws_region='us-east-1', + aws_service='k2v') + + +print("-- ReadIndex") +response = requests.get('http://localhost:3812/alex', + auth=auth) +print(response.headers) +print(response.text) + + +sort_keys = ["a", "b", "c", "d"] + +for sk in sort_keys: + print("-- (%s) Put initial (no CT)"%sk) + response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth, + data='{}: Hello, world!'.format(datetime.timestamp(datetime.now()))) + print(response.headers) + print(response.text) + + print("-- Get") + response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth) + print(response.headers) + print(response.text) + ct = response.headers["x-garage-causality-token"] + + print("-- ReadIndex") + response = requests.get('http://localhost:3812/alex', + auth=auth) + print(response.headers) + print(response.text) + + print("-- Put with CT") + response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth, + headers={'x-garage-causality-token': ct}, + data='{}: Good bye, world!'.format(datetime.timestamp(datetime.now()))) + print(response.headers) + print(response.text) + + print("-- Get") + response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth) + print(response.headers) + print(response.text) + + print("-- Put again with same CT (concurrent)") + response = requests.put('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth, + headers={'x-garage-causality-token': ct}, + data='{}: Concurrent value, oops'.format(datetime.timestamp(datetime.now()))) + print(response.headers) + print(response.text) + +for sk in sort_keys: + print("-- (%s) Get"%sk) + response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth) + print(response.headers) + print(response.text) + ct = response.headers["x-garage-causality-token"] + + print("-- Delete") + response = requests.delete('http://localhost:3812/alex/root?sort_key=%s'%sk, + headers={'x-garage-causality-token': ct}, + auth=auth) + print(response.headers) + print(response.text) + +print("-- ReadIndex") +response = requests.get('http://localhost:3812/alex', + auth=auth) +print(response.headers) +print(response.text) + +print("-- InsertBatch") +response = requests.post('http://localhost:3812/alex', + auth=auth, + data=''' +[ + {"pk": "root", "sk": "a", "ct": null, "v": "aW5pdGlhbCB0ZXN0Cg=="}, + {"pk": "root", "sk": "b", "ct": null, "v": "aW5pdGlhbCB0ZXN1Cg=="}, + {"pk": "root", "sk": "c", "ct": null, "v": "aW5pdGlhbCB0ZXN2Cg=="} +] +''') +print(response.headers) +print(response.text) + +print("-- ReadIndex") +response = requests.get('http://localhost:3812/alex', + auth=auth) +print(response.headers) +print(response.text) + +for sk in sort_keys: + print("-- (%s) Get"%sk) + response = requests.get('http://localhost:3812/alex/root?sort_key=%s'%sk, + auth=auth) 
+ print(response.headers) + print(response.text) + ct = response.headers["x-garage-causality-token"] + +print("-- ReadBatch") +response = requests.post('http://localhost:3812/alex?search', + auth=auth, + data=''' +[ + {"partitionKey": "root"}, + {"partitionKey": "root", "tombstones": true}, + {"partitionKey": "root", "tombstones": true, "limit": 2}, + {"partitionKey": "root", "start": "c", "singleItem": true}, + {"partitionKey": "root", "start": "b", "end": "d", "tombstones": true} +] +''') +print(response.headers) +print(response.text) + + +print("-- DeleteBatch") +response = requests.post('http://localhost:3812/alex?delete', + auth=auth, + data=''' +[ + {"partitionKey": "root", "start": "b", "end": "c"} +] +''') +print(response.headers) +print(response.text) + +print("-- ReadBatch") +response = requests.post('http://localhost:3812/alex?search', + auth=auth, + data=''' +[ + {"partitionKey": "root"} +] +''') +print(response.headers) +print(response.text) diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 5e96b081..29b26e5e 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -18,7 +18,9 @@ garage_model = { version = "0.7.0", path = "../model" } garage_table = { version = "0.7.0", path = "../table" } garage_block = { version = "0.7.0", path = "../block" } garage_util = { version = "0.7.0", path = "../util" } +garage_rpc = { version = "0.7.0", path = "../rpc" } +async-trait = "0.1.7" base64 = "0.13" bytes = "1.0" chrono = "0.4" @@ -52,3 +54,6 @@ quick-xml = { version = "0.21", features = [ "serialize" ] } url = "2.1" opentelemetry = "0.17" + +[features] +k2v = [ "garage_util/k2v", "garage_model/k2v" ] diff --git a/src/api/api_server.rs b/src/api/api_server.rs deleted file mode 100644 index e7b86d9e..00000000 --- a/src/api/api_server.rs +++ /dev/null @@ -1,645 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use chrono::{DateTime, NaiveDateTime, Utc}; -use futures::future::Future; -use futures::prelude::*; -use hyper::header; -use hyper::server::conn::AddrStream; -use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Method, Request, Response, Server}; - -use opentelemetry::{ - global, - metrics::{Counter, ValueRecorder}, - trace::{FutureExt, TraceContextExt, Tracer}, - Context, KeyValue, -}; - -use garage_util::data::*; -use garage_util::error::Error as GarageError; -use garage_util::metrics::{gen_trace_id, RecordDuration}; - -use garage_model::garage::Garage; -use garage_model::key_table::Key; - -use garage_table::util::*; - -use crate::error::*; -use crate::signature::compute_scope; -use crate::signature::payload::check_payload_signature; -use crate::signature::streaming::SignedPayloadStream; -use crate::signature::LONG_DATETIME; - -use crate::helpers::*; -use crate::s3_bucket::*; -use crate::s3_copy::*; -use crate::s3_cors::*; -use crate::s3_delete::*; -use crate::s3_get::*; -use crate::s3_list::*; -use crate::s3_post_object::handle_post_object; -use crate::s3_put::*; -use crate::s3_router::{Authorization, Endpoint}; -use crate::s3_website::*; - -struct ApiMetrics { - request_counter: Counter, - error_counter: Counter, - request_duration: ValueRecorder, -} - -impl ApiMetrics { - fn new() -> Self { - let meter = global::meter("garage/api"); - Self { - request_counter: meter - .u64_counter("api.request_counter") - .with_description("Number of API calls to the various S3 API endpoints") - .init(), - error_counter: meter - .u64_counter("api.error_counter") - .with_description( - "Number of API calls to the various S3 API endpoints that resulted in 
errors", - ) - .init(), - request_duration: meter - .f64_value_recorder("api.request_duration") - .with_description("Duration of API calls to the various S3 API endpoints") - .init(), - } - } -} - -/// Run the S3 API server -pub async fn run_api_server( - garage: Arc, - shutdown_signal: impl Future, -) -> Result<(), GarageError> { - let addr = &garage.config.s3_api.api_bind_addr; - - let metrics = Arc::new(ApiMetrics::new()); - - let service = make_service_fn(|conn: &AddrStream| { - let garage = garage.clone(); - let metrics = metrics.clone(); - - let client_addr = conn.remote_addr(); - async move { - Ok::<_, GarageError>(service_fn(move |req: Request| { - let garage = garage.clone(); - let metrics = metrics.clone(); - - handler(garage, metrics, req, client_addr) - })) - } - }); - - let server = Server::bind(addr).serve(service); - - let graceful = server.with_graceful_shutdown(shutdown_signal); - info!("API server listening on http://{}", addr); - - graceful.await?; - Ok(()) -} - -async fn handler( - garage: Arc, - metrics: Arc, - req: Request, - addr: SocketAddr, -) -> Result, GarageError> { - let uri = req.uri().clone(); - info!("{} {} {}", addr, req.method(), uri); - debug!("{:?}", req); - - let tracer = opentelemetry::global::tracer("garage"); - let span = tracer - .span_builder("S3 API call (unknown)") - .with_trace_id(gen_trace_id()) - .with_attributes(vec![ - KeyValue::new("method", format!("{}", req.method())), - KeyValue::new("uri", req.uri().to_string()), - ]) - .start(&tracer); - - let res = handler_stage2(garage.clone(), metrics, req) - .with_context(Context::current_with_span(span)) - .await; - - match res { - Ok(x) => { - debug!("{} {:?}", x.status(), x.headers()); - Ok(x) - } - Err(e) => { - let body: Body = Body::from(e.aws_xml(&garage.config.s3_api.s3_region, uri.path())); - let mut http_error_builder = Response::builder() - .status(e.http_status_code()) - .header("Content-Type", "application/xml"); - - if let Some(header_map) = http_error_builder.headers_mut() { - e.add_headers(header_map) - } - - let http_error = http_error_builder.body(body)?; - - if e.http_status_code().is_server_error() { - warn!("Response: error {}, {}", e.http_status_code(), e); - } else { - info!("Response: error {}, {}", e.http_status_code(), e); - } - Ok(http_error) - } - } -} - -async fn handler_stage2( - garage: Arc, - metrics: Arc, - req: Request, -) -> Result, Error> { - let authority = req - .headers() - .get(header::HOST) - .ok_or_bad_request("Host header required")? 
- .to_str()?; - - let host = authority_to_host(authority)?; - - let bucket_name = garage - .config - .s3_api - .root_domain - .as_ref() - .and_then(|root_domain| host_to_bucket(&host, root_domain)); - - let (endpoint, bucket_name) = Endpoint::from_request(&req, bucket_name.map(ToOwned::to_owned))?; - debug!("Endpoint: {:?}", endpoint); - - let current_context = Context::current(); - let current_span = current_context.span(); - current_span.update_name::(format!("S3 API {}", endpoint.name())); - current_span.set_attribute(KeyValue::new("endpoint", endpoint.name())); - current_span.set_attribute(KeyValue::new( - "bucket", - bucket_name.clone().unwrap_or_default(), - )); - - let metrics_tags = &[KeyValue::new("api_endpoint", endpoint.name())]; - - let res = handler_stage3(garage, req, endpoint, bucket_name) - .record_duration(&metrics.request_duration, &metrics_tags[..]) - .await; - - metrics.request_counter.add(1, &metrics_tags[..]); - - let status_code = match &res { - Ok(r) => r.status(), - Err(e) => e.http_status_code(), - }; - if status_code.is_client_error() || status_code.is_server_error() { - metrics.error_counter.add( - 1, - &[ - metrics_tags[0].clone(), - KeyValue::new("status_code", status_code.as_str().to_string()), - ], - ); - } - - res -} - -async fn handler_stage3( - garage: Arc, - req: Request, - endpoint: Endpoint, - bucket_name: Option, -) -> Result, Error> { - // Some endpoints are processed early, before we even check for an API key - if let Endpoint::PostObject = endpoint { - return handle_post_object(garage, req, bucket_name.unwrap()).await; - } - if let Endpoint::Options = endpoint { - return handle_options_s3api(garage, &req, bucket_name).await; - } - - let (api_key, mut content_sha256) = check_payload_signature(&garage, &req).await?; - let api_key = api_key.ok_or_else(|| { - Error::Forbidden("Garage does not support anonymous access yet".to_string()) - })?; - - let req = match req.headers().get("x-amz-content-sha256") { - Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => { - let signature = content_sha256 - .take() - .ok_or_bad_request("No signature provided")?; - - let secret_key = &api_key - .state - .as_option() - .ok_or_internal_error("Deleted key state")? - .secret_key; - - let date = req - .headers() - .get("x-amz-date") - .ok_or_bad_request("Missing X-Amz-Date field")? - .to_str()?; - let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME) - .ok_or_bad_request("Invalid date")?; - let date: DateTime = DateTime::from_utc(date, Utc); - - let scope = compute_scope(&date, &garage.config.s3_api.s3_region); - let signing_hmac = crate::signature::signing_hmac( - &date, - secret_key, - &garage.config.s3_api.s3_region, - "s3", - ) - .ok_or_internal_error("Unable to build signing HMAC")?; - - req.map(move |body| { - Body::wrap_stream( - SignedPayloadStream::new( - body.map_err(Error::from), - signing_hmac, - date, - &scope, - signature, - ) - .map_err(Error::from), - ) - }) - } - _ => req, - }; - - let bucket_name = match bucket_name { - None => return handle_request_without_bucket(garage, req, api_key, endpoint).await, - Some(bucket) => bucket.to_string(), - }; - - // Special code path for CreateBucket API endpoint - if let Endpoint::CreateBucket {} = endpoint { - return handle_create_bucket(&garage, req, content_sha256, api_key, bucket_name).await; - } - - let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?; - let bucket = garage - .bucket_table - .get(&EmptyKey, &bucket_id) - .await? 
- .filter(|b| !b.state.is_deleted()) - .ok_or(Error::NoSuchBucket)?; - - let allowed = match endpoint.authorization_type() { - Authorization::Read => api_key.allow_read(&bucket_id), - Authorization::Write => api_key.allow_write(&bucket_id), - Authorization::Owner => api_key.allow_owner(&bucket_id), - _ => unreachable!(), - }; - - if !allowed { - return Err(Error::Forbidden( - "Operation is not allowed for this key.".to_string(), - )); - } - - // Look up what CORS rule might apply to response. - // Requests for methods different than GET, HEAD or POST - // are always preflighted, i.e. the browser should make - // an OPTIONS call before to check it is allowed - let matching_cors_rule = match *req.method() { - Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)?, - _ => None, - }; - - let resp = match endpoint { - Endpoint::HeadObject { - key, part_number, .. - } => handle_head(garage, &req, bucket_id, &key, part_number).await, - Endpoint::GetObject { - key, part_number, .. - } => handle_get(garage, &req, bucket_id, &key, part_number).await, - Endpoint::UploadPart { - key, - part_number, - upload_id, - } => { - handle_put_part( - garage, - req, - bucket_id, - &key, - part_number, - &upload_id, - content_sha256, - ) - .await - } - Endpoint::CopyObject { key } => handle_copy(garage, &api_key, &req, bucket_id, &key).await, - Endpoint::UploadPartCopy { - key, - part_number, - upload_id, - } => { - handle_upload_part_copy( - garage, - &api_key, - &req, - bucket_id, - &key, - part_number, - &upload_id, - ) - .await - } - Endpoint::PutObject { key } => { - handle_put(garage, req, bucket_id, &key, content_sha256).await - } - Endpoint::AbortMultipartUpload { key, upload_id } => { - handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await - } - Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await, - Endpoint::CreateMultipartUpload { key } => { - handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await - } - Endpoint::CompleteMultipartUpload { key, upload_id } => { - handle_complete_multipart_upload( - garage, - req, - &bucket_name, - bucket_id, - &key, - &upload_id, - content_sha256, - ) - .await - } - Endpoint::CreateBucket {} => unreachable!(), - Endpoint::HeadBucket {} => { - let empty_body: Body = Body::from(vec![]); - let response = Response::builder().body(empty_body).unwrap(); - Ok(response) - } - Endpoint::DeleteBucket {} => { - handle_delete_bucket(&garage, bucket_id, bucket_name, api_key).await - } - Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage), - Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(), - Endpoint::ListObjects { - delimiter, - encoding_type, - marker, - max_keys, - prefix, - } => { - handle_list( - garage, - &ListObjectsQuery { - common: ListQueryCommon { - bucket_name, - bucket_id, - delimiter: delimiter.map(|d| d.to_string()), - page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000), - prefix: prefix.unwrap_or_default(), - urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), - }, - is_v2: false, - marker, - continuation_token: None, - start_after: None, - }, - ) - .await - } - Endpoint::ListObjectsV2 { - delimiter, - encoding_type, - max_keys, - prefix, - continuation_token, - start_after, - list_type, - .. 
- } => { - if list_type == "2" { - handle_list( - garage, - &ListObjectsQuery { - common: ListQueryCommon { - bucket_name, - bucket_id, - delimiter: delimiter.map(|d| d.to_string()), - page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000), - urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), - prefix: prefix.unwrap_or_default(), - }, - is_v2: true, - marker: None, - continuation_token, - start_after, - }, - ) - .await - } else { - Err(Error::BadRequest(format!( - "Invalid endpoint: list-type={}", - list_type - ))) - } - } - Endpoint::ListMultipartUploads { - delimiter, - encoding_type, - key_marker, - max_uploads, - prefix, - upload_id_marker, - } => { - handle_list_multipart_upload( - garage, - &ListMultipartUploadsQuery { - common: ListQueryCommon { - bucket_name, - bucket_id, - delimiter: delimiter.map(|d| d.to_string()), - page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000), - prefix: prefix.unwrap_or_default(), - urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), - }, - key_marker, - upload_id_marker, - }, - ) - .await - } - Endpoint::ListParts { - key, - max_parts, - part_number_marker, - upload_id, - } => { - handle_list_parts( - garage, - &ListPartsQuery { - bucket_name, - bucket_id, - key, - upload_id, - part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)), - max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000), - }, - ) - .await - } - Endpoint::DeleteObjects {} => { - handle_delete_objects(garage, bucket_id, req, content_sha256).await - } - Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await, - Endpoint::PutBucketWebsite {} => { - handle_put_website(garage, bucket_id, req, content_sha256).await - } - Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await, - Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await, - Endpoint::PutBucketCors {} => handle_put_cors(garage, bucket_id, req, content_sha256).await, - Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await, - endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), - }; - - // If request was a success and we have a CORS rule that applies to it, - // add the corresponding CORS headers to the response - let mut resp_ok = resp?; - if let Some(rule) = matching_cors_rule { - add_cors_headers(&mut resp_ok, rule) - .ok_or_internal_error("Invalid bucket CORS configuration")?; - } - - Ok(resp_ok) -} - -async fn handle_request_without_bucket( - garage: Arc, - _req: Request, - api_key: Key, - endpoint: Endpoint, -) -> Result, Error> { - match endpoint { - Endpoint::ListBuckets => handle_list_buckets(&garage, &api_key).await, - endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), - } -} - -#[allow(clippy::ptr_arg)] -pub async fn resolve_bucket( - garage: &Garage, - bucket_name: &String, - api_key: &Key, -) -> Result { - let api_key_params = api_key - .state - .as_option() - .ok_or_internal_error("Key should not be deleted at this point")?; - - if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) { - Ok(*bucket_id) - } else { - Ok(garage - .bucket_helper() - .resolve_global_bucket_name(bucket_name) - .await? - .ok_or(Error::NoSuchBucket)?) - } -} - -/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in -/// the host header of the request -/// -/// S3 internally manages only buckets and keys. This function splits -/// an HTTP path to get the corresponding bucket name and key. 
-pub fn parse_bucket_key<'a>( - path: &'a str, - host_bucket: Option<&'a str>, -) -> Result<(&'a str, Option<&'a str>), Error> { - let path = path.trim_start_matches('/'); - - if let Some(bucket) = host_bucket { - if !path.is_empty() { - return Ok((bucket, Some(path))); - } else { - return Ok((bucket, None)); - } - } - - let (bucket, key) = match path.find('/') { - Some(i) => { - let key = &path[i + 1..]; - if !key.is_empty() { - (&path[..i], Some(key)) - } else { - (&path[..i], None) - } - } - None => (path, None), - }; - if bucket.is_empty() { - return Err(Error::BadRequest("No bucket specified".to_string())); - } - Ok((bucket, key)) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_bucket_containing_a_key() -> Result<(), Error> { - let (bucket, key) = parse_bucket_key("/my_bucket/a/super/file.jpg", None)?; - assert_eq!(bucket, "my_bucket"); - assert_eq!(key.expect("key must be set"), "a/super/file.jpg"); - Ok(()) - } - - #[test] - fn parse_bucket_containing_no_key() -> Result<(), Error> { - let (bucket, key) = parse_bucket_key("/my_bucket/", None)?; - assert_eq!(bucket, "my_bucket"); - assert!(key.is_none()); - let (bucket, key) = parse_bucket_key("/my_bucket", None)?; - assert_eq!(bucket, "my_bucket"); - assert!(key.is_none()); - Ok(()) - } - - #[test] - fn parse_bucket_containing_no_bucket() { - let parsed = parse_bucket_key("", None); - assert!(parsed.is_err()); - let parsed = parse_bucket_key("/", None); - assert!(parsed.is_err()); - let parsed = parse_bucket_key("////", None); - assert!(parsed.is_err()); - } - - #[test] - fn parse_bucket_with_vhost_and_key() -> Result<(), Error> { - let (bucket, key) = parse_bucket_key("/a/super/file.jpg", Some("my-bucket"))?; - assert_eq!(bucket, "my-bucket"); - assert_eq!(key.expect("key must be set"), "a/super/file.jpg"); - Ok(()) - } - - #[test] - fn parse_bucket_with_vhost_no_key() -> Result<(), Error> { - let (bucket, key) = parse_bucket_key("", Some("my-bucket"))?; - assert_eq!(bucket, "my-bucket"); - assert!(key.is_none()); - let (bucket, key) = parse_bucket_key("/", Some("my-bucket"))?; - assert_eq!(bucket, "my-bucket"); - assert!(key.is_none()); - Ok(()) - } -} diff --git a/src/api/error.rs b/src/api/error.rs index f53ed1fd..4b7254d2 100644 --- a/src/api/error.rs +++ b/src/api/error.rs @@ -7,7 +7,7 @@ use hyper::{HeaderMap, StatusCode}; use garage_model::helper::error::Error as HelperError; use garage_util::error::Error as GarageError; -use crate::s3_xml; +use crate::s3::xml as s3_xml; /// Errors of this crate #[derive(Debug, Error)] @@ -100,6 +100,10 @@ pub enum Error { #[error(display = "Bad request: {}", _0)] BadRequest(String), + /// The client asked for an invalid return format (invalid Accept header) + #[error(display = "Not acceptable: {}", _0)] + NotAcceptable(String), + /// The client sent a request for an action not supported by garage #[error(display = "Unimplemented action: {}", _0)] NotImplemented(String), @@ -140,6 +144,7 @@ impl Error { Error::BucketNotEmpty | Error::BucketAlreadyExists => StatusCode::CONFLICT, Error::PreconditionFailed => StatusCode::PRECONDITION_FAILED, Error::Forbidden(_) => StatusCode::FORBIDDEN, + Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE, Error::InternalError( GarageError::Timeout | GarageError::RemoteError(_) diff --git a/src/api/generic_server.rs b/src/api/generic_server.rs new file mode 100644 index 00000000..9281e596 --- /dev/null +++ b/src/api/generic_server.rs @@ -0,0 +1,202 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use async_trait::async_trait; + 
+use futures::future::Future; + +use hyper::server::conn::AddrStream; +use hyper::service::{make_service_fn, service_fn}; +use hyper::{Body, Request, Response, Server}; + +use opentelemetry::{ + global, + metrics::{Counter, ValueRecorder}, + trace::{FutureExt, SpanRef, TraceContextExt, Tracer}, + Context, KeyValue, +}; + +use garage_util::error::Error as GarageError; +use garage_util::metrics::{gen_trace_id, RecordDuration}; + +use crate::error::*; + +pub(crate) trait ApiEndpoint: Send + Sync + 'static { + fn name(&self) -> &'static str; + fn add_span_attributes(&self, span: SpanRef<'_>); +} + +#[async_trait] +pub(crate) trait ApiHandler: Send + Sync + 'static { + const API_NAME: &'static str; + const API_NAME_DISPLAY: &'static str; + + type Endpoint: ApiEndpoint; + + fn parse_endpoint(&self, r: &Request) -> Result; + async fn handle( + &self, + req: Request, + endpoint: Self::Endpoint, + ) -> Result, Error>; +} + +pub(crate) struct ApiServer { + region: String, + api_handler: A, + + // Metrics + request_counter: Counter, + error_counter: Counter, + request_duration: ValueRecorder, +} + +impl ApiServer { + pub fn new(region: String, api_handler: A) -> Arc { + let meter = global::meter("garage/api"); + Arc::new(Self { + region, + api_handler, + request_counter: meter + .u64_counter(format!("api.{}.request_counter", A::API_NAME)) + .with_description(format!( + "Number of API calls to the various {} API endpoints", + A::API_NAME_DISPLAY + )) + .init(), + error_counter: meter + .u64_counter(format!("api.{}.error_counter", A::API_NAME)) + .with_description(format!( + "Number of API calls to the various {} API endpoints that resulted in errors", + A::API_NAME_DISPLAY + )) + .init(), + request_duration: meter + .f64_value_recorder(format!("api.{}.request_duration", A::API_NAME)) + .with_description(format!( + "Duration of API calls to the various {} API endpoints", + A::API_NAME_DISPLAY + )) + .init(), + }) + } + + pub async fn run_server( + self: Arc, + bind_addr: SocketAddr, + shutdown_signal: impl Future, + ) -> Result<(), GarageError> { + let service = make_service_fn(|conn: &AddrStream| { + let this = self.clone(); + + let client_addr = conn.remote_addr(); + async move { + Ok::<_, GarageError>(service_fn(move |req: Request| { + let this = this.clone(); + + this.handler(req, client_addr) + })) + } + }); + + let server = Server::bind(&bind_addr).serve(service); + + let graceful = server.with_graceful_shutdown(shutdown_signal); + info!( + "{} API server listening on http://{}", + A::API_NAME_DISPLAY, + bind_addr + ); + + graceful.await?; + Ok(()) + } + + async fn handler( + self: Arc, + req: Request, + addr: SocketAddr, + ) -> Result, GarageError> { + let uri = req.uri().clone(); + info!("{} {} {}", addr, req.method(), uri); + debug!("{:?}", req); + + let tracer = opentelemetry::global::tracer("garage"); + let span = tracer + .span_builder(format!("{} API call (unknown)", A::API_NAME_DISPLAY)) + .with_trace_id(gen_trace_id()) + .with_attributes(vec![ + KeyValue::new("method", format!("{}", req.method())), + KeyValue::new("uri", req.uri().to_string()), + ]) + .start(&tracer); + + let res = self + .handler_stage2(req) + .with_context(Context::current_with_span(span)) + .await; + + match res { + Ok(x) => { + debug!("{} {:?}", x.status(), x.headers()); + Ok(x) + } + Err(e) => { + let body: Body = Body::from(e.aws_xml(&self.region, uri.path())); + let mut http_error_builder = Response::builder() + .status(e.http_status_code()) + .header("Content-Type", "application/xml"); + + if let Some(header_map) 
= http_error_builder.headers_mut() { + e.add_headers(header_map) + } + + let http_error = http_error_builder.body(body)?; + + if e.http_status_code().is_server_error() { + warn!("Response: error {}, {}", e.http_status_code(), e); + } else { + info!("Response: error {}, {}", e.http_status_code(), e); + } + Ok(http_error) + } + } + } + + async fn handler_stage2(&self, req: Request) -> Result, Error> { + let endpoint = self.api_handler.parse_endpoint(&req)?; + debug!("Endpoint: {}", endpoint.name()); + + let current_context = Context::current(); + let current_span = current_context.span(); + current_span.update_name::(format!("S3 API {}", endpoint.name())); + current_span.set_attribute(KeyValue::new("endpoint", endpoint.name())); + endpoint.add_span_attributes(current_span); + + let metrics_tags = &[KeyValue::new("api_endpoint", endpoint.name())]; + + let res = self + .api_handler + .handle(req, endpoint) + .record_duration(&self.request_duration, &metrics_tags[..]) + .await; + + self.request_counter.add(1, &metrics_tags[..]); + + let status_code = match &res { + Ok(r) => r.status(), + Err(e) => e.http_status_code(), + }; + if status_code.is_client_error() || status_code.is_server_error() { + self.error_counter.add( + 1, + &[ + metrics_tags[0].clone(), + KeyValue::new("status_code", status_code.as_str().to_string()), + ], + ); + } + + res + } +} diff --git a/src/api/helpers.rs b/src/api/helpers.rs index c2709bb3..a994b82f 100644 --- a/src/api/helpers.rs +++ b/src/api/helpers.rs @@ -1,6 +1,25 @@ -use crate::Error; use idna::domain_to_unicode; +use garage_util::data::*; + +use garage_model::garage::Garage; +use garage_model::key_table::Key; + +use crate::error::*; + +/// What kind of authorization is required to perform a given action +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Authorization { + /// No authorization is required + None, + /// Having Read permission on bucket + Read, + /// Having Write permission on bucket + Write, + /// Having Owner permission on bucket + Owner, +} + /// Host to bucket /// /// Convert a host, like "bucket.garage-site.tld" to the corresponding bucket "bucket", @@ -60,10 +79,142 @@ pub fn authority_to_host(authority: &str) -> Result { authority.map(|h| domain_to_unicode(h).0) } +#[allow(clippy::ptr_arg)] +pub async fn resolve_bucket( + garage: &Garage, + bucket_name: &String, + api_key: &Key, +) -> Result { + let api_key_params = api_key + .state + .as_option() + .ok_or_internal_error("Key should not be deleted at this point")?; + + if let Some(Some(bucket_id)) = api_key_params.local_aliases.get(bucket_name) { + Ok(*bucket_id) + } else { + Ok(garage + .bucket_helper() + .resolve_global_bucket_name(bucket_name) + .await? + .ok_or(Error::NoSuchBucket)?) + } +} + +/// Extract the bucket name and the key name from an HTTP path and possibly a bucket provided in +/// the host header of the request +/// +/// S3 internally manages only buckets and keys. This function splits +/// an HTTP path to get the corresponding bucket name and key. 
+pub fn parse_bucket_key<'a>( + path: &'a str, + host_bucket: Option<&'a str>, +) -> Result<(&'a str, Option<&'a str>), Error> { + let path = path.trim_start_matches('/'); + + if let Some(bucket) = host_bucket { + if !path.is_empty() { + return Ok((bucket, Some(path))); + } else { + return Ok((bucket, None)); + } + } + + let (bucket, key) = match path.find('/') { + Some(i) => { + let key = &path[i + 1..]; + if !key.is_empty() { + (&path[..i], Some(key)) + } else { + (&path[..i], None) + } + } + None => (path, None), + }; + if bucket.is_empty() { + return Err(Error::BadRequest("No bucket specified".to_string())); + } + Ok((bucket, key)) +} + +const UTF8_BEFORE_LAST_CHAR: char = '\u{10FFFE}'; + +/// Compute the key after the prefix +pub fn key_after_prefix(pfx: &str) -> Option { + let mut next = pfx.to_string(); + while !next.is_empty() { + let tail = next.pop().unwrap(); + if tail >= char::MAX { + continue; + } + + // Circumvent a limitation of RangeFrom that overflow earlier than needed + // See: https://doc.rust-lang.org/core/ops/struct.RangeFrom.html + let new_tail = if tail == UTF8_BEFORE_LAST_CHAR { + char::MAX + } else { + (tail..).nth(1).unwrap() + }; + + next.push(new_tail); + return Some(next); + } + + None +} + #[cfg(test)] mod tests { use super::*; + #[test] + fn parse_bucket_containing_a_key() -> Result<(), Error> { + let (bucket, key) = parse_bucket_key("/my_bucket/a/super/file.jpg", None)?; + assert_eq!(bucket, "my_bucket"); + assert_eq!(key.expect("key must be set"), "a/super/file.jpg"); + Ok(()) + } + + #[test] + fn parse_bucket_containing_no_key() -> Result<(), Error> { + let (bucket, key) = parse_bucket_key("/my_bucket/", None)?; + assert_eq!(bucket, "my_bucket"); + assert!(key.is_none()); + let (bucket, key) = parse_bucket_key("/my_bucket", None)?; + assert_eq!(bucket, "my_bucket"); + assert!(key.is_none()); + Ok(()) + } + + #[test] + fn parse_bucket_containing_no_bucket() { + let parsed = parse_bucket_key("", None); + assert!(parsed.is_err()); + let parsed = parse_bucket_key("/", None); + assert!(parsed.is_err()); + let parsed = parse_bucket_key("////", None); + assert!(parsed.is_err()); + } + + #[test] + fn parse_bucket_with_vhost_and_key() -> Result<(), Error> { + let (bucket, key) = parse_bucket_key("/a/super/file.jpg", Some("my-bucket"))?; + assert_eq!(bucket, "my-bucket"); + assert_eq!(key.expect("key must be set"), "a/super/file.jpg"); + Ok(()) + } + + #[test] + fn parse_bucket_with_vhost_no_key() -> Result<(), Error> { + let (bucket, key) = parse_bucket_key("", Some("my-bucket"))?; + assert_eq!(bucket, "my-bucket"); + assert!(key.is_none()); + let (bucket, key) = parse_bucket_key("/", Some("my-bucket"))?; + assert_eq!(bucket, "my-bucket"); + assert!(key.is_none()); + Ok(()) + } + #[test] fn authority_to_host_with_port() -> Result<(), Error> { let domain = authority_to_host("[::1]:3902")?; @@ -111,4 +262,39 @@ mod tests { assert_eq!(host_to_bucket("not-garage.tld", "garage.tld"), None); assert_eq!(host_to_bucket("not-garage.tld", ".garage.tld"), None); } + + #[test] + fn test_key_after_prefix() { + use std::iter::FromIterator; + + assert_eq!(UTF8_BEFORE_LAST_CHAR as u32, (char::MAX as u32) - 1); + assert_eq!(key_after_prefix("a/b/").unwrap().as_str(), "a/b0"); + assert_eq!(key_after_prefix("€").unwrap().as_str(), "₭"); + assert_eq!( + key_after_prefix("􏿽").unwrap().as_str(), + String::from(char::from_u32(0x10FFFE).unwrap()) + ); + + // When the last character is the biggest UTF8 char + let a = String::from_iter(['a', char::MAX].iter()); + 
assert_eq!(key_after_prefix(a.as_str()).unwrap().as_str(), "b"); + + // When all characters are the biggest UTF8 char + let b = String::from_iter([char::MAX; 3].iter()); + assert!(key_after_prefix(b.as_str()).is_none()); + + // Check utf8 surrogates + let c = String::from('\u{D7FF}'); + assert_eq!( + key_after_prefix(c.as_str()).unwrap().as_str(), + String::from('\u{E000}') + ); + + // Check the character before the biggest one + let d = String::from('\u{10FFFE}'); + assert_eq!( + key_after_prefix(d.as_str()).unwrap().as_str(), + String::from(char::MAX) + ); + } } diff --git a/src/api/k2v/api_server.rs b/src/api/k2v/api_server.rs new file mode 100644 index 00000000..5f5e9030 --- /dev/null +++ b/src/api/k2v/api_server.rs @@ -0,0 +1,195 @@ +use std::sync::Arc; + +use async_trait::async_trait; + +use futures::future::Future; +use hyper::{Body, Method, Request, Response}; + +use opentelemetry::{trace::SpanRef, KeyValue}; + +use garage_table::util::*; +use garage_util::error::Error as GarageError; + +use garage_model::garage::Garage; + +use crate::error::*; +use crate::generic_server::*; + +use crate::signature::payload::check_payload_signature; +use crate::signature::streaming::*; + +use crate::helpers::*; +use crate::k2v::batch::*; +use crate::k2v::index::*; +use crate::k2v::item::*; +use crate::k2v::router::Endpoint; +use crate::s3::cors::*; + +pub struct K2VApiServer { + garage: Arc, +} + +pub(crate) struct K2VApiEndpoint { + bucket_name: String, + endpoint: Endpoint, +} + +impl K2VApiServer { + pub async fn run( + garage: Arc, + shutdown_signal: impl Future, + ) -> Result<(), GarageError> { + if let Some(cfg) = &garage.config.k2v_api { + let bind_addr = cfg.api_bind_addr; + + ApiServer::new( + garage.config.s3_api.s3_region.clone(), + K2VApiServer { garage }, + ) + .run_server(bind_addr, shutdown_signal) + .await + } else { + Ok(()) + } + } +} + +#[async_trait] +impl ApiHandler for K2VApiServer { + const API_NAME: &'static str = "k2v"; + const API_NAME_DISPLAY: &'static str = "K2V"; + + type Endpoint = K2VApiEndpoint; + + fn parse_endpoint(&self, req: &Request) -> Result { + let (endpoint, bucket_name) = Endpoint::from_request(req)?; + + Ok(K2VApiEndpoint { + bucket_name, + endpoint, + }) + } + + async fn handle( + &self, + req: Request, + endpoint: K2VApiEndpoint, + ) -> Result, Error> { + let K2VApiEndpoint { + bucket_name, + endpoint, + } = endpoint; + let garage = self.garage.clone(); + + // The OPTIONS method is procesed early, before we even check for an API key + if let Endpoint::Options = endpoint { + return handle_options_s3api(garage, &req, Some(bucket_name)).await; + } + + let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?; + let api_key = api_key.ok_or_else(|| { + Error::Forbidden("Garage does not support anonymous access yet".to_string()) + })?; + + let req = parse_streaming_body( + &api_key, + req, + &mut content_sha256, + &garage.config.s3_api.s3_region, + "k2v", + )?; + + let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?; + let bucket = garage + .bucket_table + .get(&EmptyKey, &bucket_id) + .await? 
+ .filter(|b| !b.state.is_deleted()) + .ok_or(Error::NoSuchBucket)?; + + let allowed = match endpoint.authorization_type() { + Authorization::Read => api_key.allow_read(&bucket_id), + Authorization::Write => api_key.allow_write(&bucket_id), + Authorization::Owner => api_key.allow_owner(&bucket_id), + _ => unreachable!(), + }; + + if !allowed { + return Err(Error::Forbidden( + "Operation is not allowed for this key.".to_string(), + )); + } + + // Look up what CORS rule might apply to response. + // Requests for methods different than GET, HEAD or POST + // are always preflighted, i.e. the browser should make + // an OPTIONS call before to check it is allowed + let matching_cors_rule = match *req.method() { + Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)?, + _ => None, + }; + + let resp = match endpoint { + Endpoint::DeleteItem { + partition_key, + sort_key, + } => handle_delete_item(garage, req, bucket_id, &partition_key, &sort_key).await, + Endpoint::InsertItem { + partition_key, + sort_key, + } => handle_insert_item(garage, req, bucket_id, &partition_key, &sort_key).await, + Endpoint::ReadItem { + partition_key, + sort_key, + } => handle_read_item(garage, &req, bucket_id, &partition_key, &sort_key).await, + Endpoint::PollItem { + partition_key, + sort_key, + causality_token, + timeout, + } => { + handle_poll_item( + garage, + &req, + bucket_id, + partition_key, + sort_key, + causality_token, + timeout, + ) + .await + } + Endpoint::ReadIndex { + prefix, + start, + end, + limit, + reverse, + } => handle_read_index(garage, bucket_id, prefix, start, end, limit, reverse).await, + Endpoint::InsertBatch {} => handle_insert_batch(garage, bucket_id, req).await, + Endpoint::ReadBatch {} => handle_read_batch(garage, bucket_id, req).await, + Endpoint::DeleteBatch {} => handle_delete_batch(garage, bucket_id, req).await, + Endpoint::Options => unreachable!(), + }; + + // If request was a success and we have a CORS rule that applies to it, + // add the corresponding CORS headers to the response + let mut resp_ok = resp?; + if let Some(rule) = matching_cors_rule { + add_cors_headers(&mut resp_ok, rule) + .ok_or_internal_error("Invalid bucket CORS configuration")?; + } + + Ok(resp_ok) + } +} + +impl ApiEndpoint for K2VApiEndpoint { + fn name(&self) -> &'static str { + self.endpoint.name() + } + + fn add_span_attributes(&self, span: SpanRef<'_>) { + span.set_attribute(KeyValue::new("bucket", self.bucket_name.clone())); + } +} diff --git a/src/api/k2v/batch.rs b/src/api/k2v/batch.rs new file mode 100644 index 00000000..4ecddeb9 --- /dev/null +++ b/src/api/k2v/batch.rs @@ -0,0 +1,368 @@ +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; + +use garage_util::data::*; +use garage_util::error::Error as GarageError; + +use garage_table::{EnumerationOrder, TableSchema}; + +use garage_model::garage::Garage; +use garage_model::k2v::causality::*; +use garage_model::k2v::item_table::*; + +use crate::error::*; +use crate::k2v::range::read_range; + +pub async fn handle_insert_batch( + garage: Arc, + bucket_id: Uuid, + req: Request, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + let items: Vec = + serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?; + + let mut items2 = vec![]; + for it in items { + let ct = it + .ct + .map(|s| CausalContext::parse(&s)) + .transpose() + .ok_or_bad_request("Invalid causality token")?; + let v = match it.v { + Some(vs) => { + 
DvvsValue::Value(base64::decode(vs).ok_or_bad_request("Invalid base64 value")?) + } + None => DvvsValue::Deleted, + }; + items2.push((it.pk, it.sk, ct, v)); + } + + garage.k2v.rpc.insert_batch(bucket_id, items2).await?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?) +} + +pub async fn handle_read_batch( + garage: Arc, + bucket_id: Uuid, + req: Request, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + let queries: Vec = + serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?; + + let resp_results = futures::future::join_all( + queries + .into_iter() + .map(|q| handle_read_batch_query(&garage, bucket_id, q)), + ) + .await; + + let mut resps: Vec = vec![]; + for resp in resp_results { + resps.push(resp?); + } + + let resp_json = serde_json::to_string_pretty(&resps).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) +} + +async fn handle_read_batch_query( + garage: &Arc, + bucket_id: Uuid, + query: ReadBatchQuery, +) -> Result { + let partition = K2VItemPartition { + bucket_id, + partition_key: query.partition_key.clone(), + }; + + let filter = ItemFilter { + exclude_only_tombstones: !query.tombstones, + conflicts_only: query.conflicts_only, + }; + + let (items, more, next_start) = if query.single_item { + if query.prefix.is_some() || query.end.is_some() || query.limit.is_some() || query.reverse { + return Err(Error::BadRequest("Batch query parameters 'prefix', 'end', 'limit' and 'reverse' must not be set when singleItem is true.".into())); + } + let sk = query + .start + .as_ref() + .ok_or_bad_request("start should be specified if single_item is set")?; + let item = garage + .k2v + .item_table + .get(&partition, sk) + .await? + .filter(|e| K2VItemTable::matches_filter(e, &filter)); + match item { + Some(i) => (vec![ReadBatchResponseItem::from(i)], false, None), + None => (vec![], false, None), + } + } else { + let (items, more, next_start) = read_range( + &garage.k2v.item_table, + &partition, + &query.prefix, + &query.start, + &query.end, + query.limit, + Some(filter), + EnumerationOrder::from_reverse(query.reverse), + ) + .await?; + + let items = items + .into_iter() + .map(ReadBatchResponseItem::from) + .collect::>(); + + (items, more, next_start) + }; + + Ok(ReadBatchResponse { + partition_key: query.partition_key, + prefix: query.prefix, + start: query.start, + end: query.end, + limit: query.limit, + reverse: query.reverse, + single_item: query.single_item, + conflicts_only: query.conflicts_only, + tombstones: query.tombstones, + items, + more, + next_start, + }) +} + +pub async fn handle_delete_batch( + garage: Arc, + bucket_id: Uuid, + req: Request, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + let queries: Vec = + serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?; + + let resp_results = futures::future::join_all( + queries + .into_iter() + .map(|q| handle_delete_batch_query(&garage, bucket_id, q)), + ) + .await; + + let mut resps: Vec = vec![]; + for resp in resp_results { + resps.push(resp?); + } + + let resp_json = serde_json::to_string_pretty(&resps).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) 
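+	// For reference, the request body parsed above is a JSON list of queries
+	// mirroring DeleteBatchQuery below, e.g. (illustrative):
+	//   [{"partitionKey": "mailbox", "start": "msg001", "singleItem": true}]
+	// and each query produces one DeleteBatchResponse in the returned list.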
+} + +async fn handle_delete_batch_query( + garage: &Arc, + bucket_id: Uuid, + query: DeleteBatchQuery, +) -> Result { + let partition = K2VItemPartition { + bucket_id, + partition_key: query.partition_key.clone(), + }; + + let filter = ItemFilter { + exclude_only_tombstones: true, + conflicts_only: false, + }; + + let deleted_items = if query.single_item { + if query.prefix.is_some() || query.end.is_some() { + return Err(Error::BadRequest("Batch query parameters 'prefix' and 'end' must not be set when singleItem is true.".into())); + } + let sk = query + .start + .as_ref() + .ok_or_bad_request("start should be specified if single_item is set")?; + let item = garage + .k2v + .item_table + .get(&partition, sk) + .await? + .filter(|e| K2VItemTable::matches_filter(e, &filter)); + match item { + Some(i) => { + let cc = i.causal_context(); + garage + .k2v + .rpc + .insert( + bucket_id, + i.partition.partition_key, + i.sort_key, + Some(cc), + DvvsValue::Deleted, + ) + .await?; + 1 + } + None => 0, + } + } else { + let (items, more, _next_start) = read_range( + &garage.k2v.item_table, + &partition, + &query.prefix, + &query.start, + &query.end, + None, + Some(filter), + EnumerationOrder::Forward, + ) + .await?; + assert!(!more); + + // TODO delete items + let items = items + .into_iter() + .map(|i| { + let cc = i.causal_context(); + ( + i.partition.partition_key, + i.sort_key, + Some(cc), + DvvsValue::Deleted, + ) + }) + .collect::>(); + let n = items.len(); + + garage.k2v.rpc.insert_batch(bucket_id, items).await?; + + n + }; + + Ok(DeleteBatchResponse { + partition_key: query.partition_key, + prefix: query.prefix, + start: query.start, + end: query.end, + single_item: query.single_item, + deleted_items, + }) +} + +#[derive(Deserialize)] +struct InsertBatchItem { + pk: String, + sk: String, + ct: Option, + v: Option, +} + +#[derive(Deserialize)] +struct ReadBatchQuery { + #[serde(rename = "partitionKey")] + partition_key: String, + #[serde(default)] + prefix: Option, + #[serde(default)] + start: Option, + #[serde(default)] + end: Option, + #[serde(default)] + limit: Option, + #[serde(default)] + reverse: bool, + #[serde(default, rename = "singleItem")] + single_item: bool, + #[serde(default, rename = "conflictsOnly")] + conflicts_only: bool, + #[serde(default)] + tombstones: bool, +} + +#[derive(Serialize)] +struct ReadBatchResponse { + #[serde(rename = "partitionKey")] + partition_key: String, + prefix: Option, + start: Option, + end: Option, + limit: Option, + reverse: bool, + #[serde(rename = "singleItem")] + single_item: bool, + #[serde(rename = "conflictsOnly")] + conflicts_only: bool, + tombstones: bool, + + items: Vec, + more: bool, + #[serde(rename = "nextStart")] + next_start: Option, +} + +#[derive(Serialize)] +struct ReadBatchResponseItem { + sk: String, + ct: String, + v: Vec>, +} + +impl ReadBatchResponseItem { + fn from(i: K2VItem) -> Self { + let ct = i.causal_context().serialize(); + let v = i + .values() + .iter() + .map(|v| match v { + DvvsValue::Value(x) => Some(base64::encode(x)), + DvvsValue::Deleted => None, + }) + .collect::>(); + Self { + sk: i.sort_key, + ct, + v, + } + } +} + +#[derive(Deserialize)] +struct DeleteBatchQuery { + #[serde(rename = "partitionKey")] + partition_key: String, + #[serde(default)] + prefix: Option, + #[serde(default)] + start: Option, + #[serde(default)] + end: Option, + #[serde(default, rename = "singleItem")] + single_item: bool, +} + +#[derive(Serialize)] +struct DeleteBatchResponse { + #[serde(rename = "partitionKey")] + partition_key: 
String, + prefix: Option, + start: Option, + end: Option, + #[serde(rename = "singleItem")] + single_item: bool, + + #[serde(rename = "deletedItems")] + deleted_items: usize, +} diff --git a/src/api/k2v/index.rs b/src/api/k2v/index.rs new file mode 100644 index 00000000..896dbcf0 --- /dev/null +++ b/src/api/k2v/index.rs @@ -0,0 +1,100 @@ +use std::sync::Arc; + +use hyper::{Body, Response, StatusCode}; +use serde::Serialize; + +use garage_util::data::*; +use garage_util::error::Error as GarageError; + +use garage_rpc::ring::Ring; +use garage_table::util::*; + +use garage_model::garage::Garage; +use garage_model::k2v::counter_table::{BYTES, CONFLICTS, ENTRIES, VALUES}; + +use crate::error::*; +use crate::k2v::range::read_range; + +pub async fn handle_read_index( + garage: Arc, + bucket_id: Uuid, + prefix: Option, + start: Option, + end: Option, + limit: Option, + reverse: Option, +) -> Result, Error> { + let reverse = reverse.unwrap_or(false); + + let ring: Arc = garage.system.ring.borrow().clone(); + + let (partition_keys, more, next_start) = read_range( + &garage.k2v.counter_table.table, + &bucket_id, + &prefix, + &start, + &end, + limit, + Some((DeletedFilter::NotDeleted, ring.layout.node_id_vec.clone())), + EnumerationOrder::from_reverse(reverse), + ) + .await?; + + let s_entries = ENTRIES.to_string(); + let s_conflicts = CONFLICTS.to_string(); + let s_values = VALUES.to_string(); + let s_bytes = BYTES.to_string(); + + let resp = ReadIndexResponse { + prefix, + start, + end, + limit, + reverse, + partition_keys: partition_keys + .into_iter() + .map(|part| { + let vals = part.filtered_values(&ring); + ReadIndexResponseEntry { + pk: part.sk, + entries: *vals.get(&s_entries).unwrap_or(&0), + conflicts: *vals.get(&s_conflicts).unwrap_or(&0), + values: *vals.get(&s_values).unwrap_or(&0), + bytes: *vals.get(&s_bytes).unwrap_or(&0), + } + }) + .collect::>(), + more, + next_start, + }; + + let resp_json = serde_json::to_string_pretty(&resp).map_err(GarageError::from)?; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(resp_json))?) 
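+	// The response serialized above looks roughly like this (illustrative values):
+	//   {"prefix": null, "start": null, "end": null, "limit": null, "reverse": false,
+	//    "partitionKeys": [{"pk": "mailbox", "entries": 3, "conflicts": 0,
+	//    "values": 3, "bytes": 42}], "more": false, "nextStart": null}
+	// with one entry per partition key, read from the K2V counter table.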
+} + +#[derive(Serialize)] +struct ReadIndexResponse { + prefix: Option, + start: Option, + end: Option, + limit: Option, + reverse: bool, + + #[serde(rename = "partitionKeys")] + partition_keys: Vec, + + more: bool, + #[serde(rename = "nextStart")] + next_start: Option, +} + +#[derive(Serialize)] +struct ReadIndexResponseEntry { + pk: String, + entries: i64, + conflicts: i64, + values: i64, + bytes: i64, +} diff --git a/src/api/k2v/item.rs b/src/api/k2v/item.rs new file mode 100644 index 00000000..1860863e --- /dev/null +++ b/src/api/k2v/item.rs @@ -0,0 +1,230 @@ +use std::sync::Arc; + +use http::header; + +use hyper::{Body, Request, Response, StatusCode}; + +use garage_util::data::*; + +use garage_model::garage::Garage; +use garage_model::k2v::causality::*; +use garage_model::k2v::item_table::*; + +use crate::error::*; + +pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token"; + +pub enum ReturnFormat { + Json, + Binary, + Either, +} + +impl ReturnFormat { + pub fn from(req: &Request) -> Result { + let accept = match req.headers().get(header::ACCEPT) { + Some(a) => a.to_str()?, + None => return Ok(Self::Json), + }; + + let accept = accept.split(',').map(|s| s.trim()).collect::>(); + let accept_json = accept.contains(&"application/json") || accept.contains(&"*/*"); + let accept_binary = accept.contains(&"application/octet-stream") || accept.contains(&"*/*"); + + match (accept_json, accept_binary) { + (true, true) => Ok(Self::Either), + (true, false) => Ok(Self::Json), + (false, true) => Ok(Self::Binary), + (false, false) => Err(Error::NotAcceptable("Invalid Accept: header value, must contain either application/json or application/octet-stream (or both)".into())), + } + } + + pub fn make_response(&self, item: &K2VItem) -> Result, Error> { + let vals = item.values(); + + if vals.is_empty() { + return Err(Error::NoSuchKey); + } + + let ct = item.causal_context().serialize(); + match self { + Self::Binary if vals.len() > 1 => Ok(Response::builder() + .header(X_GARAGE_CAUSALITY_TOKEN, ct) + .status(StatusCode::CONFLICT) + .body(Body::empty())?), + Self::Binary => { + assert!(vals.len() == 1); + Self::make_binary_response(ct, vals[0]) + } + Self::Either if vals.len() == 1 => Self::make_binary_response(ct, vals[0]), + _ => Self::make_json_response(ct, &vals[..]), + } + } + + fn make_binary_response(ct: String, v: &DvvsValue) -> Result, Error> { + match v { + DvvsValue::Deleted => Ok(Response::builder() + .header(X_GARAGE_CAUSALITY_TOKEN, ct) + .header(header::CONTENT_TYPE, "application/octet-stream") + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?), + DvvsValue::Value(v) => Ok(Response::builder() + .header(X_GARAGE_CAUSALITY_TOKEN, ct) + .header(header::CONTENT_TYPE, "application/octet-stream") + .status(StatusCode::OK) + .body(Body::from(v.to_vec()))?), + } + } + + fn make_json_response(ct: String, v: &[&DvvsValue]) -> Result, Error> { + let items = v + .iter() + .map(|v| match v { + DvvsValue::Deleted => serde_json::Value::Null, + DvvsValue::Value(v) => serde_json::Value::String(base64::encode(v)), + }) + .collect::>(); + let json_body = + serde_json::to_string_pretty(&items).ok_or_internal_error("JSON encoding error")?; + Ok(Response::builder() + .header(X_GARAGE_CAUSALITY_TOKEN, ct) + .header(header::CONTENT_TYPE, "application/json") + .status(StatusCode::OK) + .body(Body::from(json_body))?) 
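+		// Illustrative output for two concurrent values, one being a tombstone:
+		//   ["aGVsbG8=", null]
+		// returned together with the X-Garage-Causality-Token header, which the
+		// client echoes back to resolve the conflict on its next write.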
+	}
+}
+
+/// Handle ReadItem request
+#[allow(clippy::ptr_arg)]
+pub async fn handle_read_item(
+	garage: Arc<Garage>,
+	req: &Request<Body>,
+	bucket_id: Uuid,
+	partition_key: &str,
+	sort_key: &String,
+) -> Result<Response<Body>, Error> {
+	let format = ReturnFormat::from(req)?;
+
+	let item = garage
+		.k2v
+		.item_table
+		.get(
+			&K2VItemPartition {
+				bucket_id,
+				partition_key: partition_key.to_string(),
+			},
+			sort_key,
+		)
+		.await?
+		.ok_or(Error::NoSuchKey)?;
+
+	format.make_response(&item)
+}
+
+pub async fn handle_insert_item(
+	garage: Arc<Garage>,
+	req: Request<Body>,
+	bucket_id: Uuid,
+	partition_key: &str,
+	sort_key: &str,
+) -> Result<Response<Body>, Error> {
+	let causal_context = req
+		.headers()
+		.get(X_GARAGE_CAUSALITY_TOKEN)
+		.map(|s| s.to_str())
+		.transpose()?
+		.map(CausalContext::parse)
+		.transpose()
+		.ok_or_bad_request("Invalid causality token")?;
+
+	let body = hyper::body::to_bytes(req.into_body()).await?;
+	let value = DvvsValue::Value(body.to_vec());
+
+	garage
+		.k2v
+		.rpc
+		.insert(
+			bucket_id,
+			partition_key.to_string(),
+			sort_key.to_string(),
+			causal_context,
+			value,
+		)
+		.await?;
+
+	Ok(Response::builder()
+		.status(StatusCode::OK)
+		.body(Body::empty())?)
+}
+
+pub async fn handle_delete_item(
+	garage: Arc<Garage>,
+	req: Request<Body>,
+	bucket_id: Uuid,
+	partition_key: &str,
+	sort_key: &str,
+) -> Result<Response<Body>, Error> {
+	let causal_context = req
+		.headers()
+		.get(X_GARAGE_CAUSALITY_TOKEN)
+		.map(|s| s.to_str())
+		.transpose()?
+		.map(CausalContext::parse)
+		.transpose()
+		.ok_or_bad_request("Invalid causality token")?;
+
+	let value = DvvsValue::Deleted;
+
+	garage
+		.k2v
+		.rpc
+		.insert(
+			bucket_id,
+			partition_key.to_string(),
+			sort_key.to_string(),
+			causal_context,
+			value,
+		)
+		.await?;
+
+	Ok(Response::builder()
+		.status(StatusCode::NO_CONTENT)
+		.body(Body::empty())?)
+}
+
+/// Handle PollItem request
+#[allow(clippy::ptr_arg)]
+pub async fn handle_poll_item(
+	garage: Arc<Garage>,
+	req: &Request<Body>,
+	bucket_id: Uuid,
+	partition_key: String,
+	sort_key: String,
+	causality_token: String,
+	timeout_secs: Option<u64>,
+) -> Result<Response<Body>, Error> {
+	let format = ReturnFormat::from(req)?;
+
+	let causal_context =
+		CausalContext::parse(&causality_token).ok_or_bad_request("Invalid causality token")?;
+
+	let item = garage
+		.k2v
+		.rpc
+		.poll(
+			bucket_id,
+			partition_key,
+			sort_key,
+			causal_context,
+			timeout_secs.unwrap_or(300) * 1000,
+		)
+		.await?;
+
+	if let Some(item) = item {
+		format.make_response(&item)
+	} else {
+		Ok(Response::builder()
+			.status(StatusCode::NOT_MODIFIED)
+			.body(Body::empty())?)
+	}
+}
diff --git a/src/api/k2v/mod.rs b/src/api/k2v/mod.rs
new file mode 100644
index 00000000..ee210ad5
--- /dev/null
+++ b/src/api/k2v/mod.rs
@@ -0,0 +1,8 @@
+pub mod api_server;
+mod router;
+
+mod batch;
+mod index;
+mod item;
+
+mod range;
diff --git a/src/api/k2v/range.rs b/src/api/k2v/range.rs
new file mode 100644
index 00000000..cd019723
--- /dev/null
+++ b/src/api/k2v/range.rs
@@ -0,0 +1,96 @@
+//! Utility module for retrieving ranges of items in Garage tables
+//! Implements parameters (prefix, start, end, limit) as specified
+//! for endpoints ReadIndex, ReadBatch and DeleteBatch
+
+use std::sync::Arc;
+
+use garage_table::replication::TableShardedReplication;
+use garage_table::*;
+
+use crate::error::*;
+use crate::helpers::key_after_prefix;
+
+/// Read range in a Garage table.
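+/// For instance (an illustrative sketch), a ReadBatch sub-query with prefix "a"
+/// and limit 2 boils down to a call like:
+///   read_range(&table, &partition, &Some("a".into()), &None, &None, Some(2),
+///              Some(filter), EnumerationOrder::Forward)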
+/// Returns (entries, more?, nextStart)
+#[allow(clippy::too_many_arguments)]
+pub(crate) async fn read_range<F>(
+	table: &Arc<Table<F, TableShardedReplication>>,
+	partition_key: &F::P,
+	prefix: &Option<String>,
+	start: &Option<String>,
+	end: &Option<String>,
+	limit: Option<u64>,
+	filter: Option<F::Filter>,
+	enumeration_order: EnumerationOrder,
+) -> Result<(Vec<F::E>, bool, Option<String>), Error>
+where
+	F: TableSchema + 'static,
+{
+	let (mut start, mut start_ignore) = match (prefix, start) {
+		(None, None) => (None, false),
+		(None, Some(s)) => (Some(s.clone()), false),
+		(Some(p), Some(s)) => {
+			if !s.starts_with(p) {
+				return Err(Error::BadRequest(format!(
+					"Start key '{}' does not start with prefix '{}'",
+					s, p
+				)));
+			}
+			(Some(s.clone()), false)
+		}
+		(Some(p), None) if enumeration_order == EnumerationOrder::Reverse => {
+			let start = key_after_prefix(p)
+				.ok_or_internal_error("Sorry, can't list this prefix in reverse order")?;
+			(Some(start), true)
+		}
+		(Some(p), None) => (Some(p.clone()), false),
+	};
+
+	let mut entries = vec![];
+	loop {
+		let n_get = std::cmp::min(
+			1000,
+			limit.map(|x| x as usize).unwrap_or(usize::MAX - 10) - entries.len() + 2,
+		);
+		let get_ret = table
+			.get_range(
+				partition_key,
+				start.clone(),
+				filter.clone(),
+				n_get,
+				enumeration_order,
+			)
+			.await?;
+
+		let get_ret_len = get_ret.len();
+
+		for entry in get_ret {
+			if start_ignore && Some(entry.sort_key()) == start.as_ref() {
+				continue;
+			}
+			if let Some(p) = prefix {
+				if !entry.sort_key().starts_with(p) {
+					return Ok((entries, false, None));
+				}
+			}
+			if let Some(e) = end {
+				if entry.sort_key() == e {
+					return Ok((entries, false, None));
+				}
+			}
+			if let Some(l) = limit {
+				if entries.len() >= l as usize {
+					return Ok((entries, true, Some(entry.sort_key().clone())));
+				}
+			}
+			entries.push(entry);
+		}
+
+		if get_ret_len < n_get {
+			return Ok((entries, false, None));
+		}
+
+		start = Some(entries.last().unwrap().sort_key().clone());
+		start_ignore = true;
+	}
+}
diff --git a/src/api/k2v/router.rs b/src/api/k2v/router.rs
new file mode 100644
index 00000000..f948ffce
--- /dev/null
+++ b/src/api/k2v/router.rs
@@ -0,0 +1,252 @@
+use crate::error::*;
+
+use std::borrow::Cow;
+
+use hyper::{Method, Request};
+
+use crate::helpers::Authorization;
+use crate::router_macros::{generateQueryParameters, router_match};
+
+router_match! {@func
+
+
+/// List of all K2V API endpoints.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Endpoint {
+	DeleteBatch {
+	},
+	DeleteItem {
+		partition_key: String,
+		sort_key: String,
+	},
+	InsertBatch {
+	},
+	InsertItem {
+		partition_key: String,
+		sort_key: String,
+	},
+	Options,
+	PollItem {
+		partition_key: String,
+		sort_key: String,
+		causality_token: String,
+		timeout: Option<u64>,
+	},
+	ReadBatch {
+	},
+	ReadIndex {
+		prefix: Option<String>,
+		start: Option<String>,
+		end: Option<String>,
+		limit: Option<u64>,
+		reverse: Option<bool>,
+	},
+	ReadItem {
+		partition_key: String,
+		sort_key: String,
+	},
+}}
+
+impl Endpoint {
+	/// Determine which K2V endpoint a request is for, parsing the bucket name
+	/// from the first component of the request path.
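+	/// The parsers below implement, roughly, the following mapping (illustrative):
+	///   GET    /{bucket}/{partition_key}?sort_key={sk}                        -> ReadItem
+	///   GET    /{bucket}/{partition_key}?sort_key={sk}&causality_token={ct}   -> PollItem
+	///   PUT    /{bucket}/{partition_key}?sort_key={sk}                        -> InsertItem
+	///   DELETE /{bucket}/{partition_key}?sort_key={sk}                        -> DeleteItem
+	///   GET    /{bucket}?prefix=&start=&end=&limit=                           -> ReadIndex
+	///   POST   /{bucket}                                                      -> InsertBatch
+	///   POST   /{bucket}?search   (or SEARCH /{bucket})                       -> ReadBatch
+	///   POST   /{bucket}?delete                                               -> DeleteBatch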
+	/// Returns Self plus the bucket name.
+	pub fn from_request<T>(req: &Request<T>) -> Result<(Self, String), Error> {
+		let uri = req.uri();
+		let path = uri.path().trim_start_matches('/');
+		let query = uri.query();
+
+		let (bucket, partition_key) = path
+			.split_once('/')
+			.map(|(b, p)| (b.to_owned(), p.trim_start_matches('/')))
+			.unwrap_or((path.to_owned(), ""));
+
+		if bucket.is_empty() {
+			return Err(Error::BadRequest("Missing bucket name".to_owned()));
+		}
+
+		if *req.method() == Method::OPTIONS {
+			return Ok((Self::Options, bucket));
+		}
+
+		let partition_key = percent_encoding::percent_decode_str(partition_key)
+			.decode_utf8()?
+			.into_owned();
+
+		let mut query = QueryParameters::from_query(query.unwrap_or_default())?;
+
+		let method_search = Method::from_bytes(b"SEARCH").unwrap();
+		let res = match *req.method() {
+			Method::GET => Self::from_get(partition_key, &mut query)?,
+			//&Method::HEAD => Self::from_head(partition_key, &mut query)?,
+			Method::POST => Self::from_post(partition_key, &mut query)?,
+			Method::PUT => Self::from_put(partition_key, &mut query)?,
+			Method::DELETE => Self::from_delete(partition_key, &mut query)?,
+			_ if req.method() == method_search => Self::from_search(partition_key, &mut query)?,
+			_ => return Err(Error::BadRequest("Unknown method".to_owned())),
+		};
+
+		if let Some(message) = query.nonempty_message() {
+			debug!("Unused query parameter: {}", message)
+		}
+		Ok((res, bucket))
+	}
+
+	/// Determine which endpoint a request is for, knowing it is a GET.
+	fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
+		router_match! {
+			@gen_parser
+			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+			key: [
+				EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
+				EMPTY => ReadItem (query::sort_key),
+			],
+			no_key: [
+				EMPTY => ReadIndex (query_opt::prefix, query_opt::start, query_opt::end, opt_parse::limit, opt_parse::reverse),
+			]
+		}
+	}
+
+	/// Determine which endpoint a request is for, knowing it is a SEARCH.
+	fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
+		router_match! {
+			@gen_parser
+			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+			key: [
+			],
+			no_key: [
+				EMPTY => ReadBatch,
+			]
+		}
+	}
+
+	/*
+	/// Determine which endpoint a request is for, knowing it is a HEAD.
+	fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
+		router_match! {
+			@gen_parser
+			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+			key: [
+				EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
+			],
+			no_key: [
+				EMPTY => HeadBucket,
+			]
+		}
+	}
+	*/
+
+	/// Determine which endpoint a request is for, knowing it is a POST.
+	fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
+		router_match! {
+			@gen_parser
+			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+			key: [
+			],
+			no_key: [
+				EMPTY => InsertBatch,
+				DELETE => DeleteBatch,
+				SEARCH => ReadBatch,
+			]
+		}
+	}
+
+	/// Determine which endpoint a request is for, knowing it is a PUT.
+	fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
+		router_match! {
+			@gen_parser
+			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+			key: [
+				EMPTY => InsertItem (query::sort_key),
+
+			],
+			no_key: [
+			]
+		}
+	}
+
+	/// Determine which endpoint a request is for, knowing it is a DELETE.
+	fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
+		router_match! {
+			@gen_parser
+			(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
+			key: [
+				EMPTY => DeleteItem (query::sort_key),
+			],
+			no_key: [
+			]
+		}
+	}
+
+	/// Get the partition key the request targets. Returns None for requests which don't use a partition key.
+	#[allow(dead_code)]
+	pub fn get_partition_key(&self) -> Option<&str> {
+		router_match! {
+			@extract
+			self,
+			partition_key,
+			[
+				DeleteItem,
+				InsertItem,
+				PollItem,
+				ReadItem,
+			]
+		}
+	}
+
+	/// Get the sort key the request targets. Returns None for requests which don't use a sort key.
+	#[allow(dead_code)]
+	pub fn get_sort_key(&self) -> Option<&str> {
+		router_match! {
+			@extract
+			self,
+			sort_key,
+			[
+				DeleteItem,
+				InsertItem,
+				PollItem,
+				ReadItem,
+			]
+		}
+	}
+
+	/// Get the kind of authorization which is required to perform the operation.
+	pub fn authorization_type(&self) -> Authorization {
+		let readonly = router_match! {
+			@match
+			self,
+			[
+				PollItem,
+				ReadBatch,
+				ReadIndex,
+				ReadItem,
+			]
+		};
+		if readonly {
+			Authorization::Read
+		} else {
+			Authorization::Write
+		}
+	}
+}
+
+// parameter name => struct field
+generateQueryParameters! {
+	"prefix" => prefix,
+	"start" => start,
+	"causality_token" => causality_token,
+	"end" => end,
+	"limit" => limit,
+	"reverse" => reverse,
+	"sort_key" => sort_key,
+	"timeout" => timeout
+}
+
+mod keywords {
+	//! This module contains all query parameters with no associated value
+	//! used to differentiate endpoints.
+	pub const EMPTY: &str = "";
+
+	pub const DELETE: &str = "delete";
+	pub const SEARCH: &str = "search";
+}
diff --git a/src/api/lib.rs b/src/api/lib.rs
index de60ec53..0078f7b5 100644
--- a/src/api/lib.rs
+++ b/src/api/lib.rs
@@ -6,22 +6,12 @@ pub mod error;
 pub use error::Error;
 
 mod encoding;
-
-mod api_server;
-pub use api_server::run_api_server;
-
+mod generic_server;
+pub mod helpers;
+mod router_macros;
 
 /// This module is public only to help testing. Don't expect stability here
 pub mod signature;
 
-pub mod helpers;
-mod s3_bucket;
-mod s3_copy;
-pub mod s3_cors;
-mod s3_delete;
-pub mod s3_get;
-mod s3_list;
-mod s3_post_object;
-mod s3_put;
-mod s3_router;
-mod s3_website;
-mod s3_xml;
+#[cfg(feature = "k2v")]
+pub mod k2v;
+pub mod s3;
diff --git a/src/api/router_macros.rs b/src/api/router_macros.rs
new file mode 100644
index 00000000..8471407c
--- /dev/null
+++ b/src/api/router_macros.rs
@@ -0,0 +1,190 @@
+/// This macro is used to generate very repetitive match {} blocks in this module
+/// It is _not_ made to be used anywhere else
+macro_rules! router_match {
+	(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
+		// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
+		// returns true if the variant was one of the listed variants, false otherwise.
+		use Endpoint::*;
+		match $enum {
+			$(
+				$endpoint { .. } => true,
+			)*
+			_ => false
+		}
+	}};
+	(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
+		// usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
+		// returns Some(field_value), or None if the variant was not one of the listed variants.
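+		// For instance, router_match!(@extract self, partition_key, [ReadItem,])
+		// expands roughly to:
+		//   match self { ReadItem { partition_key, .. } => Some(partition_key), _ => None }
+		// (illustrative sketch of the expansion, with `use Endpoint::*;` below in scope)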
+		use Endpoint::*;
+		match $enum {
+			$(
+				$endpoint {$param, ..} => Some($param),
+			)*
+			_ => None
+		}
+	}};
+	(@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr),
+		key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*],
+		no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{
+		// usage: router_match {@gen_parser (keyword, key, query, header),
+		//   key: [
+		//      SOME_KEYWORD => VariantWithKey,
+		//      ...
+		//   ],
+		//   no_key: [
+		//      SOME_KEYWORD => VariantWithoutKey,
+		//      ...
+		//   ]
+		// }
+		// See in from_{method} for more detailed usage.
+		use Endpoint::*;
+		use keywords::*;
+		match ($keyword, !$key.is_empty()){
+			$(
+				($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
+					$key,
+					$($(
+						$param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
+					)*)?
+				}),
+			)*
+			$(
+				($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
+					$($(
+						$param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
+					)*)?
+				}),
+			)*
+			(kw, _) => Err(Error::BadRequest(format!("Invalid endpoint: {}", kw)))
+		}
+	}};
+
+	(@@parse_param $query:expr, query_opt, $param:ident) => {{
+		// extract optional query parameter
+		$query.$param.take().map(|param| param.into_owned())
+	}};
+	(@@parse_param $query:expr, query, $param:ident) => {{
+		// extract mandatory query parameter
+		$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned()
+	}};
+	(@@parse_param $query:expr, opt_parse, $param:ident) => {{
+		// extract and parse optional query parameter
+		// a missing parameter is fine, however a parse error is reported as an error
+		$query.$param
+			.take()
+			.map(|param| param.parse())
+			.transpose()
+			.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
+	}};
+	(@@parse_param $query:expr, parse, $param:ident) => {{
+		// extract and parse mandatory query parameter
+		// both missing and un-parseable parameters are reported as errors
+		$query.$param.take().ok_or_bad_request("Missing argument for endpoint")?
+			.parse()
+			.map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))?
+	}};
+	(@func
+	$(#[$doc:meta])*
+	pub enum Endpoint {
+		$(
+			$(#[$outer:meta])*
+			$variant:ident $({
+				$($name:ident: $ty:ty,)*
+			})?,
+		)*
+	}) => {
+		$(#[$doc])*
+		pub enum Endpoint {
+			$(
+				$(#[$outer])*
+				$variant $({
+					$($name: $ty, )*
+				})?,
+			)*
+		}
+		impl Endpoint {
+			pub fn name(&self) -> &'static str {
+				match self {
+					$(Endpoint::$variant $({ $($name: _,)* .. })? => stringify!($variant),)*
+				}
+			}
+		}
+	};
+	(@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => {
+		$($then)*
+	};
+	(@if () then ($($then:tt)*) else ($($else:tt)*)) => {
+		$($else)*
+	};
+}
+
+/// This macro is used to generate part of the code in this module. It must be called only once,
+/// and is useless outside of this module.
+macro_rules! generateQueryParameters {
+	( $($rest:expr => $name:ident),* ) => {
+		/// Struct containing all query parameters used in endpoints. Think of it as a HashMap,
+		/// but with keys statically known.
+		#[derive(Debug, Default)]
+		struct QueryParameters<'a> {
+			keyword: Option<Cow<'a, str>>,
+			$(
+				$name: Option<Cow<'a, str>>,
+			)*
+		}
+
+		impl<'a> QueryParameters<'a> {
+			/// Build this struct from the query part of a URI.
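+			/// For instance (an illustrative sketch): `?prefix=a&limit=10&search`
+			/// yields prefix=Some("a"), limit=Some("10") and keyword=Some("search"):
+			/// a parameter with a value fills the matching struct field, while a
+			/// parameter with an empty value is treated as the request's keyword.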
+			fn from_query(query: &'a str) -> Result<Self, Error> {
+				let mut res: Self = Default::default();
+				for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
+					let repeated = match k.as_ref() {
+						$(
+							$rest => if !v.is_empty() {
+								res.$name.replace(v).is_some()
+							} else {
+								false
+							},
+						)*
+						_ => {
+							if k.starts_with("response-") || k.starts_with("X-Amz-") {
+								false
+							} else if v.as_ref().is_empty() {
+								if res.keyword.replace(k).is_some() {
+									return Err(Error::BadRequest("Multiple keywords".to_owned()));
+								}
+								continue;
+							} else {
+								debug!("Received an unknown query parameter: '{}'", k);
+								false
+							}
+						}
+					};
+					if repeated {
+						return Err(Error::BadRequest(format!(
+							"Query parameter repeated: '{}'",
+							k
+						)));
+					}
+				}
+				Ok(res)
+			}
+
+			/// Get an error message in case not all parameters were used when extracting them to
+			/// build an Endpoint variant
+			fn nonempty_message(&self) -> Option<&str> {
+				if self.keyword.is_some() {
+					Some("Keyword not used")
+				} $(
+					else if self.$name.is_some() {
+						Some(concat!("'", $rest, "'"))
+					}
+				)* else {
+					None
+				}
+			}
+		}
+	}
+}
+
+pub(crate) use generateQueryParameters;
+pub(crate) use router_match;
diff --git a/src/api/s3/api_server.rs b/src/api/s3/api_server.rs
new file mode 100644
index 00000000..78a69d53
--- /dev/null
+++ b/src/api/s3/api_server.rs
@@ -0,0 +1,401 @@
+use std::sync::Arc;
+
+use async_trait::async_trait;
+
+use futures::future::Future;
+use hyper::header;
+use hyper::{Body, Method, Request, Response};
+
+use opentelemetry::{trace::SpanRef, KeyValue};
+
+use garage_table::util::*;
+use garage_util::error::Error as GarageError;
+
+use garage_model::garage::Garage;
+use garage_model::key_table::Key;
+
+use crate::error::*;
+use crate::generic_server::*;
+
+use crate::signature::payload::check_payload_signature;
+use crate::signature::streaming::*;
+
+use crate::helpers::*;
+use crate::s3::bucket::*;
+use crate::s3::copy::*;
+use crate::s3::cors::*;
+use crate::s3::delete::*;
+use crate::s3::get::*;
+use crate::s3::list::*;
+use crate::s3::post_object::handle_post_object;
+use crate::s3::put::*;
+use crate::s3::router::Endpoint;
+use crate::s3::website::*;
+
+pub struct S3ApiServer {
+	garage: Arc<Garage>,
+}
+
+pub(crate) struct S3ApiEndpoint {
+	bucket_name: Option<String>,
+	endpoint: Endpoint,
+}
+
+impl S3ApiServer {
+	pub async fn run(
+		garage: Arc<Garage>,
+		shutdown_signal: impl Future<Output = ()>,
+	) -> Result<(), GarageError> {
+		let addr = garage.config.s3_api.api_bind_addr;
+
+		ApiServer::new(
+			garage.config.s3_api.s3_region.clone(),
+			S3ApiServer { garage },
+		)
+		.run_server(addr, shutdown_signal)
+		.await
+	}
+
+	async fn handle_request_without_bucket(
+		&self,
+		_req: Request<Body>,
+		api_key: Key,
+		endpoint: Endpoint,
+	) -> Result<Response<Body>, Error> {
+		match endpoint {
+			Endpoint::ListBuckets => handle_list_buckets(&self.garage, &api_key).await,
+			endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
+		}
+	}
+}
+
+#[async_trait]
+impl ApiHandler for S3ApiServer {
+	const API_NAME: &'static str = "s3";
+	const API_NAME_DISPLAY: &'static str = "S3";
+
+	type Endpoint = S3ApiEndpoint;
+
+	fn parse_endpoint(&self, req: &Request<Body>) -> Result<S3ApiEndpoint, Error> {
+		let authority = req
+			.headers()
+			.get(header::HOST)
+			.ok_or_bad_request("Host header required")?
+ .to_str()?; + + let host = authority_to_host(authority)?; + + let bucket_name = self + .garage + .config + .s3_api + .root_domain + .as_ref() + .and_then(|root_domain| host_to_bucket(&host, root_domain)); + + let (endpoint, bucket_name) = + Endpoint::from_request(req, bucket_name.map(ToOwned::to_owned))?; + + Ok(S3ApiEndpoint { + bucket_name, + endpoint, + }) + } + + async fn handle( + &self, + req: Request, + endpoint: S3ApiEndpoint, + ) -> Result, Error> { + let S3ApiEndpoint { + bucket_name, + endpoint, + } = endpoint; + let garage = self.garage.clone(); + + // Some endpoints are processed early, before we even check for an API key + if let Endpoint::PostObject = endpoint { + return handle_post_object(garage, req, bucket_name.unwrap()).await; + } + if let Endpoint::Options = endpoint { + return handle_options_s3api(garage, &req, bucket_name).await; + } + + let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?; + let api_key = api_key.ok_or_else(|| { + Error::Forbidden("Garage does not support anonymous access yet".to_string()) + })?; + + let req = parse_streaming_body( + &api_key, + req, + &mut content_sha256, + &garage.config.s3_api.s3_region, + "s3", + )?; + + let bucket_name = match bucket_name { + None => { + return self + .handle_request_without_bucket(req, api_key, endpoint) + .await + } + Some(bucket) => bucket.to_string(), + }; + + // Special code path for CreateBucket API endpoint + if let Endpoint::CreateBucket {} = endpoint { + return handle_create_bucket(&garage, req, content_sha256, api_key, bucket_name).await; + } + + let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?; + let bucket = garage + .bucket_table + .get(&EmptyKey, &bucket_id) + .await? + .filter(|b| !b.state.is_deleted()) + .ok_or(Error::NoSuchBucket)?; + + let allowed = match endpoint.authorization_type() { + Authorization::Read => api_key.allow_read(&bucket_id), + Authorization::Write => api_key.allow_write(&bucket_id), + Authorization::Owner => api_key.allow_owner(&bucket_id), + _ => unreachable!(), + }; + + if !allowed { + return Err(Error::Forbidden( + "Operation is not allowed for this key.".to_string(), + )); + } + + // Look up what CORS rule might apply to response. + // Requests for methods different than GET, HEAD or POST + // are always preflighted, i.e. the browser should make + // an OPTIONS call before to check it is allowed + let matching_cors_rule = match *req.method() { + Method::GET | Method::HEAD | Method::POST => find_matching_cors_rule(&bucket, &req)?, + _ => None, + }; + + let resp = match endpoint { + Endpoint::HeadObject { + key, part_number, .. + } => handle_head(garage, &req, bucket_id, &key, part_number).await, + Endpoint::GetObject { + key, part_number, .. 
+ } => handle_get(garage, &req, bucket_id, &key, part_number).await, + Endpoint::UploadPart { + key, + part_number, + upload_id, + } => { + handle_put_part( + garage, + req, + bucket_id, + &key, + part_number, + &upload_id, + content_sha256, + ) + .await + } + Endpoint::CopyObject { key } => { + handle_copy(garage, &api_key, &req, bucket_id, &key).await + } + Endpoint::UploadPartCopy { + key, + part_number, + upload_id, + } => { + handle_upload_part_copy( + garage, + &api_key, + &req, + bucket_id, + &key, + part_number, + &upload_id, + ) + .await + } + Endpoint::PutObject { key } => { + handle_put(garage, req, bucket_id, &key, content_sha256).await + } + Endpoint::AbortMultipartUpload { key, upload_id } => { + handle_abort_multipart_upload(garage, bucket_id, &key, &upload_id).await + } + Endpoint::DeleteObject { key, .. } => handle_delete(garage, bucket_id, &key).await, + Endpoint::CreateMultipartUpload { key } => { + handle_create_multipart_upload(garage, &req, &bucket_name, bucket_id, &key).await + } + Endpoint::CompleteMultipartUpload { key, upload_id } => { + handle_complete_multipart_upload( + garage, + req, + &bucket_name, + bucket_id, + &key, + &upload_id, + content_sha256, + ) + .await + } + Endpoint::CreateBucket {} => unreachable!(), + Endpoint::HeadBucket {} => { + let empty_body: Body = Body::from(vec![]); + let response = Response::builder().body(empty_body).unwrap(); + Ok(response) + } + Endpoint::DeleteBucket {} => { + handle_delete_bucket(&garage, bucket_id, bucket_name, api_key).await + } + Endpoint::GetBucketLocation {} => handle_get_bucket_location(garage), + Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(), + Endpoint::ListObjects { + delimiter, + encoding_type, + marker, + max_keys, + prefix, + } => { + handle_list( + garage, + &ListObjectsQuery { + common: ListQueryCommon { + bucket_name, + bucket_id, + delimiter: delimiter.map(|d| d.to_string()), + page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + prefix: prefix.unwrap_or_default(), + urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), + }, + is_v2: false, + marker, + continuation_token: None, + start_after: None, + }, + ) + .await + } + Endpoint::ListObjectsV2 { + delimiter, + encoding_type, + max_keys, + prefix, + continuation_token, + start_after, + list_type, + .. 
+ } => { + if list_type == "2" { + handle_list( + garage, + &ListObjectsQuery { + common: ListQueryCommon { + bucket_name, + bucket_id, + delimiter: delimiter.map(|d| d.to_string()), + page_size: max_keys.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), + prefix: prefix.unwrap_or_default(), + }, + is_v2: true, + marker: None, + continuation_token, + start_after, + }, + ) + .await + } else { + Err(Error::BadRequest(format!( + "Invalid endpoint: list-type={}", + list_type + ))) + } + } + Endpoint::ListMultipartUploads { + delimiter, + encoding_type, + key_marker, + max_uploads, + prefix, + upload_id_marker, + } => { + handle_list_multipart_upload( + garage, + &ListMultipartUploadsQuery { + common: ListQueryCommon { + bucket_name, + bucket_id, + delimiter: delimiter.map(|d| d.to_string()), + page_size: max_uploads.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + prefix: prefix.unwrap_or_default(), + urlencode_resp: encoding_type.map(|e| e == "url").unwrap_or(false), + }, + key_marker, + upload_id_marker, + }, + ) + .await + } + Endpoint::ListParts { + key, + max_parts, + part_number_marker, + upload_id, + } => { + handle_list_parts( + garage, + &ListPartsQuery { + bucket_name, + bucket_id, + key, + upload_id, + part_number_marker: part_number_marker.map(|p| p.clamp(1, 10000)), + max_parts: max_parts.map(|p| p.clamp(1, 1000)).unwrap_or(1000), + }, + ) + .await + } + Endpoint::DeleteObjects {} => { + handle_delete_objects(garage, bucket_id, req, content_sha256).await + } + Endpoint::GetBucketWebsite {} => handle_get_website(&bucket).await, + Endpoint::PutBucketWebsite {} => { + handle_put_website(garage, bucket_id, req, content_sha256).await + } + Endpoint::DeleteBucketWebsite {} => handle_delete_website(garage, bucket_id).await, + Endpoint::GetBucketCors {} => handle_get_cors(&bucket).await, + Endpoint::PutBucketCors {} => { + handle_put_cors(garage, bucket_id, req, content_sha256).await + } + Endpoint::DeleteBucketCors {} => handle_delete_cors(garage, bucket_id).await, + endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())), + }; + + // If request was a success and we have a CORS rule that applies to it, + // add the corresponding CORS headers to the response + let mut resp_ok = resp?; + if let Some(rule) = matching_cors_rule { + add_cors_headers(&mut resp_ok, rule) + .ok_or_internal_error("Invalid bucket CORS configuration")?; + } + + Ok(resp_ok) + } +} + +impl ApiEndpoint for S3ApiEndpoint { + fn name(&self) -> &'static str { + self.endpoint.name() + } + + fn add_span_attributes(&self, span: SpanRef<'_>) { + span.set_attribute(KeyValue::new( + "bucket", + self.bucket_name.clone().unwrap_or_default(), + )); + } +} diff --git a/src/api/s3/bucket.rs b/src/api/s3/bucket.rs new file mode 100644 index 00000000..93048a8c --- /dev/null +++ b/src/api/s3/bucket.rs @@ -0,0 +1,358 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; + +use garage_model::bucket_alias_table::*; +use garage_model::bucket_table::Bucket; +use garage_model::garage::Garage; +use garage_model::key_table::Key; +use garage_model::permission::BucketKeyPerm; +use garage_model::s3::object_table::ObjectFilter; +use garage_table::util::*; +use garage_util::crdt::*; +use garage_util::data::*; +use garage_util::time::*; + +use crate::error::*; +use crate::s3::xml as s3_xml; +use crate::signature::verify_signed_content; + +pub fn handle_get_bucket_location(garage: Arc) -> Result, Error> { + let loc = 
s3_xml::LocationConstraint { + xmlns: (), + region: garage.config.s3_api.s3_region.to_string(), + }; + let xml = s3_xml::to_xml_with_header(&loc)?; + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(Body::from(xml.into_bytes()))?) +} + +pub fn handle_get_bucket_versioning() -> Result, Error> { + let versioning = s3_xml::VersioningConfiguration { + xmlns: (), + status: None, + }; + + let xml = s3_xml::to_xml_with_header(&versioning)?; + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(Body::from(xml.into_bytes()))?) +} + +pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result, Error> { + let key_p = api_key.params().ok_or_internal_error( + "Key should not be in deleted state at this point (in handle_list_buckets)", + )?; + + // Collect buckets user has access to + let ids = api_key + .state + .as_option() + .unwrap() + .authorized_buckets + .items() + .iter() + .filter(|(_, perms)| perms.is_any()) + .map(|(id, _)| *id) + .collect::>(); + + let mut buckets_by_id = HashMap::new(); + let mut aliases = HashMap::new(); + + for bucket_id in ids.iter() { + let bucket = garage.bucket_table.get(&EmptyKey, bucket_id).await?; + if let Some(bucket) = bucket { + for (alias, _, _active) in bucket.aliases().iter().filter(|(_, _, active)| *active) { + let alias_opt = garage.bucket_alias_table.get(&EmptyKey, alias).await?; + if let Some(alias_ent) = alias_opt { + if *alias_ent.state.get() == Some(*bucket_id) { + aliases.insert(alias_ent.name().to_string(), *bucket_id); + } + } + } + if let Deletable::Present(param) = bucket.state { + buckets_by_id.insert(bucket_id, param); + } + } + } + + for (alias, _, id_opt) in key_p.local_aliases.items() { + if let Some(id) = id_opt { + aliases.insert(alias.clone(), *id); + } + } + + // Generate response + let list_buckets = s3_xml::ListAllMyBucketsResult { + owner: s3_xml::Owner { + display_name: s3_xml::Value(key_p.name.get().to_string()), + id: s3_xml::Value(api_key.key_id.to_string()), + }, + buckets: s3_xml::BucketList { + entries: aliases + .iter() + .filter_map(|(name, id)| buckets_by_id.get(id).map(|p| (name, id, p))) + .map(|(name, _id, param)| s3_xml::Bucket { + creation_date: s3_xml::Value(msec_to_rfc3339(param.creation_date)), + name: s3_xml::Value(name.to_string()), + }) + .collect(), + }, + }; + + let xml = s3_xml::to_xml_with_header(&list_buckets)?; + trace!("xml: {}", xml); + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(Body::from(xml))?) 
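+	// The XML serialized above follows the usual S3 ListAllMyBucketsResult shape,
+	// roughly (illustrative): <ListAllMyBucketsResult><Owner>...</Owner><Buckets>
+	//   <Bucket><CreationDate>...</CreationDate><Name>...</Name></Bucket>
+	// </Buckets></ListAllMyBucketsResult>, with one <Bucket> per visible alias.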
+} + +pub async fn handle_create_bucket( + garage: &Garage, + req: Request, + content_sha256: Option, + api_key: Key, + bucket_name: String, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let cmd = + parse_create_bucket_xml(&body[..]).ok_or_bad_request("Invalid create bucket XML query")?; + + if let Some(location_constraint) = cmd { + if location_constraint != garage.config.s3_api.s3_region { + return Err(Error::BadRequest(format!( + "Cannot satisfy location constraint `{}`: buckets can only be created in region `{}`", + location_constraint, + garage.config.s3_api.s3_region + ))); + } + } + + let key_params = api_key + .params() + .ok_or_internal_error("Key should not be deleted at this point")?; + + let existing_bucket = if let Some(Some(bucket_id)) = key_params.local_aliases.get(&bucket_name) + { + Some(*bucket_id) + } else { + garage + .bucket_helper() + .resolve_global_bucket_name(&bucket_name) + .await? + }; + + if let Some(bucket_id) = existing_bucket { + // Check we have write or owner permission on the bucket, + // in that case it's fine, return 200 OK, bucket exists; + // otherwise return a forbidden error. + let kp = api_key.bucket_permissions(&bucket_id); + if !(kp.allow_write || kp.allow_owner) { + return Err(Error::BucketAlreadyExists); + } + } else { + // Create the bucket! + if !is_valid_bucket_name(&bucket_name) { + return Err(Error::BadRequest(format!( + "{}: {}", + bucket_name, INVALID_BUCKET_NAME_MESSAGE + ))); + } + + let bucket = Bucket::new(); + garage.bucket_table.insert(&bucket).await?; + + garage + .bucket_helper() + .set_bucket_key_permissions(bucket.id, &api_key.key_id, BucketKeyPerm::ALL_PERMISSIONS) + .await?; + + garage + .bucket_helper() + .set_local_bucket_alias(bucket.id, &api_key.key_id, &bucket_name) + .await?; + } + + Ok(Response::builder() + .header("Location", format!("/{}", bucket_name)) + .body(Body::empty()) + .unwrap()) +} + +pub async fn handle_delete_bucket( + garage: &Garage, + bucket_id: Uuid, + bucket_name: String, + api_key: Key, +) -> Result, Error> { + let key_params = api_key + .params() + .ok_or_internal_error("Key should not be deleted at this point")?; + + let is_local_alias = matches!(key_params.local_aliases.get(&bucket_name), Some(Some(_))); + + let mut bucket = garage + .bucket_helper() + .get_existing_bucket(bucket_id) + .await?; + let bucket_state = bucket.state.as_option().unwrap(); + + // If the bucket has no other aliases, this is a true deletion. + // Otherwise, it is just an alias removal. + + let has_other_global_aliases = bucket_state + .aliases + .items() + .iter() + .filter(|(_, _, active)| *active) + .any(|(n, _, _)| is_local_alias || (*n != bucket_name)); + + let has_other_local_aliases = bucket_state + .local_aliases + .items() + .iter() + .filter(|(_, _, active)| *active) + .any(|((k, n), _, _)| !is_local_alias || *n != bucket_name || *k != api_key.key_id); + + if !has_other_global_aliases && !has_other_local_aliases { + // Delete bucket + + // Check bucket is empty + let objects = garage + .object_table + .get_range( + &bucket_id, + None, + Some(ObjectFilter::IsData), + 10, + EnumerationOrder::Forward, + ) + .await?; + if !objects.is_empty() { + return Err(Error::BucketNotEmpty); + } + + // --- done checking, now commit --- + // 1. 
delete bucket alias + if is_local_alias { + garage + .bucket_helper() + .unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name) + .await?; + } else { + garage + .bucket_helper() + .unset_global_bucket_alias(bucket_id, &bucket_name) + .await?; + } + + // 2. delete authorization from keys that had access + for (key_id, _) in bucket.authorized_keys() { + garage + .bucket_helper() + .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS) + .await?; + } + + // 3. delete bucket + bucket.state = Deletable::delete(); + garage.bucket_table.insert(&bucket).await?; + } else if is_local_alias { + // Just unalias + garage + .bucket_helper() + .unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name) + .await?; + } else { + // Just unalias (but from global namespace) + garage + .bucket_helper() + .unset_global_bucket_alias(bucket_id, &bucket_name) + .await?; + } + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) +} + +fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option> { + // Returns None if invalid data + // Returns Some(None) if no location constraint is given + // Returns Some(Some("xxxx")) where xxxx is the given location constraint + + let xml_str = std::str::from_utf8(xml_bytes).ok()?; + if xml_str.trim_matches(char::is_whitespace).is_empty() { + return Some(None); + } + + let xml = roxmltree::Document::parse(xml_str).ok()?; + + let cbc = xml.root().first_child()?; + if !cbc.has_tag_name("CreateBucketConfiguration") { + return None; + } + + let mut ret = None; + for item in cbc.children() { + println!("{:?}", item); + if item.has_tag_name("LocationConstraint") { + if ret != None { + return None; + } + ret = Some(item.text()?.to_string()); + } else if !item.is_text() { + return None; + } + } + + Some(ret) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn create_bucket() { + assert_eq!(parse_create_bucket_xml(br#""#), Some(None)); + assert_eq!( + parse_create_bucket_xml( + br#" + + + "# + ), + Some(None) + ); + assert_eq!( + parse_create_bucket_xml( + br#" + + Europe + + "# + ), + Some(Some("Europe".into())) + ); + assert_eq!( + parse_create_bucket_xml( + br#" + + + "# + ), + None + ); + } +} diff --git a/src/api/s3/copy.rs b/src/api/s3/copy.rs new file mode 100644 index 00000000..4e94d887 --- /dev/null +++ b/src/api/s3/copy.rs @@ -0,0 +1,660 @@ +use std::pin::Pin; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use futures::{stream, stream::Stream, StreamExt, TryFutureExt}; +use md5::{Digest as Md5Digest, Md5}; + +use hyper::{Body, Request, Response}; +use serde::Serialize; + +use garage_table::*; +use garage_util::data::*; +use garage_util::time::*; + +use garage_model::garage::Garage; +use garage_model::key_table::Key; +use garage_model::s3::block_ref_table::*; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; + +use crate::error::*; +use crate::helpers::{parse_bucket_key, resolve_bucket}; +use crate::s3::put::{decode_upload_id, get_headers}; +use crate::s3::xml::{self as s3_xml, xmlns_tag}; + +pub async fn handle_copy( + garage: Arc, + api_key: &Key, + req: &Request, + dest_bucket_id: Uuid, + dest_key: &str, +) -> Result, Error> { + let copy_precondition = CopyPreconditionHeaders::parse(req)?; + + let source_object = get_copy_source(&garage, api_key, req).await?; + + let (source_version, source_version_data, source_version_meta) = + extract_source_info(&source_object)?; + + // Check precondition, e.g. 
x-amz-copy-source-if-match + copy_precondition.check(source_version, &source_version_meta.etag)?; + + // Generate parameters for copied object + let new_uuid = gen_uuid(); + let new_timestamp = now_msec(); + + // Implement x-amz-metadata-directive: REPLACE + let new_meta = match req.headers().get("x-amz-metadata-directive") { + Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => ObjectVersionMeta { + headers: get_headers(req.headers())?, + size: source_version_meta.size, + etag: source_version_meta.etag.clone(), + }, + _ => source_version_meta.clone(), + }; + + let etag = new_meta.etag.to_string(); + + // Save object copy + match source_version_data { + ObjectVersionData::DeleteMarker => unreachable!(), + ObjectVersionData::Inline(_meta, bytes) => { + let dest_object_version = ObjectVersion { + uuid: new_uuid, + timestamp: new_timestamp, + state: ObjectVersionState::Complete(ObjectVersionData::Inline( + new_meta, + bytes.clone(), + )), + }; + let dest_object = Object::new( + dest_bucket_id, + dest_key.to_string(), + vec![dest_object_version], + ); + garage.object_table.insert(&dest_object).await?; + } + ObjectVersionData::FirstBlock(_meta, first_block_hash) => { + // Get block list from source version + let source_version = garage + .version_table + .get(&source_version.uuid, &EmptyKey) + .await?; + let source_version = source_version.ok_or(Error::NoSuchKey)?; + + // Write an "uploading" marker in Object table + // This holds a reference to the object in the Version table + // so that it won't be deleted, e.g. by repair_versions. + let tmp_dest_object_version = ObjectVersion { + uuid: new_uuid, + timestamp: new_timestamp, + state: ObjectVersionState::Uploading(new_meta.headers.clone()), + }; + let tmp_dest_object = Object::new( + dest_bucket_id, + dest_key.to_string(), + vec![tmp_dest_object_version], + ); + garage.object_table.insert(&tmp_dest_object).await?; + + // Write version in the version table. Even with empty block list, + // this means that the BlockRef entries linked to this version cannot be + // marked as deleted (they are marked as deleted only if the Version + // doesn't exist or is marked as deleted). + let mut dest_version = + Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false); + garage.version_table.insert(&dest_version).await?; + + // Fill in block list for version and insert block refs + for (bk, bv) in source_version.blocks.items().iter() { + dest_version.blocks.put(*bk, *bv); + } + let dest_block_refs = dest_version + .blocks + .items() + .iter() + .map(|b| BlockRef { + block: b.1.hash, + version: new_uuid, + deleted: false.into(), + }) + .collect::>(); + futures::try_join!( + garage.version_table.insert(&dest_version), + garage.block_ref_table.insert_many(&dest_block_refs[..]), + )?; + + // Insert final object + // We do this last because otherwise there is a race condition in the case where + // the copy call has the same source and destination (this happens, rclone does + // it to update the modification timestamp for instance). If we did this concurrently + // with the stuff before, the block's reference counts could be decremented before + // they are incremented again for the new version, leading to data being deleted. 
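+			// Sketch of the interleaving this avoids for a self-copy: if the final
+			// object were inserted first, the old version would be superseded, its
+			// block refs could be decremented (and the blocks reclaimed) before the
+			// new version's refs are incremented; writing the final object last
+			// keeps the old version alive until the new refs are in place.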
+ let dest_object_version = ObjectVersion { + uuid: new_uuid, + timestamp: new_timestamp, + state: ObjectVersionState::Complete(ObjectVersionData::FirstBlock( + new_meta, + *first_block_hash, + )), + }; + let dest_object = Object::new( + dest_bucket_id, + dest_key.to_string(), + vec![dest_object_version], + ); + garage.object_table.insert(&dest_object).await?; + } + } + + let last_modified = msec_to_rfc3339(new_timestamp); + let result = CopyObjectResult { + last_modified: s3_xml::Value(last_modified), + etag: s3_xml::Value(format!("\"{}\"", etag)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .header("x-amz-version-id", hex::encode(new_uuid)) + .header( + "x-amz-copy-source-version-id", + hex::encode(source_version.uuid), + ) + .body(Body::from(xml))?) +} + +pub async fn handle_upload_part_copy( + garage: Arc, + api_key: &Key, + req: &Request, + dest_bucket_id: Uuid, + dest_key: &str, + part_number: u64, + upload_id: &str, +) -> Result, Error> { + let copy_precondition = CopyPreconditionHeaders::parse(req)?; + + let dest_version_uuid = decode_upload_id(upload_id)?; + + let dest_key = dest_key.to_string(); + let (source_object, dest_object) = futures::try_join!( + get_copy_source(&garage, api_key, req), + garage + .object_table + .get(&dest_bucket_id, &dest_key) + .map_err(Error::from), + )?; + let dest_object = dest_object.ok_or(Error::NoSuchKey)?; + + let (source_object_version, source_version_data, source_version_meta) = + extract_source_info(&source_object)?; + + // Check precondition on source, e.g. x-amz-copy-source-if-match + copy_precondition.check(source_object_version, &source_version_meta.etag)?; + + // Check source range is valid + let source_range = match req.headers().get("x-amz-copy-source-range") { + Some(range) => { + let range_str = range.to_str()?; + let mut ranges = http_range::HttpRange::parse(range_str, source_version_meta.size) + .map_err(|e| (e, source_version_meta.size))?; + if ranges.len() != 1 { + return Err(Error::BadRequest( + "Invalid x-amz-copy-source-range header: exactly 1 range must be given".into(), + )); + } else { + ranges.pop().unwrap() + } + } + None => http_range::HttpRange { + start: 0, + length: source_version_meta.size, + }, + }; + + // Check destination version is indeed in uploading state + if !dest_object + .versions() + .iter() + .any(|v| v.uuid == dest_version_uuid && v.is_uploading()) + { + return Err(Error::NoSuchUpload); + } + + // Check source version is not inlined + match source_version_data { + ObjectVersionData::DeleteMarker => unreachable!(), + ObjectVersionData::Inline(_meta, _bytes) => { + // This is only for small files, we don't bother handling this. 
+			// (in AWS, UploadPartCopy works for parts of at least 5MB, which
+			// is never the case for an inline object)
+			return Err(Error::BadRequest(
+				"Source object is too small (minimum part size is 5MB)".into(),
+			));
+		}
+		ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (),
+	};
+
+	// Fetch source version with its block list,
+	// and destination version to check part hasn't yet been uploaded
+	let (source_version, dest_version) = futures::try_join!(
+		garage
+			.version_table
+			.get(&source_object_version.uuid, &EmptyKey),
+		garage.version_table.get(&dest_version_uuid, &EmptyKey),
+	)?;
+	let source_version = source_version.ok_or(Error::NoSuchKey)?;
+
+	// Check this part number hasn't yet been uploaded
+	if let Some(dv) = dest_version {
+		if dv.has_part_number(part_number) {
+			return Err(Error::BadRequest(format!(
+				"Part number {} has already been uploaded",
+				part_number
+			)));
+		}
+	}
+
+	// We want to reuse blocks from the source version as much as possible.
+	// However, we still need to get the data from these blocks
+	// because we need to know it to calculate the MD5sum of the part
+	// which is used as its ETag.
+
+	// First, calculate what blocks we want to keep,
+	// and the subrange of the block to take, if the bounds of the
+	// requested range are in the middle.
+	let (range_begin, range_end) = (source_range.start, source_range.start + source_range.length);
+
+	let mut blocks_to_copy = vec![];
+	let mut current_offset = 0;
+	for (_bk, block) in source_version.blocks.items().iter() {
+		let (block_begin, block_end) = (current_offset, current_offset + block.size);
+
+		if block_begin < range_end && block_end > range_begin {
+			let subrange_begin = if block_begin < range_begin {
+				Some(range_begin - block_begin)
+			} else {
+				None
+			};
+			let subrange_end = if block_end > range_end {
+				Some(range_end - block_begin)
+			} else {
+				None
+			};
+			let range_to_copy = match (subrange_begin, subrange_end) {
+				(Some(b), Some(e)) => Some(b as usize..e as usize),
+				(None, Some(e)) => Some(0..e as usize),
+				(Some(b), None) => Some(b as usize..block.size as usize),
+				(None, None) => None,
+			};
+
+			blocks_to_copy.push((block.hash, range_to_copy));
+		}
+
+		current_offset = block_end;
+	}
+
+	// Now, actually copy the blocks
+	let mut md5hasher = Md5::new();
+
+	// First, create a stream that is able to read the source blocks
+	// and extract the subrange if necessary.
+	// The second returned value is an Option<Hash>, that is Some
+	// if and only if the block returned is a block that already existed
+	// in the Garage data store (thus we don't need to save it again).
+	let garage2 = garage.clone();
+	let source_blocks = stream::iter(blocks_to_copy)
+		.flat_map(|(block_hash, range_to_copy)| {
+			let garage3 = garage2.clone();
+			stream::once(async move {
+				let data = garage3.block_manager.rpc_get_block(&block_hash).await?;
+				match range_to_copy {
+					Some(r) => Ok((data[r].to_vec(), None)),
+					None => Ok((data, Some(block_hash))),
+				}
+			})
+		})
+		.peekable();
+
+	// The defragmenter is a custom stream (defined below) that concatenates
+	// consecutive block parts when they are too small.
+	// It returns a series of (Vec<u8>, Option<Hash>).
+	// When it is done, it returns an empty vec.
+	// Same as the previous iterator, the Option is Some(_) if and only if
+	// it's an existing block of the Garage data store.
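+	// For instance (illustrative): a small subrange taken from the middle of one
+	// source block arrives as (vec, None), an untouched block as (data, Some(hash));
+	// the defragmenter may concatenate the small piece with following data, up to
+	// block_size, before it is hashed and stored as a new block.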
+	let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks));
+
+	let mut current_offset = 0;
+	let mut next_block = defragmenter.next().await?;
+
+	loop {
+		let (data, existing_block_hash) = next_block;
+		if data.is_empty() {
+			break;
+		}
+
+		md5hasher.update(&data[..]);
+
+		let must_upload = existing_block_hash.is_none();
+		let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..]));
+
+		let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
+		version.blocks.put(
+			VersionBlockKey {
+				part_number,
+				offset: current_offset,
+			},
+			VersionBlock {
+				hash: final_hash,
+				size: data.len() as u64,
+			},
+		);
+		current_offset += data.len() as u64;
+
+		let block_ref = BlockRef {
+			block: final_hash,
+			version: dest_version_uuid,
+			deleted: false.into(),
+		};
+
+		let garage2 = garage.clone();
+		let res = futures::try_join!(
+			// Thing 1: if the block is not exactly a block that existed before,
+			// we need to insert that data as a new block.
+			async move {
+				if must_upload {
+					garage2.block_manager.rpc_put_block(final_hash, data).await
+				} else {
+					Ok(())
+				}
+			},
+			// Thing 2: we need to insert the block in the version
+			garage.version_table.insert(&version),
+			// Thing 3: we need to add a block reference
+			garage.block_ref_table.insert(&block_ref),
+			// Thing 4: we need to prefetch the next block
+			defragmenter.next(),
+		)?;
+		next_block = res.3;
+	}
+
+	let data_md5sum = md5hasher.finalize();
+	let etag = hex::encode(data_md5sum);
+
+	// Put the part's ETag in the version table
+	let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false);
+	version.parts_etags.put(part_number, etag.clone());
+	garage.version_table.insert(&version).await?;
+
+	// Send the response with the part's ETag and the source's last-modified date
+	let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult {
+		xmlns: (),
+		etag: s3_xml::Value(format!("\"{}\"", etag)),
+		last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)),
+	})?;
+
+	Ok(Response::builder()
+		.header("Content-Type", "application/xml")
+		.header(
+			"x-amz-copy-source-version-id",
+			hex::encode(source_object_version.uuid),
+		)
+		.body(Body::from(resp_xml))?)
+}
+
+async fn get_copy_source(
+	garage: &Garage,
+	api_key: &Key,
+	req: &Request<Body>,
+) -> Result<Object, Error> {
+	let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
+	let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
+
+	let (source_bucket, source_key) = parse_bucket_key(&copy_source, None)?;
+	let source_bucket_id = resolve_bucket(garage, &source_bucket.to_string(), api_key).await?;
+
+	if !api_key.allow_read(&source_bucket_id) {
+		return Err(Error::Forbidden(format!(
+			"Reading from bucket {} not allowed for this key",
+			source_bucket
+		)));
+	}
+
+	let source_key = source_key.ok_or_bad_request("No source key specified")?;
+
+	let source_object = garage
+		.object_table
+		.get(&source_bucket_id, &source_key.to_string())
+		.await?
+		.ok_or(Error::NoSuchKey)?;
+
+	Ok(source_object)
+}
+
+fn extract_source_info(
+	source_object: &Object,
+) -> Result<(&ObjectVersion, &ObjectVersionData, &ObjectVersionMeta), Error> {
+	let source_version = source_object
+		.versions()
+		.iter()
+		.rev()
+		.find(|v| v.is_complete())
+		.ok_or(Error::NoSuchKey)?;
+
+	let source_version_data = match &source_version.state {
+		ObjectVersionState::Complete(x) => x,
+		_ => unreachable!(),
+	};
+
+	let source_version_meta = match source_version_data {
+		ObjectVersionData::DeleteMarker => {
+			return Err(Error::NoSuchKey);
+		}
+		ObjectVersionData::Inline(meta, _bytes) => meta,
+		ObjectVersionData::FirstBlock(meta, _fbh) => meta,
+	};
+
+	Ok((source_version, source_version_data, source_version_meta))
+}
+
+struct CopyPreconditionHeaders {
+	copy_source_if_match: Option<Vec<String>>,
+	copy_source_if_modified_since: Option<SystemTime>,
+	copy_source_if_none_match: Option<Vec<String>>,
+	copy_source_if_unmodified_since: Option<SystemTime>,
+}
+
+impl CopyPreconditionHeaders {
+	fn parse(req: &Request<Body>) -> Result<Self, Error> {
+		Ok(Self {
+			copy_source_if_match: req
+				.headers()
+				.get("x-amz-copy-source-if-match")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(|x| {
+					x.split(',')
+						.map(|m| m.trim().trim_matches('"').to_string())
+						.collect::<Vec<_>>()
+				}),
+			copy_source_if_modified_since: req
+				.headers()
+				.get("x-amz-copy-source-if-modified-since")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(httpdate::parse_http_date)
+				.transpose()
+				.ok_or_bad_request("Invalid date in x-amz-copy-source-if-modified-since")?,
+			copy_source_if_none_match: req
+				.headers()
+				.get("x-amz-copy-source-if-none-match")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(|x| {
+					x.split(',')
+						.map(|m| m.trim().trim_matches('"').to_string())
+						.collect::<Vec<_>>()
+				}),
+			copy_source_if_unmodified_since: req
+				.headers()
+				.get("x-amz-copy-source-if-unmodified-since")
+				.map(|x| x.to_str())
+				.transpose()?
+				.map(httpdate::parse_http_date)
+				.transpose()
+				.ok_or_bad_request("Invalid date in x-amz-copy-source-if-unmodified-since")?,
+		})
+	}
+
+	fn check(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> {
+		let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp);
+
+		let ok = match (
+			&self.copy_source_if_match,
+			&self.copy_source_if_unmodified_since,
+			&self.copy_source_if_none_match,
+			&self.copy_source_if_modified_since,
+		) {
+			// TODO I'm not sure all of the conditions are evaluated correctly here
+
+			// If we have both if-match and if-unmodified-since,
+			// we basically don't care about if-unmodified-since,
+			// because the spec says that if if-match evaluates to
+			// true but if-unmodified-since evaluates to false,
+			// the copy is still done.
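+			// Example (editor's illustration): a request carrying both
+			// x-amz-copy-source-if-match: "abc" and an
+			// x-amz-copy-source-if-unmodified-since date in the past
+			// succeeds whenever the source's ETag is "abc", even though
+			// the object was modified after the given date.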
+			(Some(im), _, None, None) => im.iter().any(|x| x == etag || x == "*"),
+			(None, Some(ius), None, None) => v_date <= *ius,
+
+			// If we have both if-none-match and if-modified-since,
+			// then both of the two conditions must evaluate to true
+			(None, None, Some(inm), Some(ims)) => {
+				!inm.iter().any(|x| x == etag || x == "*") && v_date > *ims
+			}
+			(None, None, Some(inm), None) => !inm.iter().any(|x| x == etag || x == "*"),
+			(None, None, None, Some(ims)) => v_date > *ims,
+			(None, None, None, None) => true,
+			_ => {
+				return Err(Error::BadRequest(
+					"Invalid combination of x-amz-copy-source-if-xxxxx headers".into(),
+				))
+			}
+		};
+
+		if ok {
+			Ok(())
+		} else {
+			Err(Error::PreconditionFailed)
+		}
+	}
+}
+
+type BlockStreamItemOk = (Vec<u8>, Option<Hash>);
+type BlockStreamItem = Result<BlockStreamItemOk, garage_util::error::Error>;
+
+struct Defragmenter<S: Stream<Item = BlockStreamItem>> {
+	block_size: usize,
+	block_stream: Pin<Box<stream::Peekable<S>>>,
+	buffer: Vec<u8>,
+	hash: Option<Hash>,
+}
+
+impl<S: Stream<Item = BlockStreamItem>> Defragmenter<S> {
+	fn new(block_size: usize, block_stream: Pin<Box<stream::Peekable<S>>>) -> Self {
+		Self {
+			block_size,
+			block_stream,
+			buffer: vec![],
+			hash: None,
+		}
+	}
+
+	async fn next(&mut self) -> BlockStreamItem {
+		// Fill buffer while we can
+		while let Some(res) = self.block_stream.as_mut().peek().await {
+			let (peeked_next_block, _) = match res {
+				Ok(t) => t,
+				Err(_) => {
+					self.block_stream.next().await.unwrap()?;
+					unreachable!()
+				}
+			};
+
+			if self.buffer.is_empty() {
+				let (next_block, next_block_hash) = self.block_stream.next().await.unwrap()?;
+				self.buffer = next_block;
+				self.hash = next_block_hash;
+			} else if self.buffer.len() + peeked_next_block.len() > self.block_size {
+				break;
+			} else {
+				let (next_block, _) = self.block_stream.next().await.unwrap()?;
+				self.buffer.extend(next_block);
+				self.hash = None;
+			}
+		}
+
+		Ok((std::mem::take(&mut self.buffer), self.hash.take()))
+	}
+}
+
+#[derive(Debug, Serialize, PartialEq)]
+pub struct CopyObjectResult {
+	#[serde(rename = "LastModified")]
+	pub last_modified: s3_xml::Value,
+	#[serde(rename = "ETag")]
+	pub etag: s3_xml::Value,
+}
+
+#[derive(Debug, Serialize, PartialEq)]
+pub struct CopyPartResult {
+	#[serde(serialize_with = "xmlns_tag")]
+	pub xmlns: (),
+	#[serde(rename = "LastModified")]
+	pub last_modified: s3_xml::Value,
+	#[serde(rename = "ETag")]
+	pub etag: s3_xml::Value,
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::s3::xml::to_xml_with_header;
+
+	#[test]
+	fn copy_object_result() -> Result<(), Error> {
+		let copy_result = CopyObjectResult {
+			last_modified: s3_xml::Value(msec_to_rfc3339(0)),
+			etag: s3_xml::Value("\"9b2cf535f27731c974343645a3985328\"".to_string()),
+		};
+		assert_eq!(
+			to_xml_with_header(&copy_result)?,
+			"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
+<CopyObjectResult>\
+<LastModified>1970-01-01T00:00:00.000Z</LastModified>\
+<ETag>&quot;9b2cf535f27731c974343645a3985328&quot;</ETag>\
+</CopyObjectResult>"
+		);
+		Ok(())
+	}
+
+	#[test]
+	fn serialize_copy_part_result() -> Result<(), Error> {
+		let expected_retval = "\
+<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
+<CopyPartResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
+<LastModified>2011-04-11T20:34:56.000Z</LastModified>\
+<ETag>&quot;9b2cf535f27731c974343645a3985328&quot;</ETag>\
+</CopyPartResult>";
+		let v = CopyPartResult {
+			xmlns: (),
+			last_modified: s3_xml::Value("2011-04-11T20:34:56.000Z".into()),
+			etag: s3_xml::Value("\"9b2cf535f27731c974343645a3985328\"".into()),
+		};
+		println!("{}", to_xml_with_header(&v)?);
+
+		assert_eq!(to_xml_with_header(&v)?, expected_retval);
+
+		Ok(())
+	}
+}
diff --git a/src/api/s3/cors.rs b/src/api/s3/cors.rs
new file mode 100644
index 00000000..37ea2e43
--- /dev/null
+++ b/src/api/s3/cors.rs
@@ -0,0 +1,442 @@
+use quick_xml::de::from_reader;
+use std::sync::Arc;
+
+use http::header::{
+	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
+	ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
+};
+use hyper::{header::HeaderName, Body, Method, Request, Response, StatusCode};
+
+use serde::{Deserialize, Serialize};
+
+use crate::error::*;
+use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
+use crate::signature::verify_signed_content;
+
+use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
+use garage_model::garage::Garage;
+use garage_table::*;
+use garage_util::data::*;
+
+pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
+	let param = bucket
+		.params()
+		.ok_or_internal_error("Bucket should not be deleted at this point")?;
+
+	if let Some(cors) = param.cors_config.get() {
+		let wc = CorsConfiguration {
+			xmlns: (),
+			cors_rules: cors
+				.iter()
+				.map(CorsRule::from_garage_cors_rule)
+				.collect::<Vec<_>>(),
+		};
+		let xml = to_xml_with_header(&wc)?;
+		Ok(Response::builder()
+			.status(StatusCode::OK)
+			.header(http::header::CONTENT_TYPE, "application/xml")
+			.body(Body::from(xml))?)
+	} else {
+		Ok(Response::builder()
+			.status(StatusCode::NO_CONTENT)
+			.body(Body::empty())?)
+	}
+}
+
+pub async fn handle_delete_cors(
+	garage: Arc<Garage>,
+	bucket_id: Uuid,
+) -> Result<Response<Body>, Error> {
+	let mut bucket = garage
+		.bucket_table
+		.get(&EmptyKey, &bucket_id)
+		.await?
+		.ok_or(Error::NoSuchBucket)?;
+
+	let param = bucket
+		.params_mut()
+		.ok_or_internal_error("Bucket should not be deleted at this point")?;
+
+	param.cors_config.update(None);
+	garage.bucket_table.insert(&bucket).await?;
+
+	Ok(Response::builder()
+		.status(StatusCode::NO_CONTENT)
+		.body(Body::empty())?)
+}
+
+pub async fn handle_put_cors(
+	garage: Arc<Garage>,
+	bucket_id: Uuid,
+	req: Request<Body>,
+	content_sha256: Option<Hash>,
+) -> Result<Response<Body>, Error> {
+	let body = hyper::body::to_bytes(req.into_body()).await?;
+
+	if let Some(content_sha256) = content_sha256 {
+		verify_signed_content(content_sha256, &body[..])?;
+	}
+
+	let mut bucket = garage
+		.bucket_table
+		.get(&EmptyKey, &bucket_id)
+		.await?
+		.ok_or(Error::NoSuchBucket)?;
+
+	let param = bucket
+		.params_mut()
+		.ok_or_internal_error("Bucket should not be deleted at this point")?;
+
+	let conf: CorsConfiguration = from_reader(&body as &[u8])?;
+	conf.validate()?;
+
+	param
+		.cors_config
+		.update(Some(conf.into_garage_cors_config()?));
+	garage.bucket_table.insert(&bucket).await?;
+
+	Ok(Response::builder()
+		.status(StatusCode::OK)
+		.body(Body::empty())?)
+}
+
+pub async fn handle_options_s3api(
+	garage: Arc<Garage>,
+	req: &Request<Body>,
+	bucket_name: Option<String>,
+) -> Result<Response<Body>, Error> {
+	// FIXME: CORS rules of buckets with local aliases are
+	// not taken into account.
+
+	// If the bucket name is a global bucket name,
+	// we try to apply the CORS rules of that bucket.
+	// If a user has a local bucket name that has
+	// the same name, its CORS rules won't be applied
+	// and will be shadowed by the rules of the globally
+	// existing bucket (but this is inevitable because
+	// OPTIONS calls are not authenticated).
+	if let Some(bn) = bucket_name {
+		let helper = garage.bucket_helper();
+		let bucket_id = helper.resolve_global_bucket_name(&bn).await?;
+		if let Some(id) = bucket_id {
+			let bucket = garage
+				.bucket_table
+				.get(&EmptyKey, &id)
+				.await?
+				.filter(|b| !b.state.is_deleted())
+				.ok_or(Error::NoSuchBucket)?;
+			handle_options_for_bucket(req, &bucket)
+		} else {
+			// If there is a bucket name in the request, but that name
+			// does not correspond to a global alias for a bucket,
+			// then it's either a non-existing bucket or a local bucket.
+ // We have no way of knowing, because the request is not + // authenticated and thus we can't resolve local aliases. + // We take the permissive approach of allowing everything, + // because we don't want to prevent web apps that use + // local bucket names from making API calls. + Ok(Response::builder() + .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header(ACCESS_CONTROL_ALLOW_METHODS, "*") + .status(StatusCode::OK) + .body(Body::empty())?) + } + } else { + // If there is no bucket name in the request, + // we are doing a ListBuckets call, which we want to allow + // for all origins. + Ok(Response::builder() + .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header(ACCESS_CONTROL_ALLOW_METHODS, "GET") + .status(StatusCode::OK) + .body(Body::empty())?) + } +} + +pub fn handle_options_for_bucket( + req: &Request, + bucket: &Bucket, +) -> Result, Error> { + let origin = req + .headers() + .get("Origin") + .ok_or_bad_request("Missing Origin header")? + .to_str()?; + let request_method = req + .headers() + .get(ACCESS_CONTROL_REQUEST_METHOD) + .ok_or_bad_request("Missing Access-Control-Request-Method header")? + .to_str()?; + let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) { + Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::>(), + None => vec![], + }; + + if let Some(cors_config) = bucket.params().unwrap().cors_config.get() { + let matching_rule = cors_config + .iter() + .find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter())); + if let Some(rule) = matching_rule { + let mut resp = Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?; + add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?; + return Ok(resp); + } + } + + Err(Error::Forbidden("This CORS request is not allowed.".into())) +} + +pub fn find_matching_cors_rule<'a>( + bucket: &'a Bucket, + req: &Request, +) -> Result, Error> { + if let Some(cors_config) = bucket.params().unwrap().cors_config.get() { + if let Some(origin) = req.headers().get("Origin") { + let origin = origin.to_str()?; + let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) { + Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::>(), + None => vec![], + }; + return Ok(cors_config.iter().find(|rule| { + cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter()) + })); + } + } + Ok(None) +} + +fn cors_rule_matches<'a, HI, S>( + rule: &GarageCorsRule, + origin: &'a str, + method: &'a str, + mut request_headers: HI, +) -> bool +where + HI: Iterator, + S: AsRef, +{ + rule.allow_origins.iter().any(|x| x == "*" || x == origin) + && rule.allow_methods.iter().any(|x| x == "*" || x == method) + && request_headers.all(|h| { + rule.allow_headers + .iter() + .any(|x| x == "*" || x == h.as_ref()) + }) +} + +pub fn add_cors_headers( + resp: &mut Response, + rule: &GarageCorsRule, +) -> Result<(), http::header::InvalidHeaderValue> { + let h = resp.headers_mut(); + h.insert( + ACCESS_CONTROL_ALLOW_ORIGIN, + rule.allow_origins.join(", ").parse()?, + ); + h.insert( + ACCESS_CONTROL_ALLOW_METHODS, + rule.allow_methods.join(", ").parse()?, + ); + h.insert( + ACCESS_CONTROL_ALLOW_HEADERS, + rule.allow_headers.join(", ").parse()?, + ); + h.insert( + ACCESS_CONTROL_EXPOSE_HEADERS, + rule.expose_headers.join(", ").parse()?, + ); + Ok(()) +} + +// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ---- + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +#[serde(rename = "CORSConfiguration")] +pub 
struct CorsConfiguration {
+	#[serde(serialize_with = "xmlns_tag", skip_deserializing)]
+	pub xmlns: (),
+	#[serde(rename = "CORSRule")]
+	pub cors_rules: Vec<CorsRule>,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
+pub struct CorsRule {
+	#[serde(rename = "ID")]
+	pub id: Option<Value>,
+	#[serde(rename = "MaxAgeSeconds")]
+	pub max_age_seconds: Option<IntValue>,
+	#[serde(rename = "AllowedOrigin")]
+	pub allowed_origins: Vec<Value>,
+	#[serde(rename = "AllowedMethod")]
+	pub allowed_methods: Vec<Value>,
+	#[serde(rename = "AllowedHeader", default)]
+	pub allowed_headers: Vec<Value>,
+	#[serde(rename = "ExposeHeader", default)]
+	pub expose_headers: Vec<Value>,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AllowedMethod {
+	#[serde(rename = "AllowedMethod")]
+	pub allowed_method: Value,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AllowedHeader {
+	#[serde(rename = "AllowedHeader")]
+	pub allowed_header: Value,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
+pub struct ExposeHeader {
+	#[serde(rename = "ExposeHeader")]
+	pub expose_header: Value,
+}
+
+impl CorsConfiguration {
+	pub fn validate(&self) -> Result<(), Error> {
+		for r in self.cors_rules.iter() {
+			r.validate()?;
+		}
+		Ok(())
+	}
+
+	pub fn into_garage_cors_config(self) -> Result<Vec<GarageCorsRule>, Error> {
+		Ok(self
+			.cors_rules
+			.iter()
+			.map(CorsRule::to_garage_cors_rule)
+			.collect())
+	}
+}
+
+impl CorsRule {
+	pub fn validate(&self) -> Result<(), Error> {
+		for method in self.allowed_methods.iter() {
+			method
+				.0
+				.parse::<Method>()
+				.ok_or_bad_request("Invalid CORSRule method")?;
+		}
+		for header in self
+			.allowed_headers
+			.iter()
+			.chain(self.expose_headers.iter())
+		{
+			header
+				.0
+				.parse::<HeaderName>()
+				.ok_or_bad_request("Invalid HTTP header name")?;
+		}
+		Ok(())
+	}
+
+	pub fn to_garage_cors_rule(&self) -> GarageCorsRule {
+		let convert_vec =
+			|vval: &[Value]| vval.iter().map(|x| x.0.to_owned()).collect::<Vec<String>>();
+		GarageCorsRule {
+			id: self.id.as_ref().map(|x| x.0.to_owned()),
+			max_age_seconds: self.max_age_seconds.as_ref().map(|x| x.0 as u64),
+			allow_origins: convert_vec(&self.allowed_origins),
+			allow_methods: convert_vec(&self.allowed_methods),
+			allow_headers: convert_vec(&self.allowed_headers),
+			expose_headers: convert_vec(&self.expose_headers),
+		}
+	}
+
+	pub fn from_garage_cors_rule(rule: &GarageCorsRule) -> Self {
+		let convert_vec = |vval: &[String]| {
+			vval.iter()
+				.map(|x| Value(x.clone()))
+				.collect::<Vec<Value>>()
+		};
+		Self {
+			id: rule.id.as_ref().map(|x| Value(x.clone())),
+			max_age_seconds: rule.max_age_seconds.map(|x| IntValue(x as i64)),
+			allowed_origins: convert_vec(&rule.allow_origins),
+			allowed_methods: convert_vec(&rule.allow_methods),
+			allowed_headers: convert_vec(&rule.allow_headers),
+			expose_headers: convert_vec(&rule.expose_headers),
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	use quick_xml::de::from_str;
+
+	#[test]
+	fn test_deserialize() -> Result<(), Error> {
+		let message = r#"<?xml version="1.0" encoding="UTF-8"?>
+<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <CORSRule>
+   <AllowedOrigin>http://www.example.com</AllowedOrigin>
+   <AllowedMethod>PUT</AllowedMethod>
+   <AllowedMethod>POST</AllowedMethod>
+   <AllowedMethod>DELETE</AllowedMethod>
+   <AllowedHeader>*</AllowedHeader>
+ </CORSRule>
+ <CORSRule>
+   <AllowedOrigin>*</AllowedOrigin>
+   <AllowedMethod>GET</AllowedMethod>
+ </CORSRule>
+ <CORSRule>
+   <ID>qsdfjklm</ID>
+   <MaxAgeSeconds>12345</MaxAgeSeconds>
+   <AllowedOrigin>https://perdu.com</AllowedOrigin>
+   <AllowedMethod>GET</AllowedMethod>
+   <AllowedMethod>DELETE</AllowedMethod>
+   <AllowedHeader>*</AllowedHeader>
+   <ExposeHeader>*</ExposeHeader>
+ </CORSRule>
+</CORSConfiguration>"#;
+		let conf: CorsConfiguration = from_str(message).unwrap();
+		let ref_value = CorsConfiguration {
+			xmlns: (),
+			cors_rules: vec![
+				CorsRule {
+					id: None,
+					max_age_seconds: None,
+					allowed_origins: vec!["http://www.example.com".into()],
+					allowed_methods: vec!["PUT".into(), "POST".into(), "DELETE".into()],
+					allowed_headers: vec!["*".into()],
+					expose_headers: vec![],
}, + CorsRule { + id: None, + max_age_seconds: None, + allowed_origins: vec!["*".into()], + allowed_methods: vec!["GET".into()], + allowed_headers: vec![], + expose_headers: vec![], + }, + CorsRule { + id: Some("qsdfjklm".into()), + max_age_seconds: Some(IntValue(12345)), + allowed_origins: vec!["https://perdu.com".into()], + allowed_methods: vec!["GET".into(), "DELETE".into()], + allowed_headers: vec!["*".into()], + expose_headers: vec!["*".into()], + }, + ], + }; + assert_eq! { + ref_value, + conf + }; + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + Ok(()) + } +} diff --git a/src/api/s3/delete.rs b/src/api/s3/delete.rs new file mode 100644 index 00000000..1e3f1249 --- /dev/null +++ b/src/api/s3/delete.rs @@ -0,0 +1,170 @@ +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; + +use garage_util::data::*; +use garage_util::time::*; + +use garage_model::garage::Garage; +use garage_model::s3::object_table::*; + +use crate::error::*; +use crate::s3::xml as s3_xml; +use crate::signature::verify_signed_content; + +async fn handle_delete_internal( + garage: &Garage, + bucket_id: Uuid, + key: &str, +) -> Result<(Uuid, Uuid), Error> { + let object = garage + .object_table + .get(&bucket_id, &key.to_string()) + .await? + .ok_or(Error::NoSuchKey)?; // No need to delete + + let interesting_versions = object.versions().iter().filter(|v| { + !matches!( + v.state, + ObjectVersionState::Aborted + | ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) + ) + }); + + let mut version_to_delete = None; + let mut timestamp = now_msec(); + for v in interesting_versions { + if v.timestamp + 1 > timestamp || version_to_delete.is_none() { + version_to_delete = Some(v.uuid); + } + timestamp = std::cmp::max(timestamp, v.timestamp + 1); + } + + let deleted_version = version_to_delete.ok_or(Error::NoSuchKey)?; + + let version_uuid = gen_uuid(); + + let object = Object::new( + bucket_id, + key.into(), + vec![ObjectVersion { + uuid: version_uuid, + timestamp, + state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), + }], + ); + + garage.object_table.insert(&object).await?; + + Ok((deleted_version, version_uuid)) +} + +pub async fn handle_delete( + garage: Arc, + bucket_id: Uuid, + key: &str, +) -> Result, Error> { + let (_deleted_version, delete_marker_version) = + handle_delete_internal(&garage, bucket_id, key).await?; + + Ok(Response::builder() + .header("x-amz-version-id", hex::encode(delete_marker_version)) + .status(StatusCode::NO_CONTENT) + .body(Body::from(vec![])) + .unwrap()) +} + +pub async fn handle_delete_objects( + garage: Arc, + bucket_id: Uuid, + req: Request, + content_sha256: Option, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; + let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?; + + let mut ret_deleted = Vec::new(); + let mut ret_errors = Vec::new(); + + for obj in cmd.objects.iter() { + match handle_delete_internal(&garage, bucket_id, &obj.key).await { + Ok((deleted_version, delete_marker_version)) => { + if cmd.quiet { + continue; + } + ret_deleted.push(s3_xml::Deleted { + key: s3_xml::Value(obj.key.clone()), + version_id: s3_xml::Value(hex::encode(deleted_version)), + 
delete_marker_version_id: s3_xml::Value(hex::encode(delete_marker_version)), + }); + } + Err(e) => { + ret_errors.push(s3_xml::DeleteError { + code: s3_xml::Value(e.aws_code().to_string()), + key: Some(s3_xml::Value(obj.key.clone())), + message: s3_xml::Value(format!("{}", e)), + version_id: None, + }); + } + } + } + + let xml = s3_xml::to_xml_with_header(&s3_xml::DeleteResult { + xmlns: (), + deleted: ret_deleted, + errors: ret_errors, + })?; + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(Body::from(xml))?) +} + +struct DeleteRequest { + quiet: bool, + objects: Vec, +} + +struct DeleteObject { + key: String, +} + +fn parse_delete_objects_xml(xml: &roxmltree::Document) -> Option { + let mut ret = DeleteRequest { + quiet: false, + objects: vec![], + }; + + let root = xml.root(); + let delete = root.first_child()?; + + if !delete.has_tag_name("Delete") { + return None; + } + + for item in delete.children() { + if item.has_tag_name("Object") { + let key = item.children().find(|e| e.has_tag_name("Key"))?; + let key_str = key.text()?; + ret.objects.push(DeleteObject { + key: key_str.to_string(), + }); + } else if item.has_tag_name("Quiet") { + if item.text()? == "true" { + ret.quiet = true; + } else { + ret.quiet = false; + } + } else { + return None; + } + } + + Some(ret) +} diff --git a/src/api/s3/get.rs b/src/api/s3/get.rs new file mode 100644 index 00000000..3edf22a6 --- /dev/null +++ b/src/api/s3/get.rs @@ -0,0 +1,461 @@ +//! Function related to GET and HEAD requests +use std::sync::Arc; +use std::time::{Duration, UNIX_EPOCH}; + +use futures::stream::*; +use http::header::{ + ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, IF_MODIFIED_SINCE, + IF_NONE_MATCH, LAST_MODIFIED, RANGE, +}; +use hyper::body::Bytes; +use hyper::{Body, Request, Response, StatusCode}; + +use garage_table::EmptyKey; +use garage_util::data::*; + +use garage_model::garage::Garage; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; + +use crate::error::*; + +const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count"; + +fn object_headers( + version: &ObjectVersion, + version_meta: &ObjectVersionMeta, +) -> http::response::Builder { + debug!("Version meta: {:?}", version_meta); + + let date = UNIX_EPOCH + Duration::from_millis(version.timestamp); + let date_str = httpdate::fmt_http_date(date); + + let mut resp = Response::builder() + .header(CONTENT_TYPE, version_meta.headers.content_type.to_string()) + .header(LAST_MODIFIED, date_str) + .header(ACCEPT_RANGES, "bytes".to_string()); + + if !version_meta.etag.is_empty() { + resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag)); + } + + for (k, v) in version_meta.headers.other.iter() { + resp = resp.header(k, v.to_string()); + } + + resp +} + +fn try_answer_cached( + version: &ObjectVersion, + version_meta: &ObjectVersionMeta, + req: &Request, +) -> Option> { + // It is possible, and is even usually the case, [that both If-None-Match and + // If-Modified-Since] are present in a request. In this situation If-None-Match takes + // precedence and If-Modified-Since is ignored (as per 6.Precedence from rfc7232). 
+	// The rationale is that etag-based matching is more accurate, as it has
+	// no issue with sub-second precision for instance
+	// (in case of very fast updates)
+	let cached = if let Some(none_match) = req.headers().get(IF_NONE_MATCH) {
+		let none_match = none_match.to_str().ok()?;
+		let expected = format!("\"{}\"", version_meta.etag);
+		let found = none_match
+			.split(',')
+			.map(str::trim)
+			.any(|etag| etag == expected || etag == "\"*\"");
+		found
+	} else if let Some(modified_since) = req.headers().get(IF_MODIFIED_SINCE) {
+		let modified_since = modified_since.to_str().ok()?;
+		let client_date = httpdate::parse_http_date(modified_since).ok()?;
+		let server_date = UNIX_EPOCH + Duration::from_millis(version.timestamp);
+		client_date >= server_date
+	} else {
+		false
+	};
+
+	if cached {
+		Some(
+			Response::builder()
+				.status(StatusCode::NOT_MODIFIED)
+				.body(Body::empty())
+				.unwrap(),
+		)
+	} else {
+		None
+	}
+}
+
+/// Handle HEAD request
+pub async fn handle_head(
+	garage: Arc<Garage>,
+	req: &Request<Body>,
+	bucket_id: Uuid,
+	key: &str,
+	part_number: Option<u64>,
+) -> Result<Response<Body>, Error> {
+	let object = garage
+		.object_table
+		.get(&bucket_id, &key.to_string())
+		.await?
+		.ok_or(Error::NoSuchKey)?;
+
+	let object_version = object
+		.versions()
+		.iter()
+		.rev()
+		.find(|v| v.is_data())
+		.ok_or(Error::NoSuchKey)?;
+
+	let version_data = match &object_version.state {
+		ObjectVersionState::Complete(c) => c,
+		_ => unreachable!(),
+	};
+
+	let version_meta = match version_data {
+		ObjectVersionData::Inline(meta, _) => meta,
+		ObjectVersionData::FirstBlock(meta, _) => meta,
+		_ => unreachable!(),
+	};
+
+	if let Some(cached) = try_answer_cached(object_version, version_meta, req) {
+		return Ok(cached);
+	}
+
+	if let Some(pn) = part_number {
+		match version_data {
+			ObjectVersionData::Inline(_, bytes) => {
+				if pn != 1 {
+					return Err(Error::InvalidPart);
+				}
+				Ok(object_headers(object_version, version_meta)
+					.header(CONTENT_LENGTH, format!("{}", bytes.len()))
+					.header(
+						CONTENT_RANGE,
+						format!("bytes 0-{}/{}", bytes.len() - 1, bytes.len()),
+					)
+					.header(X_AMZ_MP_PARTS_COUNT, "1")
+					.status(StatusCode::PARTIAL_CONTENT)
+					.body(Body::empty())?)
+			}
+			ObjectVersionData::FirstBlock(_, _) => {
+				let version = garage
+					.version_table
+					.get(&object_version.uuid, &EmptyKey)
+					.await?
+					.ok_or(Error::NoSuchKey)?;
+
+				let (part_offset, part_end) =
+					calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
+				let n_parts = version.parts_etags.items().len();
+
+				Ok(object_headers(object_version, version_meta)
+					.header(CONTENT_LENGTH, format!("{}", part_end - part_offset))
+					.header(
+						CONTENT_RANGE,
+						format!(
+							"bytes {}-{}/{}",
+							part_offset,
+							part_end - 1,
+							version_meta.size
+						),
+					)
+					.header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts))
+					.status(StatusCode::PARTIAL_CONTENT)
+					.body(Body::empty())?)
+			}
+			_ => unreachable!(),
+		}
+	} else {
+		Ok(object_headers(object_version, version_meta)
+			.header(CONTENT_LENGTH, format!("{}", version_meta.size))
+			.status(StatusCode::OK)
+			.body(Body::empty())?)
+	}
+}
+
+/// Handle GET request
+pub async fn handle_get(
+	garage: Arc<Garage>,
+	req: &Request<Body>,
+	bucket_id: Uuid,
+	key: &str,
+	part_number: Option<u64>,
+) -> Result<Response<Body>, Error> {
+	let object = garage
+		.object_table
+		.get(&bucket_id, &key.to_string())
+		.await?
+ .ok_or(Error::NoSuchKey)?; + + let last_v = object + .versions() + .iter() + .rev() + .find(|v| v.is_complete()) + .ok_or(Error::NoSuchKey)?; + + let last_v_data = match &last_v.state { + ObjectVersionState::Complete(x) => x, + _ => unreachable!(), + }; + let last_v_meta = match last_v_data { + ObjectVersionData::DeleteMarker => return Err(Error::NoSuchKey), + ObjectVersionData::Inline(meta, _) => meta, + ObjectVersionData::FirstBlock(meta, _) => meta, + }; + + if let Some(cached) = try_answer_cached(last_v, last_v_meta, req) { + return Ok(cached); + } + + match (part_number, parse_range_header(req, last_v_meta.size)?) { + (Some(_), Some(_)) => { + return Err(Error::BadRequest( + "Cannot specify both partNumber and Range header".into(), + )); + } + (Some(pn), None) => { + return handle_get_part(garage, last_v, last_v_data, last_v_meta, pn).await; + } + (None, Some(range)) => { + return handle_get_range( + garage, + last_v, + last_v_data, + last_v_meta, + range.start, + range.start + range.length, + ) + .await; + } + (None, None) => (), + } + + let resp_builder = object_headers(last_v, last_v_meta) + .header(CONTENT_LENGTH, format!("{}", last_v_meta.size)) + .status(StatusCode::OK); + + match &last_v_data { + ObjectVersionData::DeleteMarker => unreachable!(), + ObjectVersionData::Inline(_, bytes) => { + let body: Body = Body::from(bytes.to_vec()); + Ok(resp_builder.body(body)?) + } + ObjectVersionData::FirstBlock(_, first_block_hash) => { + let read_first_block = garage.block_manager.rpc_get_block(first_block_hash); + let get_next_blocks = garage.version_table.get(&last_v.uuid, &EmptyKey); + + let (first_block, version) = futures::try_join!(read_first_block, get_next_blocks)?; + let version = version.ok_or(Error::NoSuchKey)?; + + let mut blocks = version + .blocks + .items() + .iter() + .map(|(_, vb)| (vb.hash, None)) + .collect::>(); + blocks[0].1 = Some(first_block); + + let body_stream = futures::stream::iter(blocks) + .map(move |(hash, data_opt)| { + let garage = garage.clone(); + async move { + if let Some(data) = data_opt { + Ok(Bytes::from(data)) + } else { + garage + .block_manager + .rpc_get_block(&hash) + .await + .map(Bytes::from) + } + } + }) + .buffered(2); + + let body = hyper::body::Body::wrap_stream(body_stream); + Ok(resp_builder.body(body)?) + } + } +} + +async fn handle_get_range( + garage: Arc, + version: &ObjectVersion, + version_data: &ObjectVersionData, + version_meta: &ObjectVersionMeta, + begin: u64, + end: u64, +) -> Result, Error> { + let resp_builder = object_headers(version, version_meta) + .header(CONTENT_LENGTH, format!("{}", end - begin)) + .header( + CONTENT_RANGE, + format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), + ) + .status(StatusCode::PARTIAL_CONTENT); + + match &version_data { + ObjectVersionData::DeleteMarker => unreachable!(), + ObjectVersionData::Inline(_meta, bytes) => { + if end as usize <= bytes.len() { + let body: Body = Body::from(bytes[begin as usize..end as usize].to_vec()); + Ok(resp_builder.body(body)?) + } else { + None.ok_or_internal_error( + "Requested range not present in inline bytes when it should have been", + ) + } + } + ObjectVersionData::FirstBlock(_meta, _first_block_hash) => { + let version = garage + .version_table + .get(&version.uuid, &EmptyKey) + .await? + .ok_or(Error::NoSuchKey)?; + + let body = body_from_blocks_range(garage, version.blocks.items(), begin, end); + Ok(resp_builder.body(body)?) 
+ } + } +} + +async fn handle_get_part( + garage: Arc, + object_version: &ObjectVersion, + version_data: &ObjectVersionData, + version_meta: &ObjectVersionMeta, + part_number: u64, +) -> Result, Error> { + let resp_builder = + object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT); + + match version_data { + ObjectVersionData::Inline(_, bytes) => { + if part_number != 1 { + return Err(Error::InvalidPart); + } + Ok(resp_builder + .header(CONTENT_LENGTH, format!("{}", bytes.len())) + .header( + CONTENT_RANGE, + format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()), + ) + .header(X_AMZ_MP_PARTS_COUNT, "1") + .body(Body::from(bytes.to_vec()))?) + } + ObjectVersionData::FirstBlock(_, _) => { + let version = garage + .version_table + .get(&object_version.uuid, &EmptyKey) + .await? + .ok_or(Error::NoSuchKey)?; + + let (begin, end) = + calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; + let n_parts = version.parts_etags.items().len(); + + let body = body_from_blocks_range(garage, version.blocks.items(), begin, end); + + Ok(resp_builder + .header(CONTENT_LENGTH, format!("{}", end - begin)) + .header( + CONTENT_RANGE, + format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), + ) + .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts)) + .body(body)?) + } + _ => unreachable!(), + } +} + +fn parse_range_header( + req: &Request, + total_size: u64, +) -> Result, Error> { + let range = match req.headers().get(RANGE) { + Some(range) => { + let range_str = range.to_str()?; + let mut ranges = + http_range::HttpRange::parse(range_str, total_size).map_err(|e| (e, total_size))?; + if ranges.len() > 1 { + // garage does not support multi-range requests yet, so we respond with the entire + // object when multiple ranges are requested + None + } else { + ranges.pop() + } + } + None => None, + }; + Ok(range) +} + +fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> { + let mut offset = 0; + for (i, (bk, bv)) in v.blocks.items().iter().enumerate() { + if bk.part_number == part_number { + let size: u64 = v.blocks.items()[i..] 
+ .iter() + .take_while(|(k, _)| k.part_number == part_number) + .map(|(_, v)| v.size) + .sum(); + return Some((offset, offset + size)); + } + offset += bv.size; + } + None +} + +fn body_from_blocks_range( + garage: Arc, + all_blocks: &[(VersionBlockKey, VersionBlock)], + begin: u64, + end: u64, +) -> Body { + // We will store here the list of blocks that have an intersection with the requested + // range, as well as their "true offset", which is their actual offset in the complete + // file (whereas block.offset designates the offset of the block WITHIN THE PART + // block.part_number, which is not the same in the case of a multipart upload) + let mut blocks: Vec<(VersionBlock, u64)> = Vec::with_capacity(std::cmp::min( + all_blocks.len(), + 4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size as u64, 1024)) as usize, + )); + let mut true_offset = 0; + for (_, b) in all_blocks.iter() { + if true_offset >= end { + break; + } + // Keep only blocks that have an intersection with the requested range + if true_offset < end && true_offset + b.size > begin { + blocks.push((*b, true_offset)); + } + true_offset += b.size; + } + + let body_stream = futures::stream::iter(blocks) + .map(move |(block, true_offset)| { + let garage = garage.clone(); + async move { + let data = garage.block_manager.rpc_get_block(&block.hash).await?; + let data = Bytes::from(data); + let start_in_block = if true_offset > begin { + 0 + } else { + begin - true_offset + }; + let end_in_block = if true_offset + block.size < end { + block.size + } else { + end - true_offset + }; + Result::::Ok( + data.slice(start_in_block as usize..end_in_block as usize), + ) + } + }) + .buffered(2); + + hyper::body::Body::wrap_stream(body_stream) +} diff --git a/src/api/s3/list.rs b/src/api/s3/list.rs new file mode 100644 index 00000000..e2848c57 --- /dev/null +++ b/src/api/s3/list.rs @@ -0,0 +1,1337 @@ +use std::cmp::Ordering; +use std::collections::{BTreeMap, BTreeSet}; +use std::iter::{Iterator, Peekable}; +use std::sync::Arc; + +use hyper::{Body, Response}; + +use garage_util::data::*; +use garage_util::error::Error as GarageError; +use garage_util::time::*; + +use garage_model::garage::Garage; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::Version; + +use garage_table::{EmptyKey, EnumerationOrder}; + +use crate::encoding::*; +use crate::error::*; +use crate::helpers::key_after_prefix; +use crate::s3::put as s3_put; +use crate::s3::xml as s3_xml; + +const DUMMY_NAME: &str = "Dummy Key"; +const DUMMY_KEY: &str = "GKDummyKey"; + +#[derive(Debug)] +pub struct ListQueryCommon { + pub bucket_name: String, + pub bucket_id: Uuid, + pub delimiter: Option, + pub page_size: usize, + pub prefix: String, + pub urlencode_resp: bool, +} + +#[derive(Debug)] +pub struct ListObjectsQuery { + pub is_v2: bool, + pub marker: Option, + pub continuation_token: Option, + pub start_after: Option, + pub common: ListQueryCommon, +} + +#[derive(Debug)] +pub struct ListMultipartUploadsQuery { + pub key_marker: Option, + pub upload_id_marker: Option, + pub common: ListQueryCommon, +} + +#[derive(Debug)] +pub struct ListPartsQuery { + pub bucket_name: String, + pub bucket_id: Uuid, + pub key: String, + pub upload_id: String, + pub part_number_marker: Option, + pub max_parts: u64, +} + +pub async fn handle_list( + garage: Arc, + query: &ListObjectsQuery, +) -> Result, Error> { + let io = |bucket, key, count| { + let t = &garage.object_table; + async move { + t.get_range( + &bucket, + key, + Some(ObjectFilter::IsData), + count, + 
EnumerationOrder::Forward, + ) + .await + } + }; + + debug!("ListObjects {:?}", query); + let mut acc = query.build_accumulator(); + let pagination = fetch_list_entries(&query.common, query.begin()?, &mut acc, &io).await?; + + let result = s3_xml::ListBucketResult { + xmlns: (), + // Sending back request information + name: s3_xml::Value(query.common.bucket_name.to_string()), + prefix: uriencode_maybe(&query.common.prefix, query.common.urlencode_resp), + max_keys: s3_xml::IntValue(query.common.page_size as i64), + delimiter: query + .common + .delimiter + .as_ref() + .map(|x| uriencode_maybe(x, query.common.urlencode_resp)), + encoding_type: match query.common.urlencode_resp { + true => Some(s3_xml::Value("url".to_string())), + false => None, + }, + marker: match (!query.is_v2, &query.marker) { + (true, Some(k)) => Some(uriencode_maybe(k, query.common.urlencode_resp)), + _ => None, + }, + start_after: match (query.is_v2, &query.start_after) { + (true, Some(sa)) => Some(uriencode_maybe(sa, query.common.urlencode_resp)), + _ => None, + }, + continuation_token: match (query.is_v2, &query.continuation_token) { + (true, Some(ct)) => Some(s3_xml::Value(ct.to_string())), + _ => None, + }, + + // Pagination + is_truncated: s3_xml::Value(format!("{}", pagination.is_some())), + key_count: Some(s3_xml::IntValue( + acc.keys.len() as i64 + acc.common_prefixes.len() as i64, + )), + next_marker: match (!query.is_v2, &pagination) { + (true, Some(RangeBegin::AfterKey { key: k })) + | ( + true, + Some(RangeBegin::IncludingKey { + fallback_key: Some(k), + .. + }), + ) => Some(uriencode_maybe(k, query.common.urlencode_resp)), + _ => None, + }, + next_continuation_token: match (query.is_v2, &pagination) { + (true, Some(RangeBegin::AfterKey { key })) => Some(s3_xml::Value(format!( + "]{}", + base64::encode(key.as_bytes()) + ))), + (true, Some(RangeBegin::IncludingKey { key, .. })) => Some(s3_xml::Value(format!( + "[{}", + base64::encode(key.as_bytes()) + ))), + _ => None, + }, + + // Body + contents: acc + .keys + .iter() + .map(|(key, info)| s3_xml::ListBucketItem { + key: uriencode_maybe(key, query.common.urlencode_resp), + last_modified: s3_xml::Value(msec_to_rfc3339(info.last_modified)), + size: s3_xml::IntValue(info.size as i64), + etag: s3_xml::Value(format!("\"{}\"", info.etag)), + storage_class: s3_xml::Value("STANDARD".to_string()), + }) + .collect(), + common_prefixes: acc + .common_prefixes + .iter() + .map(|pfx| s3_xml::CommonPrefix { + prefix: uriencode_maybe(pfx, query.common.urlencode_resp), + }) + .collect(), + }; + + let xml = s3_xml::to_xml_with_header(&result)?; + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(Body::from(xml.into_bytes()))?) 
+} + +pub async fn handle_list_multipart_upload( + garage: Arc, + query: &ListMultipartUploadsQuery, +) -> Result, Error> { + let io = |bucket, key, count| { + let t = &garage.object_table; + async move { + t.get_range( + &bucket, + key, + Some(ObjectFilter::IsUploading), + count, + EnumerationOrder::Forward, + ) + .await + } + }; + + debug!("ListMultipartUploads {:?}", query); + let mut acc = query.build_accumulator(); + let pagination = fetch_list_entries(&query.common, query.begin()?, &mut acc, &io).await?; + + let result = s3_xml::ListMultipartUploadsResult { + xmlns: (), + + // Sending back some information about the request + bucket: s3_xml::Value(query.common.bucket_name.to_string()), + prefix: uriencode_maybe(&query.common.prefix, query.common.urlencode_resp), + delimiter: query + .common + .delimiter + .as_ref() + .map(|d| uriencode_maybe(d, query.common.urlencode_resp)), + max_uploads: s3_xml::IntValue(query.common.page_size as i64), + key_marker: query + .key_marker + .as_ref() + .map(|m| uriencode_maybe(m, query.common.urlencode_resp)), + upload_id_marker: query + .upload_id_marker + .as_ref() + .map(|m| s3_xml::Value(m.to_string())), + encoding_type: match query.common.urlencode_resp { + true => Some(s3_xml::Value("url".to_string())), + false => None, + }, + + // Handling pagination + is_truncated: s3_xml::Value(format!("{}", pagination.is_some())), + next_key_marker: match &pagination { + None => None, + Some(RangeBegin::AfterKey { key }) + | Some(RangeBegin::AfterUpload { key, .. }) + | Some(RangeBegin::IncludingKey { key, .. }) => { + Some(uriencode_maybe(key, query.common.urlencode_resp)) + } + }, + next_upload_id_marker: match pagination { + Some(RangeBegin::AfterUpload { upload, .. }) => { + Some(s3_xml::Value(hex::encode(upload))) + } + Some(RangeBegin::IncludingKey { .. }) => Some(s3_xml::Value("include".to_string())), + _ => None, + }, + + // Result body + upload: acc + .keys + .iter() + .map(|(uuid, info)| s3_xml::ListMultipartItem { + initiated: s3_xml::Value(msec_to_rfc3339(info.timestamp)), + key: uriencode_maybe(&info.key, query.common.urlencode_resp), + upload_id: s3_xml::Value(hex::encode(uuid)), + storage_class: s3_xml::Value("STANDARD".to_string()), + initiator: s3_xml::Initiator { + display_name: s3_xml::Value(DUMMY_NAME.to_string()), + id: s3_xml::Value(DUMMY_KEY.to_string()), + }, + owner: s3_xml::Owner { + display_name: s3_xml::Value(DUMMY_NAME.to_string()), + id: s3_xml::Value(DUMMY_KEY.to_string()), + }, + }) + .collect(), + common_prefixes: acc + .common_prefixes + .iter() + .map(|c| s3_xml::CommonPrefix { + prefix: s3_xml::Value(c.to_string()), + }) + .collect(), + }; + + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(Body::from(xml.into_bytes()))?) 
+}
+
+pub async fn handle_list_parts(
+	garage: Arc<Garage>,
+	query: &ListPartsQuery,
+) -> Result<Response<Body>, Error> {
+	debug!("ListParts {:?}", query);
+
+	let upload_id = s3_put::decode_upload_id(&query.upload_id)?;
+
+	let (object, version) = futures::try_join!(
+		garage.object_table.get(&query.bucket_id, &query.key),
+		garage.version_table.get(&upload_id, &EmptyKey),
+	)?;
+
+	let (info, next) = fetch_part_info(query, object, version, upload_id)?;
+
+	let result = s3_xml::ListPartsResult {
+		xmlns: (),
+		bucket: s3_xml::Value(query.bucket_name.to_string()),
+		key: s3_xml::Value(query.key.to_string()),
+		upload_id: s3_xml::Value(query.upload_id.to_string()),
+		part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)),
+		next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)),
+		max_parts: s3_xml::IntValue(query.max_parts as i64),
+		is_truncated: s3_xml::Value(next.map(|_| "true").unwrap_or("false").to_string()),
+		parts: info
+			.iter()
+			.map(|part| s3_xml::PartItem {
+				etag: s3_xml::Value(format!("\"{}\"", part.etag)),
+				last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)),
+				part_number: s3_xml::IntValue(part.part_number as i64),
+				size: s3_xml::IntValue(part.size as i64),
+			})
+			.collect(),
+		initiator: s3_xml::Initiator {
+			display_name: s3_xml::Value(DUMMY_NAME.to_string()),
+			id: s3_xml::Value(DUMMY_KEY.to_string()),
+		},
+		owner: s3_xml::Owner {
+			display_name: s3_xml::Value(DUMMY_NAME.to_string()),
+			id: s3_xml::Value(DUMMY_KEY.to_string()),
+		},
+		storage_class: s3_xml::Value("STANDARD".to_string()),
+	};
+
+	let xml = s3_xml::to_xml_with_header(&result)?;
+
+	Ok(Response::builder()
+		.header("Content-Type", "application/xml")
+		.body(Body::from(xml.into_bytes()))?)
+}
+
+/*
+ * Private enums and structs
+ */
+
+#[derive(Debug)]
+struct ObjectInfo {
+	last_modified: u64,
+	size: u64,
+	etag: String,
+}
+
+#[derive(Debug, PartialEq)]
+struct UploadInfo {
+	key: String,
+	timestamp: u64,
+}
+
+#[derive(Debug, PartialEq)]
+struct PartInfo {
+	etag: String,
+	timestamp: u64,
+	part_number: u64,
+	size: u64,
+}
+
+enum ExtractionResult {
+	NoMore,
+	Filled,
+	FilledAtUpload {
+		key: String,
+		upload: Uuid,
+	},
+	Extracted {
+		key: String,
+	},
+	// Fallback key is used for legacy APIs that only support
+	// exclusive pagination (and not inclusive one).
+	SkipTo {
+		key: String,
+		fallback_key: Option<String>,
+	},
+}
+
+#[derive(PartialEq, Clone, Debug)]
+enum RangeBegin {
+	// Fallback key is used for legacy APIs that only support
+	// exclusive pagination (and not inclusive one).
+	IncludingKey {
+		key: String,
+		fallback_key: Option<String>,
+	},
+	AfterKey {
+		key: String,
+	},
+	AfterUpload {
+		key: String,
+		upload: Uuid,
+	},
+}
+type Pagination = Option<RangeBegin>;
+
+/*
+ * Fetch list entries
+ */
+
+async fn fetch_list_entries<R, F>(
+	query: &ListQueryCommon,
+	begin: RangeBegin,
+	acc: &mut impl ExtractAccumulator,
+	mut io: F,
+) -> Result<Pagination, Error>
+where
+	R: futures::Future<Output = Result<Vec<Object>, GarageError>>,
+	F: FnMut(Uuid, Option<String>, usize) -> R,
+{
+	let mut cursor = begin;
+	// +1 is needed as we may need to skip the 1st key
+	// (range is inclusive while most S3 requests are exclusive)
+	let count = query.page_size + 1;
+
+	loop {
+		let start_key = match cursor {
+			RangeBegin::AfterKey { ref key }
+			| RangeBegin::AfterUpload { ref key, .. }
+			| RangeBegin::IncludingKey { ref key, ..
} => Some(key.clone()), + }; + + // Fetch objects + let objects = io(query.bucket_id, start_key.clone(), count).await?; + + debug!( + "List: get range {:?} (max {}), results: {}", + start_key, + count, + objects.len() + ); + let server_more = objects.len() >= count; + + let prev_req_cursor = cursor.clone(); + let mut iter = objects.iter().peekable(); + + // Drop the first key if needed + // Only AfterKey requires it according to the S3 spec and our implem. + match (&cursor, iter.peek()) { + (RangeBegin::AfterKey { key }, Some(object)) if &object.key == key => iter.next(), + (_, _) => None, + }; + + while let Some(object) = iter.peek() { + if !object.key.starts_with(&query.prefix) { + // If the key is not in the requested prefix, we're done + return Ok(None); + } + + cursor = match acc.extract(query, &cursor, &mut iter) { + ExtractionResult::Extracted { key } => RangeBegin::AfterKey { key }, + ExtractionResult::SkipTo { key, fallback_key } => { + RangeBegin::IncludingKey { key, fallback_key } + } + ExtractionResult::FilledAtUpload { key, upload } => { + return Ok(Some(RangeBegin::AfterUpload { key, upload })) + } + ExtractionResult::Filled => return Ok(Some(cursor)), + ExtractionResult::NoMore => return Ok(None), + }; + } + + if !server_more { + // We did not fully fill the accumulator despite exhausting all the data we have, + // we're done + return Ok(None); + } + + if prev_req_cursor == cursor { + unreachable!("No progress has been done in the loop. This is a bug, please report it."); + } + } +} + +fn fetch_part_info( + query: &ListPartsQuery, + object: Option, + version: Option, + upload_id: Uuid, +) -> Result<(Vec, Option), Error> { + // Check results + let object = object.ok_or(Error::NoSuchKey)?; + + let obj_version = object + .versions() + .iter() + .find(|v| v.uuid == upload_id && v.is_uploading()) + .ok_or(Error::NoSuchUpload)?; + + let version = version.ok_or(Error::NoSuchKey)?; + + // Cut the beginning of our 2 vectors if required + let (etags, blocks) = match &query.part_number_marker { + Some(marker) => { + let next = marker + 1; + + let part_idx = into_ok_or_err( + version + .parts_etags + .items() + .binary_search_by(|(part_num, _)| part_num.cmp(&next)), + ); + let parts = &version.parts_etags.items()[part_idx..]; + + let block_idx = into_ok_or_err( + version + .blocks + .items() + .binary_search_by(|(vkey, _)| vkey.part_number.cmp(&next)), + ); + let blocks = &version.blocks.items()[block_idx..]; + + (parts, blocks) + } + None => (version.parts_etags.items(), version.blocks.items()), + }; + + // Use the block vector to compute a (part_number, size) vector + let mut size = Vec::<(u64, u64)>::new(); + blocks.iter().for_each(|(key, val)| { + let mut new_size = val.size; + match size.pop() { + Some((part_number, size)) if part_number == key.part_number => new_size += size, + Some(v) => size.push(v), + None => (), + } + size.push((key.part_number, new_size)) + }); + + // Merge the etag vector and size vector to build a PartInfo vector + let max_parts = query.max_parts as usize; + let (mut etag_iter, mut size_iter) = (etags.iter().peekable(), size.iter().peekable()); + + let mut info = Vec::::with_capacity(max_parts); + + while info.len() < max_parts { + match (etag_iter.peek(), size_iter.peek()) { + (Some((ep, etag)), Some((sp, size))) => match ep.cmp(sp) { + Ordering::Less => { + debug!("ETag information ignored due to missing corresponding block information. 
Query: {:?}", query); + etag_iter.next(); + } + Ordering::Equal => { + info.push(PartInfo { + etag: etag.to_string(), + timestamp: obj_version.timestamp, + part_number: *ep, + size: *size, + }); + etag_iter.next(); + size_iter.next(); + } + Ordering::Greater => { + debug!("Block information ignored due to missing corresponding ETag information. Query: {:?}", query); + size_iter.next(); + } + }, + (None, None) => return Ok((info, None)), + _ => { + debug!( + "Additional block or ETag information ignored. Query: {:?}", + query + ); + return Ok((info, None)); + } + } + } + + match info.last() { + Some(part_info) => { + let pagination = Some(part_info.part_number); + Ok((info, pagination)) + } + None => Ok((info, None)), + } +} + +/* + * ListQuery logic + */ + +/// Determine the key from where we want to start fetch objects from the database +/// +/// We choose whether the object at this key must +/// be included or excluded from the response. +/// This key can be the prefix in the base case, or intermediate +/// points in the dataset if we are continuing a previous listing. +impl ListObjectsQuery { + fn build_accumulator(&self) -> Accumulator { + Accumulator::::new(self.common.page_size) + } + + fn begin(&self) -> Result { + if self.is_v2 { + match (&self.continuation_token, &self.start_after) { + // In V2 mode, the continuation token is defined as an opaque + // string in the spec, so we can do whatever we want with it. + // In our case, it is defined as either [ or ] (for include + // representing the key to start with. + (Some(token), _) => match &token[..1] { + "[" => Ok(RangeBegin::IncludingKey { + key: String::from_utf8(base64::decode(token[1..].as_bytes())?)?, + fallback_key: None, + }), + "]" => Ok(RangeBegin::AfterKey { + key: String::from_utf8(base64::decode(token[1..].as_bytes())?)?, + }), + _ => Err(Error::BadRequest("Invalid continuation token".to_string())), + }, + + // StartAfter has defined semantics in the spec: + // start listing at the first key immediately after. + (_, Some(key)) => Ok(RangeBegin::AfterKey { + key: key.to_string(), + }), + + // In the case where neither is specified, we start + // listing at the specified prefix. If an object has this + // exact same key, we include it. (@TODO is this correct?) + _ => Ok(RangeBegin::IncludingKey { + key: self.common.prefix.to_string(), + fallback_key: None, + }), + } + } else { + match &self.marker { + // In V1 mode, the spec defines the Marker value to mean + // the same thing as the StartAfter value in V2 mode. + Some(key) => Ok(RangeBegin::AfterKey { + key: key.to_string(), + }), + _ => Ok(RangeBegin::IncludingKey { + key: self.common.prefix.to_string(), + fallback_key: None, + }), + } + } + } +} + +impl ListMultipartUploadsQuery { + fn build_accumulator(&self) -> Accumulator { + Accumulator::::new(self.common.page_size) + } + + fn begin(&self) -> Result { + match (&self.upload_id_marker, &self.key_marker) { + // If both the upload id marker and the key marker are sets, + // the spec specifies that we must start listing uploads INCLUDING the given key, + // AFTER the specified upload id (sorted in a lexicographic order). + // To enable some optimisations, we emulate "IncludingKey" by extending the upload id + // semantic. We base our reasoning on the hypothesis that S3's upload ids are opaques + // while Garage's ones are 32 bytes hex encoded which enables us to extend this query + // with a specific "include" upload id. + (Some(up_marker), Some(key_marker)) => match &up_marker[..] 
{ + "include" => Ok(RangeBegin::IncludingKey { + key: key_marker.to_string(), + fallback_key: None, + }), + uuid => Ok(RangeBegin::AfterUpload { + key: key_marker.to_string(), + upload: s3_put::decode_upload_id(uuid)?, + }), + }, + + // If only the key marker is specified, the spec says that we must start listing + // uploads AFTER the specified key. + (None, Some(key_marker)) => Ok(RangeBegin::AfterKey { + key: key_marker.to_string(), + }), + _ => Ok(RangeBegin::IncludingKey { + key: self.common.prefix.to_string(), + fallback_key: None, + }), + } + } +} + +/* + * Accumulator logic + */ + +trait ExtractAccumulator { + fn extract<'a>( + &mut self, + query: &ListQueryCommon, + cursor: &RangeBegin, + iter: &mut Peekable>, + ) -> ExtractionResult; +} + +struct Accumulator { + common_prefixes: BTreeSet, + keys: BTreeMap, + max_capacity: usize, +} + +type ObjectAccumulator = Accumulator; +type UploadAccumulator = Accumulator; + +impl Accumulator { + fn new(page_size: usize) -> Accumulator { + Accumulator { + common_prefixes: BTreeSet::::new(), + keys: BTreeMap::::new(), + max_capacity: page_size, + } + } + + /// Observe the Object iterator and try to extract a single common prefix + /// + /// This function can consume an arbitrary number of items as long as they share the same + /// common prefix. + fn extract_common_prefix<'a>( + &mut self, + objects: &mut Peekable>, + query: &ListQueryCommon, + ) -> Option { + // Get the next object from the iterator + let object = objects.peek().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it."); + + // Check if this is a common prefix (requires a passed delimiter and its value in the key) + let pfx = match common_prefix(object, query) { + Some(p) => p, + None => return None, + }; + + // Try to register this prefix + // If not possible, we can return early + if !self.try_insert_common_prefix(pfx.to_string()) { + return Some(ExtractionResult::Filled); + } + + // We consume the whole common prefix from the iterator + let mut last_pfx_key = &object.key; + loop { + last_pfx_key = match objects.peek() { + Some(o) if o.key.starts_with(pfx) => &o.key, + Some(_) => { + return Some(ExtractionResult::Extracted { + key: last_pfx_key.to_owned(), + }) + } + None => { + return match key_after_prefix(pfx) { + Some(next) => Some(ExtractionResult::SkipTo { + key: next, + fallback_key: Some(last_pfx_key.to_owned()), + }), + None => Some(ExtractionResult::NoMore), + } + } + }; + + objects.next(); + } + } + + fn is_full(&mut self) -> bool { + self.keys.len() + self.common_prefixes.len() >= self.max_capacity + } + + fn try_insert_common_prefix(&mut self, key: String) -> bool { + // If we already have an entry, we can continue + if self.common_prefixes.contains(&key) { + return true; + } + + // Otherwise, we need to check if we can add it + match self.is_full() { + true => false, + false => { + self.common_prefixes.insert(key); + true + } + } + } + + fn try_insert_entry(&mut self, key: K, value: V) -> bool { + // It is impossible to add twice a key, this is an error + assert!(!self.keys.contains_key(&key)); + + match self.is_full() { + true => false, + false => { + self.keys.insert(key, value); + true + } + } + } +} + +impl ExtractAccumulator for ObjectAccumulator { + fn extract<'a>( + &mut self, + query: &ListQueryCommon, + _cursor: &RangeBegin, + objects: &mut Peekable>, + ) -> ExtractionResult { + if let Some(e) = self.extract_common_prefix(objects, query) { + return e; + } + + let object = 
objects.next().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it."); + + let version = match object.versions().iter().find(|x| x.is_data()) { + Some(v) => v, + None => unreachable!( + "Expect to have objects having data due to earlier filtering. This is a logic bug." + ), + }; + + let meta = match &version.state { + ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta, + ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => meta, + _ => unreachable!(), + }; + let info = ObjectInfo { + last_modified: version.timestamp, + size: meta.size, + etag: meta.etag.to_string(), + }; + + match self.try_insert_entry(object.key.clone(), info) { + true => ExtractionResult::Extracted { + key: object.key.clone(), + }, + false => ExtractionResult::Filled, + } + } +} + +impl ExtractAccumulator for UploadAccumulator { + /// Observe the iterator, process a single key, and try to extract one or more upload entries + /// + /// This function processes a single object from the iterator that can contain an arbitrary + /// number of versions, and thus "uploads". + fn extract<'a>( + &mut self, + query: &ListQueryCommon, + cursor: &RangeBegin, + objects: &mut Peekable<Iter<'a, Object>>, + ) -> ExtractionResult { + if let Some(e) = self.extract_common_prefix(objects, query) { + return e; + } + + // Get the next object from the iterator + let object = objects.next().expect("This iterator can not be empty as it is checked earlier in the code. This is a logic bug, please report it."); + + let mut uploads_for_key = object + .versions() + .iter() + .filter(|x| x.is_uploading()) + .collect::<Vec<&ObjectVersion>>(); + + // S3 logic requires lexicographically sorted upload ids. + uploads_for_key.sort_unstable_by_key(|e| e.uuid); + + // Skip results if an upload marker is provided + if let RangeBegin::AfterUpload { upload, .. } = cursor { + // Because our data are sorted, we can use a binary search to find the UUID + // or to find where it should have been added. Once this position is found, + // we use it to discard the first part of the array. + let idx = match uploads_for_key.binary_search_by(|e| e.uuid.cmp(upload)) { + // we start after the found uuid, so we need to discard the pointed value. + // In the worst case, the UUID is the last element, which leads us to an empty array, + // but we are never out of bounds. + Ok(i) => i + 1, + // if the UUID is not found, the upload may have been discarded between the two requests; + // this function returns where it could have been inserted, + // so the pointed value is greater than our marker and we need to keep it. + Err(i) => i, + }; + uploads_for_key = uploads_for_key[idx..].to_vec(); + } + + let mut iter = uploads_for_key.iter(); + + // The first entry is a special case, + // as it changes our result enum type + let first_upload = match iter.next() { + Some(u) => u, + None => { + return ExtractionResult::Extracted { + key: object.key.clone(), + } + } + }; + let first_up_info = UploadInfo { + key: object.key.to_string(), + timestamp: first_upload.timestamp, + }; + if !self.try_insert_entry(first_upload.uuid, first_up_info) { + return ExtractionResult::Filled; + } + + // We can then collect the remaining uploads in a loop + let mut prev_uuid = first_upload.uuid; + for upload in iter { + let up_info = UploadInfo { + key: object.key.to_string(), + timestamp: upload.timestamp, + }; + + // Insert data into our accumulator. + // If it is full, return information to paginate.
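The marker-skip above leans on `binary_search_by` returning either the index of the match or the insertion point. A minimal self-contained sketch of that behaviour (plain `u8` ids stand in for Garage's 32-byte upload UUIDs; `skip_after_marker` is illustrative, not part of this patch):

```rust
// Illustrative sketch only: u8 ids stand in for 32-byte upload UUIDs.
fn skip_after_marker(sorted_ids: &[u8], marker: u8) -> &[u8] {
    let idx = match sorted_ids.binary_search(&marker) {
        Ok(i) => i + 1, // marker found: resume strictly after it
        Err(i) => i,    // marker deleted in between: resume at its insertion point
    };
    &sorted_ids[idx..]
}

fn main() {
    assert_eq!(skip_after_marker(&[1, 3, 5], 3), &[5]);
    assert_eq!(skip_after_marker(&[1, 3, 5], 2), &[3, 5]);
    // Worst case: the marker sorts after everything; we get an empty slice,
    // never an out-of-bounds access.
    assert_eq!(skip_after_marker(&[1, 3, 5], 9), &[] as &[u8]);
}
```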
+ if !self.try_insert_entry(upload.uuid, up_info) { + return ExtractionResult::FilledAtUpload { + key: object.key.clone(), + upload: prev_uuid, + }; + } + // Update our last added UUID + prev_uuid = upload.uuid; + } + + // We successfully collected all the uploads + ExtractionResult::Extracted { + key: object.key.clone(), + } + } +} + +/* + * Utility functions + */ + +/// This is a stub for Result::into_ok_or_err that is not yet in Rust stable +fn into_ok_or_err(r: Result) -> T { + match r { + Ok(r) => r, + Err(r) => r, + } +} + +/// Returns the common prefix of the object given the query prefix and delimiter +fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> { + match &query.delimiter { + Some(delimiter) => object.key[query.prefix.len()..] + .find(delimiter) + .map(|i| &object.key[..query.prefix.len() + i + delimiter.len()]), + None => None, + } +} + +/// URIencode a value if needed +fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value { + if yes { + s3_xml::Value(uri_encode(s, true)) + } else { + s3_xml::Value(s.to_string()) + } +} + +/* + * Unit tests of this module + */ +#[cfg(test)] +mod tests { + use super::*; + use garage_model::s3::version_table::*; + use garage_util::*; + use std::iter::FromIterator; + + const TS: u64 = 1641394898314; + + fn bucket() -> Uuid { + Uuid::from([0x42; 32]) + } + + fn query() -> ListMultipartUploadsQuery { + ListMultipartUploadsQuery { + common: ListQueryCommon { + prefix: "".to_string(), + delimiter: Some("/".to_string()), + page_size: 1000, + urlencode_resp: false, + bucket_name: "a".to_string(), + bucket_id: Uuid::from([0x00; 32]), + }, + key_marker: None, + upload_id_marker: None, + } + } + + fn objs() -> Vec { + vec![ + Object::new( + bucket(), + "a/b/c".to_string(), + vec![objup_version([0x01; 32])], + ), + Object::new(bucket(), "d".to_string(), vec![objup_version([0x01; 32])]), + ] + } + + fn objup_version(uuid: [u8; 32]) -> ObjectVersion { + ObjectVersion { + uuid: Uuid::from(uuid), + timestamp: TS, + state: ObjectVersionState::Uploading(ObjectVersionHeaders { + content_type: "text/plain".to_string(), + other: BTreeMap::::new(), + }), + } + } + + #[test] + fn test_common_prefixes() { + let mut query = query(); + let objs = objs(); + + query.common.prefix = "a/".to_string(); + assert_eq!( + common_prefix(objs.get(0).unwrap(), &query.common), + Some("a/b/") + ); + + query.common.prefix = "a/b/".to_string(); + assert_eq!(common_prefix(objs.get(0).unwrap(), &query.common), None); + } + + #[test] + fn test_extract_common_prefix() { + let mut query = query(); + query.common.prefix = "a/".to_string(); + let objs = objs(); + let mut acc = UploadAccumulator::new(query.common.page_size); + + let mut iter = objs.iter().peekable(); + match acc.extract_common_prefix(&mut iter, &query.common) { + Some(ExtractionResult::Extracted { key }) => assert_eq!(key, "a/b/c".to_string()), + _ => panic!("wrong result"), + } + assert_eq!(acc.common_prefixes.len(), 1); + assert_eq!(acc.common_prefixes.iter().next().unwrap(), "a/b/"); + } + + #[test] + fn test_extract_upload() { + let objs = vec![ + Object::new( + bucket(), + "b".to_string(), + vec![ + objup_version([0x01; 32]), + objup_version([0x80; 32]), + objup_version([0x8f; 32]), + objup_version([0xdd; 32]), + ], + ), + Object::new(bucket(), "c".to_string(), vec![]), + ]; + + let mut acc = UploadAccumulator::new(2); + let mut start = RangeBegin::AfterUpload { + key: "b".to_string(), + upload: Uuid::from([0x01; 32]), + }; + + let mut iter = objs.iter().peekable(); + + // Check the 
case where we skip some uploads + match acc.extract(&(query().common), &start, &mut iter) { + ExtractionResult::FilledAtUpload { key, upload } => { + assert_eq!(key, "b"); + assert_eq!(upload, Uuid::from([0x8f; 32])); + } + _ => panic!("wrong result"), + }; + + assert_eq!(acc.keys.len(), 2); + assert_eq!( + acc.keys.get(&Uuid::from([0x80; 32])).unwrap(), + &UploadInfo { + timestamp: TS, + key: "b".to_string() + } + ); + assert_eq!( + acc.keys.get(&Uuid::from([0x8f; 32])).unwrap(), + &UploadInfo { + timestamp: TS, + key: "b".to_string() + } + ); + + acc = UploadAccumulator::new(2); + start = RangeBegin::AfterUpload { + key: "b".to_string(), + upload: Uuid::from([0xff; 32]), + }; + iter = objs.iter().peekable(); + + // Check the case where we skip all the uploads + match acc.extract(&(query().common), &start, &mut iter) { + ExtractionResult::Extracted { key } if key.as_str() == "b" => (), + _ => panic!("wrong result"), + }; + } + + #[tokio::test] + async fn test_fetch_uploads_no_result() -> Result<(), Error> { + let query = query(); + let mut acc = query.build_accumulator(); + let page = fetch_list_entries( + &query.common, + query.begin()?, + &mut acc, + |_, _, _| async move { Ok(vec![]) }, + ) + .await?; + assert_eq!(page, None); + assert_eq!(acc.common_prefixes.len(), 0); + assert_eq!(acc.keys.len(), 0); + + Ok(()) + } + + #[tokio::test] + async fn test_fetch_uploads_basic() -> Result<(), Error> { + let query = query(); + let mut acc = query.build_accumulator(); + let mut fake_io = |_, _, _| async move { Ok(objs()) }; + let page = + fetch_list_entries(&query.common, query.begin()?, &mut acc, &mut fake_io).await?; + assert_eq!(page, None); + assert_eq!(acc.common_prefixes.len(), 1); + assert_eq!(acc.keys.len(), 1); + assert!(acc.common_prefixes.contains("a/")); + + Ok(()) + } + + #[tokio::test] + async fn test_fetch_uploads_advanced() -> Result<(), Error> { + let mut query = query(); + query.common.page_size = 2; + + let mut fake_io = |_, k: Option, _| async move { + Ok(match k.as_deref() { + Some("") => vec![ + Object::new(bucket(), "b/a".to_string(), vec![objup_version([0x01; 32])]), + Object::new(bucket(), "b/b".to_string(), vec![objup_version([0x01; 32])]), + Object::new(bucket(), "b/c".to_string(), vec![objup_version([0x01; 32])]), + ], + Some("b0") => vec![ + Object::new(bucket(), "c/a".to_string(), vec![objup_version([0x01; 32])]), + Object::new(bucket(), "c/b".to_string(), vec![objup_version([0x01; 32])]), + Object::new(bucket(), "c/c".to_string(), vec![objup_version([0x02; 32])]), + ], + Some("c0") => vec![Object::new( + bucket(), + "d".to_string(), + vec![objup_version([0x01; 32])], + )], + _ => panic!("wrong value {:?}", k), + }) + }; + + let mut acc = query.build_accumulator(); + let page = + fetch_list_entries(&query.common, query.begin()?, &mut acc, &mut fake_io).await?; + assert_eq!( + page, + Some(RangeBegin::IncludingKey { + key: "c0".to_string(), + fallback_key: Some("c/c".to_string()) + }) + ); + assert_eq!(acc.common_prefixes.len(), 2); + assert_eq!(acc.keys.len(), 0); + assert!(acc.common_prefixes.contains("b/")); + assert!(acc.common_prefixes.contains("c/")); + + Ok(()) + } + + fn version() -> Version { + let uuid = Uuid::from([0x08; 32]); + + let blocks = vec![ + ( + VersionBlockKey { + part_number: 1, + offset: 1, + }, + VersionBlock { + hash: uuid, + size: 3, + }, + ), + ( + VersionBlockKey { + part_number: 1, + offset: 2, + }, + VersionBlock { + hash: uuid, + size: 2, + }, + ), + ( + VersionBlockKey { + part_number: 2, + offset: 1, + }, + VersionBlock { + hash: 
uuid, + size: 8, + }, + ), + ( + VersionBlockKey { + part_number: 5, + offset: 1, + }, + VersionBlock { + hash: uuid, + size: 7, + }, + ), + ( + VersionBlockKey { + part_number: 8, + offset: 1, + }, + VersionBlock { + hash: uuid, + size: 5, + }, + ), + ]; + let etags = vec![ + (1, "etag1".to_string()), + (3, "etag2".to_string()), + (5, "etag3".to_string()), + (8, "etag4".to_string()), + (9, "etag5".to_string()), + ]; + + Version { + bucket_id: uuid, + key: "a".to_string(), + uuid, + deleted: false.into(), + blocks: crdt::Map::::from_iter(blocks), + parts_etags: crdt::Map::::from_iter(etags), + } + } + + fn obj() -> Object { + Object::new(bucket(), "d".to_string(), vec![objup_version([0x08; 32])]) + } + + #[test] + fn test_fetch_part_info() -> Result<(), Error> { + let uuid = Uuid::from([0x08; 32]); + let mut query = ListPartsQuery { + bucket_name: "a".to_string(), + bucket_id: uuid, + key: "a".to_string(), + upload_id: "xx".to_string(), + part_number_marker: None, + max_parts: 2, + }; + + assert!( + fetch_part_info(&query, None, None, uuid).is_err(), + "No object and version should fail" + ); + assert!( + fetch_part_info(&query, Some(obj()), None, uuid).is_err(), + "No version should faild" + ); + assert!( + fetch_part_info(&query, None, Some(version()), uuid).is_err(), + "No object should fail" + ); + + // Start from the beginning but with limited size to trigger pagination + let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + assert_eq!(pagination.unwrap(), 5); + assert_eq!( + info, + vec![ + PartInfo { + etag: "etag1".to_string(), + timestamp: TS, + part_number: 1, + size: 5 + }, + PartInfo { + etag: "etag3".to_string(), + timestamp: TS, + part_number: 5, + size: 7 + }, + ] + ); + + // Use previous pagination to make a new request + query.part_number_marker = Some(pagination.unwrap()); + let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + assert!(pagination.is_none()); + assert_eq!( + info, + vec![PartInfo { + etag: "etag4".to_string(), + timestamp: TS, + part_number: 8, + size: 5 + },] + ); + + // Trying to access a part that is way larger than registered ones + query.part_number_marker = Some(9999); + let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + assert!(pagination.is_none()); + assert_eq!(info, vec![]); + + // Try without any limitation + query.max_parts = 1000; + query.part_number_marker = None; + let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; + assert!(pagination.is_none()); + assert_eq!( + info, + vec![ + PartInfo { + etag: "etag1".to_string(), + timestamp: TS, + part_number: 1, + size: 5 + }, + PartInfo { + etag: "etag3".to_string(), + timestamp: TS, + part_number: 5, + size: 7 + }, + PartInfo { + etag: "etag4".to_string(), + timestamp: TS, + part_number: 8, + size: 5 + }, + ] + ); + + Ok(()) + } +} diff --git a/src/api/s3/mod.rs b/src/api/s3/mod.rs new file mode 100644 index 00000000..3f5c1915 --- /dev/null +++ b/src/api/s3/mod.rs @@ -0,0 +1,14 @@ +pub mod api_server; + +mod bucket; +mod copy; +pub mod cors; +mod delete; +pub mod get; +mod list; +mod post_object; +mod put; +mod website; + +mod router; +pub mod xml; diff --git a/src/api/s3/post_object.rs b/src/api/s3/post_object.rs new file mode 100644 index 00000000..86fa7880 --- /dev/null +++ b/src/api/s3/post_object.rs @@ -0,0 +1,507 @@ +use std::collections::HashMap; +use std::convert::TryInto; +use std::ops::RangeInclusive; +use std::sync::Arc; +use std::task::{Context, 
Poll}; + +use bytes::Bytes; +use chrono::{DateTime, Duration, Utc}; +use futures::{Stream, StreamExt}; +use hyper::header::{self, HeaderMap, HeaderName, HeaderValue}; +use hyper::{Body, Request, Response, StatusCode}; +use multer::{Constraints, Multipart, SizeLimit}; +use serde::Deserialize; + +use garage_model::garage::Garage; + +use crate::error::*; +use crate::helpers::resolve_bucket; +use crate::s3::put::{get_headers, save_stream}; +use crate::s3::xml as s3_xml; +use crate::signature::payload::{parse_date, verify_v4}; + +pub async fn handle_post_object( + garage: Arc<Garage>, + req: Request<Body>, + bucket: String, +) -> Result<Response<Body>, Error> { + let boundary = req + .headers() + .get(header::CONTENT_TYPE) + .and_then(|ct| ct.to_str().ok()) + .and_then(|ct| multer::parse_boundary(ct).ok()) + .ok_or_bad_request("Could not get multipart boundary")?; + + // 16k seems plenty for a header. 5G is the max size of a single part, so it seems reasonable + // for a PostObject + let constraints = Constraints::new().size_limit( + SizeLimit::new() + .per_field(16 * 1024) + .for_field("file", 5 * 1024 * 1024 * 1024), + ); + + let (head, body) = req.into_parts(); + let mut multipart = Multipart::with_constraints(body, boundary, constraints); + + let mut params = HeaderMap::new(); + let field = loop { + let field = if let Some(field) = multipart.next_field().await? { + field + } else { + return Err(Error::BadRequest( + "Request did not contain a file".to_owned(), + )); + }; + let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) { + name + } else { + continue; + }; + if name == "file" { + break field; + } + + if let Ok(content) = HeaderValue::from_str(&field.text().await?) { + match name.as_str() { + "tag" => (/* tags would need to be re-encoded, but we don't support them yet anyway */), + "acl" => { + if params.insert("x-amz-acl", content).is_some() { + return Err(Error::BadRequest( + "Field 'acl' provided more than once".to_string(), + )); + } + } + _ => { + if params.insert(&name, content).is_some() { + return Err(Error::BadRequest(format!( + "Field '{}' provided more than once", + name + ))); + } + } + } + } + }; + + // The current part is the file. Do some checks before handing it over to the PutObject code + let key = params + .get("key") + .ok_or_bad_request("No key was provided")? + .to_str()?; + let credential = params + .get("x-amz-credential") + .ok_or_else(|| { + Error::Forbidden("Garage does not support anonymous access yet".to_string()) + })? + .to_str()?; + let policy = params + .get("policy") + .ok_or_bad_request("No policy was provided")? + .to_str()?; + let signature = params + .get("x-amz-signature") + .ok_or_bad_request("No signature was provided")? + .to_str()?; + let date = params + .get("x-amz-date") + .ok_or_bad_request("No date was provided")? + .to_str()?; + + let key = if key.contains("${filename}") { + // if no filename is provided, don't replace. This matches the behavior of AWS.
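The `${filename}` substitution described in the comment above can be tried in isolation; `expand_key` and the sample keys below are hypothetical, not part of this change:

```rust
// Hypothetical helper mirroring the branch below: substitute ${filename}
// only when the form actually carried a file name.
fn expand_key(template: &str, filename: Option<&str>) -> String {
    match filename {
        Some(name) if template.contains("${filename}") => {
            template.replace("${filename}", name)
        }
        _ => template.to_owned(),
    }
}

fn main() {
    assert_eq!(expand_key("user/${filename}", Some("cat.png")), "user/cat.png");
    assert_eq!(expand_key("user/${filename}", None), "user/${filename}");
    assert_eq!(expand_key("user/fixed-key", Some("cat.png")), "user/fixed-key");
}
```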
+ if let Some(filename) = field.file_name() { + key.replace("${filename}", filename) + } else { + key.to_owned() + } + } else { + key.to_owned() + }; + + let date = parse_date(date)?; + let api_key = verify_v4( + &garage, + "s3", + credential, + &date, + signature, + policy.as_bytes(), + ) + .await?; + + let bucket_id = resolve_bucket(&garage, &bucket, &api_key).await?; + + if !api_key.allow_write(&bucket_id) { + return Err(Error::Forbidden( + "Operation is not allowed for this key.".to_string(), + )); + } + + let decoded_policy = base64::decode(&policy)?; + let decoded_policy: Policy = + serde_json::from_slice(&decoded_policy).ok_or_bad_request("Invalid policy")?; + + let expiration: DateTime<Utc> = DateTime::parse_from_rfc3339(&decoded_policy.expiration) + .ok_or_bad_request("Invalid expiration date")? + .into(); + if Utc::now() - expiration > Duration::zero() { + return Err(Error::BadRequest( + "Expiration date is in the past".to_string(), + )); + } + + let mut conditions = decoded_policy.into_conditions()?; + + for (param_key, value) in params.iter() { + let mut param_key = param_key.to_string(); + param_key.make_ascii_lowercase(); + match param_key.as_str() { + "policy" | "x-amz-signature" => (), // this is always accepted, as it's required to validate other fields + "content-type" => { + let conds = conditions.params.remove("content-type").ok_or_else(|| { + Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key)) + })?; + for cond in conds { + let ok = match cond { + Operation::Equal(s) => s.as_str() == value, + Operation::StartsWith(s) => { + value.to_str()?.split(',').all(|v| v.starts_with(&s)) + } + }; + if !ok { + return Err(Error::BadRequest(format!( + "Key '{}' has a value not allowed in policy", + param_key + ))); + } + } + } + "key" => { + let conds = conditions.params.remove("key").ok_or_else(|| { + Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key)) + })?; + for cond in conds { + let ok = match cond { + Operation::Equal(s) => s == key, + Operation::StartsWith(s) => key.starts_with(&s), + }; + if !ok { + return Err(Error::BadRequest(format!( + "Key '{}' has a value not allowed in policy", + param_key + ))); + } + } + } + _ => { + if param_key.starts_with("x-ignore-") { + // if an x-ignore field is provided in the policy, it's not removed here, so it will be + // rejected as provided in the policy but not in the request. As odd as it is, it's + // how AWS seems to behave.
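The per-field checks in this loop all share one shape: every condition registered for a field must match the submitted value. A condensed, self-contained model of that predicate, using simplified stand-ins for this module's `Operation` enum:

```rust
// Simplified stand-ins for the Operation enum used by the policy checks.
enum Op {
    Equal(String),
    StartsWith(String),
}

// A field is allowed only if every condition declared for it matches.
fn field_allowed(conds: &[Op], value: &str) -> bool {
    conds.iter().all(|c| match c {
        Op::Equal(s) => s == value,
        Op::StartsWith(s) => value.starts_with(s.as_str()),
    })
}

fn main() {
    let conds = [Op::StartsWith("image/".to_string())];
    assert!(field_allowed(&conds, "image/png"));
    assert!(!field_allowed(&conds, "text/plain"));
}
```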
+ continue; + } + let conds = conditions.params.remove(¶m_key).ok_or_else(|| { + Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key)) + })?; + for cond in conds { + let ok = match cond { + Operation::Equal(s) => s.as_str() == value, + Operation::StartsWith(s) => value.to_str()?.starts_with(s.as_str()), + }; + if !ok { + return Err(Error::BadRequest(format!( + "Key '{}' has value not allowed in policy", + param_key + ))); + } + } + } + } + } + + if let Some((param_key, _)) = conditions.params.iter().next() { + return Err(Error::BadRequest(format!( + "Key '{}' is required in policy, but no value was provided", + param_key + ))); + } + + let headers = get_headers(¶ms)?; + + let stream = field.map(|r| r.map_err(Into::into)); + let (_, md5) = save_stream( + garage, + headers, + StreamLimiter::new(stream, conditions.content_length), + bucket_id, + &key, + None, + None, + ) + .await?; + + let etag = format!("\"{}\"", md5); + + let resp = if let Some(mut target) = params + .get("success_action_redirect") + .and_then(|h| h.to_str().ok()) + .and_then(|u| url::Url::parse(u).ok()) + .filter(|u| u.scheme() == "https" || u.scheme() == "http") + { + target + .query_pairs_mut() + .append_pair("bucket", &bucket) + .append_pair("key", &key) + .append_pair("etag", &etag); + let target = target.to_string(); + Response::builder() + .status(StatusCode::SEE_OTHER) + .header(header::LOCATION, target.clone()) + .header(header::ETAG, etag) + .body(target.into())? + } else { + let path = head + .uri + .into_parts() + .path_and_query + .map(|paq| paq.path().to_string()) + .unwrap_or_else(|| "/".to_string()); + let authority = head + .headers + .get(header::HOST) + .and_then(|h| h.to_str().ok()) + .unwrap_or_default(); + let proto = if !authority.is_empty() { + "https://" + } else { + "" + }; + + let url_key: String = form_urlencoded::byte_serialize(key.as_bytes()) + .flat_map(str::chars) + .collect(); + let location = format!("{}{}{}{}", proto, authority, path, url_key); + + let action = params + .get("success_action_status") + .and_then(|h| h.to_str().ok()) + .unwrap_or("204"); + let builder = Response::builder() + .header(header::LOCATION, location.clone()) + .header(header::ETAG, etag.clone()); + match action { + "200" => builder.status(StatusCode::OK).body(Body::empty())?, + "201" => { + let xml = s3_xml::PostObject { + xmlns: (), + location: s3_xml::Value(location), + bucket: s3_xml::Value(bucket), + key: s3_xml::Value(key), + etag: s3_xml::Value(etag), + }; + let body = s3_xml::to_xml_with_header(&xml)?; + builder + .status(StatusCode::CREATED) + .body(Body::from(body.into_bytes()))? 
+ } + _ => builder.status(StatusCode::NO_CONTENT).body(Body::empty())?, + } + }; + + Ok(resp) +} + +#[derive(Deserialize)] +struct Policy { + expiration: String, + conditions: Vec, +} + +impl Policy { + fn into_conditions(self) -> Result { + let mut params = HashMap::<_, Vec<_>>::new(); + + let mut length = (0, u64::MAX); + for condition in self.conditions { + match condition { + PolicyCondition::Equal(map) => { + if map.len() != 1 { + return Err(Error::BadRequest("Invalid policy item".to_owned())); + } + let (mut k, v) = map.into_iter().next().expect("size was verified"); + k.make_ascii_lowercase(); + params.entry(k).or_default().push(Operation::Equal(v)); + } + PolicyCondition::OtherOp([cond, mut key, value]) => { + if key.remove(0) != '$' { + return Err(Error::BadRequest("Invalid policy item".to_owned())); + } + key.make_ascii_lowercase(); + match cond.as_str() { + "eq" => { + params.entry(key).or_default().push(Operation::Equal(value)); + } + "starts-with" => { + params + .entry(key) + .or_default() + .push(Operation::StartsWith(value)); + } + _ => return Err(Error::BadRequest("Invalid policy item".to_owned())), + } + } + PolicyCondition::SizeRange(key, min, max) => { + if key == "content-length-range" { + length.0 = length.0.max(min); + length.1 = length.1.min(max); + } else { + return Err(Error::BadRequest("Invalid policy item".to_owned())); + } + } + } + } + Ok(Conditions { + params, + content_length: RangeInclusive::new(length.0, length.1), + }) + } +} + +/// A single condition from a policy +#[derive(Debug, Deserialize)] +#[serde(untagged)] +enum PolicyCondition { + // will contain a single key-value pair + Equal(HashMap), + OtherOp([String; 3]), + SizeRange(String, u64, u64), +} + +#[derive(Debug)] +struct Conditions { + params: HashMap>, + content_length: RangeInclusive, +} + +#[derive(Debug, PartialEq, Eq)] +enum Operation { + Equal(String), + StartsWith(String), +} + +struct StreamLimiter { + inner: T, + length: RangeInclusive, + read: u64, +} + +impl StreamLimiter { + fn new(stream: T, length: RangeInclusive) -> Self { + StreamLimiter { + inner: stream, + length, + read: 0, + } + } +} + +impl Stream for StreamLimiter +where + T: Stream> + Unpin, +{ + type Item = Result; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + ctx: &mut Context<'_>, + ) -> Poll> { + let res = std::pin::Pin::new(&mut self.inner).poll_next(ctx); + match &res { + Poll::Ready(Some(Ok(bytes))) => { + self.read += bytes.len() as u64; + // optimization to fail early when we know before the end it's too long + if self.length.end() < &self.read { + return Poll::Ready(Some(Err(Error::BadRequest( + "File size does not match policy".to_owned(), + )))); + } + } + Poll::Ready(None) => { + if !self.length.contains(&self.read) { + return Poll::Ready(Some(Err(Error::BadRequest( + "File size does not match policy".to_owned(), + )))); + } + } + _ => {} + } + res + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_policy_1() { + let policy_json = br#" +{ "expiration": "2007-12-01T12:00:00.000Z", + "conditions": [ + {"acl": "public-read" }, + {"bucket": "johnsmith" }, + ["starts-with", "$key", "user/eric/"] + ] +} + "#; + let policy_2: Policy = serde_json::from_slice(&policy_json[..]).unwrap(); + let mut conditions = policy_2.into_conditions().unwrap(); + + assert_eq!( + conditions.params.remove(&"acl".to_string()), + Some(vec![Operation::Equal("public-read".into())]) + ); + assert_eq!( + conditions.params.remove(&"bucket".to_string()), + Some(vec![Operation::Equal("johnsmith".into())]) + ); + 
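As an aside on `into_conditions()` above: several `content-length-range` conditions are intersected by keeping the tightest lower and upper bounds seen so far. A small sketch of that folding (the helper name is illustrative):

```rust
use std::ops::RangeInclusive;

// Sketch of how content-length-range conditions are folded together:
// keep the tightest lower and upper bounds across all conditions.
fn intersect_ranges(ranges: &[(u64, u64)]) -> RangeInclusive<u64> {
    let mut length = (0u64, u64::MAX);
    for (min, max) in ranges {
        length.0 = length.0.max(*min);
        length.1 = length.1.min(*max);
    }
    length.0..=length.1
}

fn main() {
    assert_eq!(
        intersect_ranges(&[(1024, 10_000_000), (4096, 5_000_000)]),
        4096..=5_000_000
    );
    // No condition at all leaves the range unbounded.
    assert_eq!(intersect_ranges(&[]), 0..=u64::MAX);
}
```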
assert_eq!( + conditions.params.remove(&"key".to_string()), + Some(vec![Operation::StartsWith("user/eric/".into())]) + ); + assert!(conditions.params.is_empty()); + assert_eq!(conditions.content_length, 0..=u64::MAX); + } + + #[test] + fn test_policy_2() { + let policy_json = br#" +{ "expiration": "2007-12-01T12:00:00.000Z", + "conditions": [ + [ "eq", "$acl", "public-read" ], + ["starts-with", "$Content-Type", "image/"], + ["starts-with", "$success_action_redirect", ""], + ["content-length-range", 1048576, 10485760] + ] +} + "#; + let policy_2: Policy = serde_json::from_slice(&policy_json[..]).unwrap(); + let mut conditions = policy_2.into_conditions().unwrap(); + + assert_eq!( + conditions.params.remove(&"acl".to_string()), + Some(vec![Operation::Equal("public-read".into())]) + ); + assert_eq!( + conditions.params.remove("content-type").unwrap(), + vec![Operation::StartsWith("image/".into())] + ); + assert_eq!( + conditions + .params + .remove(&"success_action_redirect".to_string()), + Some(vec![Operation::StartsWith("".into())]) + ); + assert!(conditions.params.is_empty()); + assert_eq!(conditions.content_length, 1048576..=10485760); + } +} diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs new file mode 100644 index 00000000..89aa8d84 --- /dev/null +++ b/src/api/s3/put.rs @@ -0,0 +1,753 @@ +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::sync::Arc; + +use futures::prelude::*; +use hyper::body::{Body, Bytes}; +use hyper::header::{HeaderMap, HeaderValue}; +use hyper::{Request, Response}; +use md5::{digest::generic_array::*, Digest as Md5Digest, Md5}; +use sha2::Sha256; + +use garage_table::*; +use garage_util::data::*; +use garage_util::error::Error as GarageError; +use garage_util::time::*; + +use garage_block::manager::INLINE_THRESHOLD; +use garage_model::garage::Garage; +use garage_model::s3::block_ref_table::*; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; + +use crate::error::*; +use crate::s3::xml as s3_xml; +use crate::signature::verify_signed_content; + +pub async fn handle_put( + garage: Arc, + req: Request, + bucket_id: Uuid, + key: &str, + content_sha256: Option, +) -> Result, Error> { + // Retrieve interesting headers from request + let headers = get_headers(req.headers())?; + debug!("Object headers: {:?}", headers); + + let content_md5 = match req.headers().get("content-md5") { + Some(x) => Some(x.to_str()?.to_string()), + None => None, + }; + + let (_head, body) = req.into_parts(); + let body = body.map_err(Error::from); + + save_stream( + garage, + headers, + body, + bucket_id, + key, + content_md5, + content_sha256, + ) + .await + .map(|(uuid, md5)| put_response(uuid, md5)) +} + +pub(crate) async fn save_stream> + Unpin>( + garage: Arc, + headers: ObjectVersionHeaders, + body: S, + bucket_id: Uuid, + key: &str, + content_md5: Option, + content_sha256: Option, +) -> Result<(Uuid, String), Error> { + // Generate identity of new version + let version_uuid = gen_uuid(); + let version_timestamp = now_msec(); + + let mut chunker = StreamChunker::new(body, garage.config.block_size); + let first_block = chunker.next().await?.unwrap_or_default(); + + // If body is small enough, store it directly in the object table + // as "inline data". We can then return immediately. 
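On the inline path below, the ETag is simply the hex-encoded MD5 digest of the body. A minimal sketch of that computation, assuming the same `md-5` and `hex` crates this file already imports:

```rust
use md5::{Digest, Md5};

// Hex-encoded MD5 digest, as used for the ETag of inline objects.
fn md5_hex(data: &[u8]) -> String {
    let mut hasher = Md5::new();
    hasher.update(data);
    hex::encode(hasher.finalize())
}

fn main() {
    // Well-known MD5 of the empty input.
    assert_eq!(md5_hex(b""), "d41d8cd98f00b204e9800998ecf8427e");
}
```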
+ if first_block.len() < INLINE_THRESHOLD { + let mut md5sum = Md5::new(); + md5sum.update(&first_block[..]); + let data_md5sum = md5sum.finalize(); + let data_md5sum_hex = hex::encode(data_md5sum); + + let data_sha256sum = sha256sum(&first_block[..]); + + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + )?; + + let object_version = ObjectVersion { + uuid: version_uuid, + timestamp: version_timestamp, + state: ObjectVersionState::Complete(ObjectVersionData::Inline( + ObjectVersionMeta { + headers, + size: first_block.len() as u64, + etag: data_md5sum_hex.clone(), + }, + first_block, + )), + }; + + let object = Object::new(bucket_id, key.into(), vec![object_version]); + garage.object_table.insert(&object).await?; + + return Ok((version_uuid, data_md5sum_hex)); + } + + // Write version identifier in object table so that we have a trace + // that we are uploading something + let mut object_version = ObjectVersion { + uuid: version_uuid, + timestamp: version_timestamp, + state: ObjectVersionState::Uploading(headers.clone()), + }; + let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); + garage.object_table.insert(&object).await?; + + // Initialize corresponding entry in version table + // Write this entry now, even with empty block list, + // to prevent block_ref entries from being deleted (they can be deleted + // if the reference a version that isn't found in the version table) + let version = Version::new(version_uuid, bucket_id, key.into(), false); + garage.version_table.insert(&version).await?; + + // Transfer data and verify checksum + let first_block_hash = blake2sum(&first_block[..]); + let tx_result = read_and_put_blocks( + &garage, + &version, + 1, + first_block, + first_block_hash, + &mut chunker, + ) + .await + .and_then(|(total_size, data_md5sum, data_sha256sum)| { + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + ) + .map(|()| (total_size, data_md5sum)) + }); + + // If something went wrong, clean up + let (total_size, md5sum_arr) = match tx_result { + Ok(rv) => rv, + Err(e) => { + // Mark object as aborted, this will free the blocks further down + object_version.state = ObjectVersionState::Aborted; + let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); + garage.object_table.insert(&object).await?; + return Err(e); + } + }; + + // Save final object state, marked as Complete + let md5sum_hex = hex::encode(md5sum_arr); + object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( + ObjectVersionMeta { + headers, + size: total_size, + etag: md5sum_hex.clone(), + }, + first_block_hash, + )); + let object = Object::new(bucket_id, key.into(), vec![object_version]); + garage.object_table.insert(&object).await?; + + Ok((version_uuid, md5sum_hex)) +} + +/// Validate MD5 sum against content-md5 header +/// and sha256sum against signed content-sha256 +fn ensure_checksum_matches( + data_md5sum: &[u8], + data_sha256sum: garage_util::data::FixedBytes32, + content_md5: Option<&str>, + content_sha256: Option, +) -> Result<(), Error> { + if let Some(expected_sha256) = content_sha256 { + if expected_sha256 != data_sha256sum { + return Err(Error::BadRequest( + "Unable to validate x-amz-content-sha256".to_string(), + )); + } else { + trace!("Successfully validated x-amz-content-sha256"); + } + } + if let Some(expected_md5) = content_md5 { + if expected_md5.trim_matches('"') != 
base64::encode(data_md5sum) { + return Err(Error::BadRequest( + "Unable to validate content-md5".to_string(), + )); + } else { + trace!("Successfully validated content-md5"); + } + } + Ok(()) +} + +async fn read_and_put_blocks> + Unpin>( + garage: &Garage, + version: &Version, + part_number: u64, + first_block: Vec, + first_block_hash: Hash, + chunker: &mut StreamChunker, +) -> Result<(u64, GenericArray, Hash), Error> { + let mut md5hasher = Md5::new(); + let mut sha256hasher = Sha256::new(); + md5hasher.update(&first_block[..]); + sha256hasher.update(&first_block[..]); + + let mut next_offset = first_block.len(); + let mut put_curr_version_block = put_block_meta( + garage, + version, + part_number, + 0, + first_block_hash, + first_block.len() as u64, + ); + let mut put_curr_block = garage + .block_manager + .rpc_put_block(first_block_hash, first_block); + + loop { + let (_, _, next_block) = futures::try_join!( + put_curr_block.map_err(Error::from), + put_curr_version_block.map_err(Error::from), + chunker.next(), + )?; + if let Some(block) = next_block { + md5hasher.update(&block[..]); + sha256hasher.update(&block[..]); + let block_hash = blake2sum(&block[..]); + let block_len = block.len(); + put_curr_version_block = put_block_meta( + garage, + version, + part_number, + next_offset as u64, + block_hash, + block_len as u64, + ); + put_curr_block = garage.block_manager.rpc_put_block(block_hash, block); + next_offset += block_len; + } else { + break; + } + } + + let total_size = next_offset as u64; + let data_md5sum = md5hasher.finalize(); + + let data_sha256sum = sha256hasher.finalize(); + let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap(); + + Ok((total_size, data_md5sum, data_sha256sum)) +} + +async fn put_block_meta( + garage: &Garage, + version: &Version, + part_number: u64, + offset: u64, + hash: Hash, + size: u64, +) -> Result<(), GarageError> { + let mut version = version.clone(); + version.blocks.put( + VersionBlockKey { + part_number, + offset, + }, + VersionBlock { hash, size }, + ); + + let block_ref = BlockRef { + block: hash, + version: version.uuid, + deleted: false.into(), + }; + + futures::try_join!( + garage.version_table.insert(&version), + garage.block_ref_table.insert(&block_ref), + )?; + Ok(()) +} + +struct StreamChunker>> { + stream: S, + read_all: bool, + block_size: usize, + buf: VecDeque, +} + +impl> + Unpin> StreamChunker { + fn new(stream: S, block_size: usize) -> Self { + Self { + stream, + read_all: false, + block_size, + buf: VecDeque::with_capacity(2 * block_size), + } + } + + async fn next(&mut self) -> Result>, Error> { + while !self.read_all && self.buf.len() < self.block_size { + if let Some(block) = self.stream.next().await { + let bytes = block?; + trace!("Body next: {} bytes", bytes.len()); + self.buf.extend(bytes); + } else { + self.read_all = true; + } + } + + if self.buf.is_empty() { + Ok(None) + } else if self.buf.len() <= self.block_size { + let block = self.buf.drain(..).collect::>(); + Ok(Some(block)) + } else { + let block = self.buf.drain(..self.block_size).collect::>(); + Ok(Some(block)) + } + } +} + +pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response { + Response::builder() + .header("x-amz-version-id", hex::encode(version_uuid)) + .header("ETag", format!("\"{}\"", md5sum_hex)) + .body(Body::from(vec![])) + .unwrap() +} + +pub async fn handle_create_multipart_upload( + garage: Arc, + req: &Request, + bucket_name: &str, + bucket_id: Uuid, + key: &str, +) -> Result, Error> { + let version_uuid = gen_uuid(); + 
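A synchronous sketch of the `StreamChunker` accounting defined above, with the async stream replaced by a pre-collected list of body chunks (illustrative only; the real implementation also returns the final partial block per call):

```rust
use std::collections::VecDeque;

// Synchronous model of StreamChunker: buffer incoming chunks and emit
// blocks of at most `block_size` bytes, flushing the remainder at the end.
fn chunk_all(chunks: Vec<Vec<u8>>, block_size: usize) -> Vec<Vec<u8>> {
    let mut buf: VecDeque<u8> = VecDeque::with_capacity(2 * block_size);
    let mut blocks = Vec::new();
    for c in chunks {
        buf.extend(c);
        while buf.len() >= block_size {
            blocks.push(buf.drain(..block_size).collect());
        }
    }
    if !buf.is_empty() {
        blocks.push(buf.drain(..).collect()); // trailing partial block
    }
    blocks
}

fn main() {
    let blocks = chunk_all(vec![vec![0u8; 5], vec![0u8; 4]], 4);
    let sizes: Vec<usize> = blocks.iter().map(Vec::len).collect();
    assert_eq!(sizes, vec![4, 4, 1]);
}
```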
let headers = get_headers(req.headers())?; + + // Create object in object table + let object_version = ObjectVersion { + uuid: version_uuid, + timestamp: now_msec(), + state: ObjectVersionState::Uploading(headers), + }; + let object = Object::new(bucket_id, key.to_string(), vec![object_version]); + garage.object_table.insert(&object).await?; + + // Insert empty version so that block_ref entries refer to something + // (they are inserted concurrently with blocks in the version table, so + // there is the possibility that they are inserted before the version table + // is created, in which case it is allowed to delete them, e.g. in repair_*) + let version = Version::new(version_uuid, bucket_id, key.into(), false); + garage.version_table.insert(&version).await?; + + // Send success response + let result = s3_xml::InitiateMultipartUploadResult { + xmlns: (), + bucket: s3_xml::Value(bucket_name.to_string()), + key: s3_xml::Value(key.to_string()), + upload_id: s3_xml::Value(hex::encode(version_uuid)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::new(Body::from(xml.into_bytes()))) +} + +pub async fn handle_put_part( + garage: Arc<Garage>, + req: Request<Body>, + bucket_id: Uuid, + key: &str, + part_number: u64, + upload_id: &str, + content_sha256: Option<Hash>, +) -> Result<Response<Body>, Error> { + let version_uuid = decode_upload_id(upload_id)?; + + let content_md5 = match req.headers().get("content-md5") { + Some(x) => Some(x.to_str()?.to_string()), + None => None, + }; + + // Read the first chunk and, at the same time, try to get the object to see if it exists + let key = key.to_string(); + + let body = req.into_body().map_err(Error::from); + let mut chunker = StreamChunker::new(body, garage.config.block_size); + + let (object, version, first_block) = futures::try_join!( + garage + .object_table + .get(&bucket_id, &key) + .map_err(Error::from), + garage + .version_table + .get(&version_uuid, &EmptyKey) + .map_err(Error::from), + chunker.next(), + )?; + + // Check that the object is valid and that the multipart block can be accepted + let first_block = first_block.ok_or_bad_request("Empty body")?; + let object = object.ok_or_bad_request("Object not found")?; + + if !object + .versions() + .iter() + .any(|v| v.uuid == version_uuid && v.is_uploading()) + { + return Err(Error::NoSuchUpload); + } + + // Check that this part hasn't already been uploaded + if let Some(v) = version { + if v.has_part_number(part_number) { + return Err(Error::BadRequest(format!( + "Part number {} has already been uploaded", + part_number + ))); + } + } + + // Copy block to store + let version = Version::new(version_uuid, bucket_id, key, false); + let first_block_hash = blake2sum(&first_block[..]); + let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( + &garage, + &version, + part_number, + first_block, + first_block_hash, + &mut chunker, + ) + .await?; + + // Verify that the checksums match + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + )?; + + // Store part etag in version + let data_md5sum_hex = hex::encode(data_md5sum); + let mut version = version; + version + .parts_etags + .put(part_number, data_md5sum_hex.clone()); + garage.version_table.insert(&version).await?; + + let response = Response::builder() + .header("ETag", format!("\"{}\"", data_md5sum_hex)) + .body(Body::empty()) + .unwrap(); + Ok(response) +} + +pub async fn handle_complete_multipart_upload( + garage: Arc<Garage>, + req: Request<Body>, + bucket_name: &str, + bucket_id: Uuid, + key: &str, + upload_id: &str, + content_sha256: Option<Hash>, +)
-> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; + let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) + .ok_or_bad_request("Invalid CompleteMultipartUpload XML")?; + debug!( + "CompleteMultipartUpload list of parts: {:?}", + body_list_of_parts + ); + + let version_uuid = decode_upload_id(upload_id)?; + + // Get object and version + let key = key.to_string(); + let (object, version) = futures::try_join!( + garage.object_table.get(&bucket_id, &key), + garage.version_table.get(&version_uuid, &EmptyKey), + )?; + + let object = object.ok_or(Error::NoSuchKey)?; + let mut object_version = object + .versions() + .iter() + .find(|v| v.uuid == version_uuid && v.is_uploading()) + .cloned() + .ok_or(Error::NoSuchUpload)?; + + let version = version.ok_or(Error::NoSuchKey)?; + if version.blocks.is_empty() { + return Err(Error::BadRequest("No data was uploaded".to_string())); + } + + let headers = match object_version.state { + ObjectVersionState::Uploading(headers) => headers, + _ => unreachable!(), + }; + + // Check that part numbers are an increasing sequence. + // (it doesn't need to start at 1 nor to be a continuous sequence, + // see discussion in #192) + if body_list_of_parts.is_empty() { + return Err(Error::EntityTooSmall); + } + if !body_list_of_parts + .iter() + .zip(body_list_of_parts.iter().skip(1)) + .all(|(p1, p2)| p1.part_number < p2.part_number) + { + return Err(Error::InvalidPartOrder); + } + + // Garage-specific restriction, see #204: part numbers must be + // consecutive starting at 1 + if body_list_of_parts[0].part_number != 1 + || !body_list_of_parts + .iter() + .zip(body_list_of_parts.iter().skip(1)) + .all(|(p1, p2)| p1.part_number + 1 == p2.part_number) + { + return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. See issue #204 for more information on this topic.".into())); + } + + // Check that the list of parts they gave us corresponds to the parts we have here + debug!("Expected parts from request: {:?}", body_list_of_parts); + debug!("Parts stored in version: {:?}", version.parts_etags.items()); + let parts = version + .parts_etags + .items() + .iter() + .map(|pair| (&pair.0, &pair.1)); + let same_parts = body_list_of_parts + .iter() + .map(|x| (&x.part_number, &x.etag)) + .eq(parts); + if !same_parts { + return Err(Error::InvalidPart); + } + + // Check that all blocks belong to one of the parts + let block_parts = version + .blocks + .items() + .iter() + .map(|(bk, _)| bk.part_number) + .collect::>(); + let same_parts = body_list_of_parts + .iter() + .map(|x| x.part_number) + .eq(block_parts.into_iter()); + if !same_parts { + return Err(Error::BadRequest( + "Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. 
Please abort the multipart upload and try again.".into(), + )); + } + + // Calculate etag of final object + // To understand how etags are calculated, read more here: + // https://teppen.io/2018/06/23/aws_s3_etags/ + let num_parts = body_list_of_parts.len(); + let mut etag_md5_hasher = Md5::new(); + for (_, etag) in version.parts_etags.items().iter() { + etag_md5_hasher.update(etag.as_bytes()); + } + let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts); + + // Calculate total size of final object + let total_size = version.blocks.items().iter().map(|x| x.1.size).sum(); + + // Write final object version + object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( + ObjectVersionMeta { + headers, + size: total_size, + etag: etag.clone(), + }, + version.blocks.items()[0].1.hash, + )); + + let final_object = Object::new(bucket_id, key.clone(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + // Send response saying ok we're done + let result = s3_xml::CompleteMultipartUploadResult { + xmlns: (), + location: None, + bucket: s3_xml::Value(bucket_name.to_string()), + key: s3_xml::Value(key), + etag: s3_xml::Value(format!("\"{}\"", etag)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::new(Body::from(xml.into_bytes()))) +} + +pub async fn handle_abort_multipart_upload( + garage: Arc, + bucket_id: Uuid, + key: &str, + upload_id: &str, +) -> Result, Error> { + let version_uuid = decode_upload_id(upload_id)?; + + let object = garage + .object_table + .get(&bucket_id, &key.to_string()) + .await?; + let object = object.ok_or(Error::NoSuchKey)?; + + let object_version = object + .versions() + .iter() + .find(|v| v.uuid == version_uuid && v.is_uploading()); + let mut object_version = match object_version { + None => return Err(Error::NoSuchUpload), + Some(x) => x.clone(), + }; + + object_version.state = ObjectVersionState::Aborted; + let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + Ok(Response::new(Body::from(vec![]))) +} + +fn get_mime_type(headers: &HeaderMap) -> Result { + Ok(headers + .get(hyper::header::CONTENT_TYPE) + .map(|x| x.to_str()) + .unwrap_or(Ok("blob"))? 
+ .to_string()) +} + +pub(crate) fn get_headers(headers: &HeaderMap) -> Result<ObjectVersionHeaders, Error> { + let content_type = get_mime_type(headers)?; + let mut other = BTreeMap::new(); + + // Preserve standard headers + let standard_header = vec![ + hyper::header::CACHE_CONTROL, + hyper::header::CONTENT_DISPOSITION, + hyper::header::CONTENT_ENCODING, + hyper::header::CONTENT_LANGUAGE, + hyper::header::EXPIRES, + ]; + for h in standard_header.iter() { + if let Some(v) = headers.get(h) { + match v.to_str() { + Ok(v_str) => { + other.insert(h.to_string(), v_str.to_string()); + } + Err(e) => { + warn!("Discarding header {}, error in .to_str(): {}", h, e); + } + } + } + } + + // Preserve x-amz-meta- headers + for (k, v) in headers.iter() { + if k.as_str().starts_with("x-amz-meta-") { + match v.to_str() { + Ok(v_str) => { + other.insert(k.to_string(), v_str.to_string()); + } + Err(e) => { + warn!("Discarding header {}, error in .to_str(): {}", k, e); + } + } + } + } + + Ok(ObjectVersionHeaders { + content_type, + other, + }) +} + +pub fn decode_upload_id(id: &str) -> Result<Uuid, Error> { + let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?; + if id_bin.len() != 32 { + return Err(Error::NoSuchUpload); + } + let mut uuid = [0u8; 32]; + uuid.copy_from_slice(&id_bin[..]); + Ok(Uuid::from(uuid)) +} + +#[derive(Debug)] +struct CompleteMultipartUploadPart { + etag: String, + part_number: u64, +} + +fn parse_complete_multipart_upload_body( + xml: &roxmltree::Document, +) -> Option<Vec<CompleteMultipartUploadPart>> { + let mut parts = vec![]; + + let root = xml.root(); + let cmu = root.first_child()?; + if !cmu.has_tag_name("CompleteMultipartUpload") { + return None; + } + + for item in cmu.children() { + // Only parse element nodes + if !item.is_element() { + continue; + } + + if item.has_tag_name("Part") { + let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?; + let part_number = item + .children() + .find(|e| e.has_tag_name("PartNumber"))? + .text()?; + parts.push(CompleteMultipartUploadPart { + etag: etag.trim_matches('"').to_string(), + part_number: part_number.parse().ok()?, + }); + } else { + return None; + } + } + + Some(parts) +} diff --git a/src/api/s3/router.rs b/src/api/s3/router.rs new file mode 100644 index 00000000..0525c649 --- /dev/null +++ b/src/api/s3/router.rs @@ -0,0 +1,1080 @@ +use crate::error::{Error, OkOrBadRequest}; + +use std::borrow::Cow; + +use hyper::header::HeaderValue; +use hyper::{HeaderMap, Method, Request}; + +use crate::helpers::Authorization; +use crate::router_macros::{generateQueryParameters, router_match}; + +router_match! {@func + +/// List of all S3 API endpoints. +/// +/// For each endpoint, it lists the parameters this endpoint receives by URL (bucket, key and +/// query parameters). Parameters it may receive by header are left out; however, headers are +/// considered when required to distinguish between one endpoint and another (for CopyObject and +/// UploadObject, for instance).
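Since `decode_upload_id` above simply reverses `hex::encode` on a 32-byte version UUID, the round trip is easy to check in isolation (a sketch using the `hex` crate):

```rust
// Round trip of the upload-id encoding: 32 raw bytes <-> 64 hex characters.
fn main() {
    let uuid = [0x42u8; 32];
    let upload_id = hex::encode(uuid);
    assert_eq!(upload_id.len(), 64);

    let decoded = hex::decode(&upload_id).unwrap();
    assert_eq!(decoded.len(), 32);
    assert_eq!(decoded, uuid);
}
```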
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Endpoint { + AbortMultipartUpload { + key: String, + upload_id: String, + }, + CompleteMultipartUpload { + key: String, + upload_id: String, + }, + CopyObject { + key: String, + }, + CreateBucket { + }, + CreateMultipartUpload { + key: String, + }, + DeleteBucket { + }, + DeleteBucketAnalyticsConfiguration { + id: String, + }, + DeleteBucketCors { + }, + DeleteBucketEncryption { + }, + DeleteBucketIntelligentTieringConfiguration { + id: String, + }, + DeleteBucketInventoryConfiguration { + id: String, + }, + DeleteBucketLifecycle { + }, + DeleteBucketMetricsConfiguration { + id: String, + }, + DeleteBucketOwnershipControls { + }, + DeleteBucketPolicy { + }, + DeleteBucketReplication { + }, + DeleteBucketTagging { + }, + DeleteBucketWebsite { + }, + DeleteObject { + key: String, + version_id: Option, + }, + DeleteObjects { + }, + DeleteObjectTagging { + key: String, + version_id: Option, + }, + DeletePublicAccessBlock { + }, + GetBucketAccelerateConfiguration { + }, + GetBucketAcl { + }, + GetBucketAnalyticsConfiguration { + id: String, + }, + GetBucketCors { + }, + GetBucketEncryption { + }, + GetBucketIntelligentTieringConfiguration { + id: String, + }, + GetBucketInventoryConfiguration { + id: String, + }, + GetBucketLifecycleConfiguration { + }, + GetBucketLocation { + }, + GetBucketLogging { + }, + GetBucketMetricsConfiguration { + id: String, + }, + GetBucketNotificationConfiguration { + }, + GetBucketOwnershipControls { + }, + GetBucketPolicy { + }, + GetBucketPolicyStatus { + }, + GetBucketReplication { + }, + GetBucketRequestPayment { + }, + GetBucketTagging { + }, + GetBucketVersioning { + }, + GetBucketWebsite { + }, + /// There are actually many more query parameters, used to add headers to the answer. They were + /// not added here as they are best handled in a dedicated route. + GetObject { + key: String, + part_number: Option, + version_id: Option, + }, + GetObjectAcl { + key: String, + version_id: Option, + }, + GetObjectLegalHold { + key: String, + version_id: Option, + }, + GetObjectLockConfiguration { + }, + GetObjectRetention { + key: String, + version_id: Option, + }, + GetObjectTagging { + key: String, + version_id: Option, + }, + GetObjectTorrent { + key: String, + }, + GetPublicAccessBlock { + }, + HeadBucket { + }, + HeadObject { + key: String, + part_number: Option, + version_id: Option, + }, + ListBucketAnalyticsConfigurations { + continuation_token: Option, + }, + ListBucketIntelligentTieringConfigurations { + continuation_token: Option, + }, + ListBucketInventoryConfigurations { + continuation_token: Option, + }, + ListBucketMetricsConfigurations { + continuation_token: Option, + }, + ListBuckets, + ListMultipartUploads { + delimiter: Option, + encoding_type: Option, + key_marker: Option, + max_uploads: Option, + prefix: Option, + upload_id_marker: Option, + }, + ListObjects { + delimiter: Option, + encoding_type: Option, + marker: Option, + max_keys: Option, + prefix: Option, + }, + ListObjectsV2 { + // This value should always be 2. 
It is not checked when constructing the struct + list_type: String, + continuation_token: Option, + delimiter: Option, + encoding_type: Option, + fetch_owner: Option, + max_keys: Option, + prefix: Option, + start_after: Option, + }, + ListObjectVersions { + delimiter: Option, + encoding_type: Option, + key_marker: Option, + max_keys: Option, + prefix: Option, + version_id_marker: Option, + }, + ListParts { + key: String, + max_parts: Option, + part_number_marker: Option, + upload_id: String, + }, + Options, + PutBucketAccelerateConfiguration { + }, + PutBucketAcl { + }, + PutBucketAnalyticsConfiguration { + id: String, + }, + PutBucketCors { + }, + PutBucketEncryption { + }, + PutBucketIntelligentTieringConfiguration { + id: String, + }, + PutBucketInventoryConfiguration { + id: String, + }, + PutBucketLifecycleConfiguration { + }, + PutBucketLogging { + }, + PutBucketMetricsConfiguration { + id: String, + }, + PutBucketNotificationConfiguration { + }, + PutBucketOwnershipControls { + }, + PutBucketPolicy { + }, + PutBucketReplication { + }, + PutBucketRequestPayment { + }, + PutBucketTagging { + }, + PutBucketVersioning { + }, + PutBucketWebsite { + }, + PutObject { + key: String, + }, + PutObjectAcl { + key: String, + version_id: Option, + }, + PutObjectLegalHold { + key: String, + version_id: Option, + }, + PutObjectLockConfiguration { + }, + PutObjectRetention { + key: String, + version_id: Option, + }, + PutObjectTagging { + key: String, + version_id: Option, + }, + PutPublicAccessBlock { + }, + RestoreObject { + key: String, + version_id: Option, + }, + SelectObjectContent { + key: String, + // This value should always be 2. It is not checked when constructing the struct + select_type: String, + }, + UploadPart { + key: String, + part_number: u64, + upload_id: String, + }, + UploadPartCopy { + key: String, + part_number: u64, + upload_id: String, + }, + // This endpoint is not documented with others because it has special use case : + // It's intended to be used with HTML forms, using a multipart/form-data body. + // It works a lot like presigned requests, but everything is in the form instead + // of being query parameters of the URL, so authenticating it is a bit different. + PostObject, +}} + +impl Endpoint { + /// Determine which S3 endpoint a request is for using the request, and a bucket which was + /// possibly extracted from the Host header. + /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets + pub fn from_request( + req: &Request, + bucket: Option, + ) -> Result<(Self, Option), Error> { + let uri = req.uri(); + let path = uri.path().trim_start_matches('/'); + let query = uri.query(); + if bucket.is_none() && path.is_empty() { + if *req.method() == Method::OPTIONS { + return Ok((Self::Options, None)); + } else { + return Ok((Self::ListBuckets, None)); + } + } + + let (bucket, key) = if let Some(bucket) = bucket { + (bucket, path) + } else { + path.split_once('/') + .map(|(b, p)| (b.to_owned(), p.trim_start_matches('/'))) + .unwrap_or((path.to_owned(), "")) + }; + + if *req.method() == Method::OPTIONS { + return Ok((Self::Options, Some(bucket))); + } + + let key = percent_encoding::percent_decode_str(key) + .decode_utf8()? 
+ .into_owned(); + + let mut query = QueryParameters::from_query(query.unwrap_or_default())?; + + let res = match *req.method() { + Method::GET => Self::from_get(key, &mut query)?, + Method::HEAD => Self::from_head(key, &mut query)?, + Method::POST => Self::from_post(key, &mut query)?, + Method::PUT => Self::from_put(key, &mut query, req.headers())?, + Method::DELETE => Self::from_delete(key, &mut query)?, + _ => return Err(Error::BadRequest("Unknown method".to_owned())), + }; + + if let Some(message) = query.nonempty_message() { + debug!("Unused query parameter: {}", message) + } + Ok((res, Some(bucket))) + } + + /// Determine which endpoint a request is for, knowing it is a GET. + fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result { + router_match! { + @gen_parser + (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + key: [ + EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker), + EMPTY => GetObject (query_opt::version_id, opt_parse::part_number), + ACL => GetObjectAcl (query_opt::version_id), + LEGAL_HOLD => GetObjectLegalHold (query_opt::version_id), + RETENTION => GetObjectRetention (query_opt::version_id), + TAGGING => GetObjectTagging (query_opt::version_id), + TORRENT => GetObjectTorrent, + ], + no_key: [ + EMPTY if list_type => ListObjectsV2 (query::list_type, query_opt::continuation_token, + opt_parse::delimiter, query_opt::encoding_type, + opt_parse::fetch_owner, opt_parse::max_keys, + query_opt::prefix, query_opt::start_after), + EMPTY => ListObjects (opt_parse::delimiter, query_opt::encoding_type, query_opt::marker, + opt_parse::max_keys, opt_parse::prefix), + ACCELERATE => GetBucketAccelerateConfiguration, + ACL => GetBucketAcl, + ANALYTICS if id => GetBucketAnalyticsConfiguration (query::id), + ANALYTICS => ListBucketAnalyticsConfigurations (query_opt::continuation_token), + CORS => GetBucketCors, + ENCRYPTION => GetBucketEncryption, + INTELLIGENT_TIERING if id => GetBucketIntelligentTieringConfiguration (query::id), + INTELLIGENT_TIERING => ListBucketIntelligentTieringConfigurations (query_opt::continuation_token), + INVENTORY if id => GetBucketInventoryConfiguration (query::id), + INVENTORY => ListBucketInventoryConfigurations (query_opt::continuation_token), + LIFECYCLE => GetBucketLifecycleConfiguration, + LOCATION => GetBucketLocation, + LOGGING => GetBucketLogging, + METRICS if id => GetBucketMetricsConfiguration (query::id), + METRICS => ListBucketMetricsConfigurations (query_opt::continuation_token), + NOTIFICATION => GetBucketNotificationConfiguration, + OBJECT_LOCK => GetObjectLockConfiguration, + OWNERSHIP_CONTROLS => GetBucketOwnershipControls, + POLICY => GetBucketPolicy, + POLICY_STATUS => GetBucketPolicyStatus, + PUBLIC_ACCESS_BLOCK => GetPublicAccessBlock, + REPLICATION => GetBucketReplication, + REQUEST_PAYMENT => GetBucketRequestPayment, + TAGGING => GetBucketTagging, + UPLOADS => ListMultipartUploads (opt_parse::delimiter, query_opt::encoding_type, + query_opt::key_marker, opt_parse::max_uploads, + query_opt::prefix, query_opt::upload_id_marker), + VERSIONING => GetBucketVersioning, + VERSIONS => ListObjectVersions (opt_parse::delimiter, query_opt::encoding_type, + query_opt::key_marker, opt_parse::max_keys, + query_opt::prefix, query_opt::version_id_marker), + WEBSITE => GetBucketWebsite, + ] + } + } + + /// Determine which endpoint a request is for, knowing it is a HEAD. + fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result { + router_match! 
{ + @gen_parser + (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + key: [ + EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id), + ], + no_key: [ + EMPTY => HeadBucket, + ] + } + } + + /// Determine which endpoint a request is for, knowing it is a POST. + fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result { + router_match! { + @gen_parser + (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + key: [ + EMPTY if upload_id => CompleteMultipartUpload (query::upload_id), + RESTORE => RestoreObject (query_opt::version_id), + SELECT => SelectObjectContent (query::select_type), + UPLOADS => CreateMultipartUpload, + ], + no_key: [ + EMPTY => PostObject, + DELETE => DeleteObjects, + ] + } + } + + /// Determine which endpoint a request is for, knowing it is a PUT. + fn from_put( + key: String, + query: &mut QueryParameters<'_>, + headers: &HeaderMap, + ) -> Result { + router_match! { + @gen_parser + (query.keyword.take().unwrap_or_default().as_ref(), key, query, headers), + key: [ + EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id), + EMPTY header "x-amz-copy-source" => CopyObject, + EMPTY if part_number => UploadPart (parse::part_number, query::upload_id), + EMPTY => PutObject, + ACL => PutObjectAcl (query_opt::version_id), + LEGAL_HOLD => PutObjectLegalHold (query_opt::version_id), + RETENTION => PutObjectRetention (query_opt::version_id), + TAGGING => PutObjectTagging (query_opt::version_id), + + ], + no_key: [ + EMPTY => CreateBucket, + ACCELERATE => PutBucketAccelerateConfiguration, + ACL => PutBucketAcl, + ANALYTICS => PutBucketAnalyticsConfiguration (query::id), + CORS => PutBucketCors, + ENCRYPTION => PutBucketEncryption, + INTELLIGENT_TIERING => PutBucketIntelligentTieringConfiguration(query::id), + INVENTORY => PutBucketInventoryConfiguration(query::id), + LIFECYCLE => PutBucketLifecycleConfiguration, + LOGGING => PutBucketLogging, + METRICS => PutBucketMetricsConfiguration(query::id), + NOTIFICATION => PutBucketNotificationConfiguration, + OBJECT_LOCK => PutObjectLockConfiguration, + OWNERSHIP_CONTROLS => PutBucketOwnershipControls, + POLICY => PutBucketPolicy, + PUBLIC_ACCESS_BLOCK => PutPublicAccessBlock, + REPLICATION => PutBucketReplication, + REQUEST_PAYMENT => PutBucketRequestPayment, + TAGGING => PutBucketTagging, + VERSIONING => PutBucketVersioning, + WEBSITE => PutBucketWebsite, + ] + } + } + + /// Determine which endpoint a request is for, knowing it is a DELETE. + fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result { + router_match! 
{ + @gen_parser + (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + key: [ + EMPTY if upload_id => AbortMultipartUpload (query::upload_id), + EMPTY => DeleteObject (query_opt::version_id), + TAGGING => DeleteObjectTagging (query_opt::version_id), + ], + no_key: [ + EMPTY => DeleteBucket, + ANALYTICS => DeleteBucketAnalyticsConfiguration (query::id), + CORS => DeleteBucketCors, + ENCRYPTION => DeleteBucketEncryption, + INTELLIGENT_TIERING => DeleteBucketIntelligentTieringConfiguration (query::id), + INVENTORY => DeleteBucketInventoryConfiguration (query::id), + LIFECYCLE => DeleteBucketLifecycle, + METRICS => DeleteBucketMetricsConfiguration (query::id), + OWNERSHIP_CONTROLS => DeleteBucketOwnershipControls, + POLICY => DeleteBucketPolicy, + PUBLIC_ACCESS_BLOCK => DeletePublicAccessBlock, + REPLICATION => DeleteBucketReplication, + TAGGING => DeleteBucketTagging, + WEBSITE => DeleteBucketWebsite, + ] + } + } + + /// Get the key this request targets. Returns None for requests which don't use a key. + #[allow(dead_code)] + pub fn get_key(&self) -> Option<&str> { + router_match! { + @extract + self, + key, + [ + AbortMultipartUpload, + CompleteMultipartUpload, + CopyObject, + CreateMultipartUpload, + DeleteObject, + DeleteObjectTagging, + GetObject, + GetObjectAcl, + GetObjectLegalHold, + GetObjectRetention, + GetObjectTagging, + GetObjectTorrent, + HeadObject, + ListParts, + PutObject, + PutObjectAcl, + PutObjectLegalHold, + PutObjectRetention, + PutObjectTagging, + RestoreObject, + SelectObjectContent, + UploadPart, + UploadPartCopy, + ] + } + } + + /// Get the kind of authorization which is required to perform the operation. + pub fn authorization_type(&self) -> Authorization { + if let Endpoint::ListBuckets = self { + return Authorization::None; + }; + let readonly = router_match! { + @match + self, + [ + GetBucketAccelerateConfiguration, + GetBucketAcl, + GetBucketAnalyticsConfiguration, + GetBucketEncryption, + GetBucketIntelligentTieringConfiguration, + GetBucketInventoryConfiguration, + GetBucketLifecycleConfiguration, + GetBucketLocation, + GetBucketLogging, + GetBucketMetricsConfiguration, + GetBucketNotificationConfiguration, + GetBucketOwnershipControls, + GetBucketPolicy, + GetBucketPolicyStatus, + GetBucketReplication, + GetBucketRequestPayment, + GetBucketTagging, + GetBucketVersioning, + GetObject, + GetObjectAcl, + GetObjectLegalHold, + GetObjectLockConfiguration, + GetObjectRetention, + GetObjectTagging, + GetObjectTorrent, + GetPublicAccessBlock, + HeadBucket, + HeadObject, + ListBucketAnalyticsConfigurations, + ListBucketIntelligentTieringConfigurations, + ListBucketInventoryConfigurations, + ListBucketMetricsConfigurations, + ListMultipartUploads, + ListObjects, + ListObjectsV2, + ListObjectVersions, + ListParts, + SelectObjectContent, + ] + }; + let owner = router_match! { + @match + self, + [ + DeleteBucket, + GetBucketWebsite, + PutBucketWebsite, + DeleteBucketWebsite, + GetBucketCors, + PutBucketCors, + DeleteBucketCors, + ] + }; + if readonly { + Authorization::Read + } else if owner { + Authorization::Owner + } else { + Authorization::Write + } + } +} + +// parameter name => struct field +generateQueryParameters!
{ + "continuation-token" => continuation_token, + "delimiter" => delimiter, + "encoding-type" => encoding_type, + "fetch-owner" => fetch_owner, + "id" => id, + "key-marker" => key_marker, + "list-type" => list_type, + "marker" => marker, + "max-keys" => max_keys, + "max-parts" => max_parts, + "max-uploads" => max_uploads, + "partNumber" => part_number, + "part-number-marker" => part_number_marker, + "prefix" => prefix, + "select-type" => select_type, + "start-after" => start_after, + "uploadId" => upload_id, + "upload-id-marker" => upload_id_marker, + "versionId" => version_id, + "version-id-marker" => version_id_marker +} + +mod keywords { + //! This module contain all query parameters with no associated value S3 uses to differentiate + //! endpoints. + pub const EMPTY: &str = ""; + + pub const ACCELERATE: &str = "accelerate"; + pub const ACL: &str = "acl"; + pub const ANALYTICS: &str = "analytics"; + pub const CORS: &str = "cors"; + pub const DELETE: &str = "delete"; + pub const ENCRYPTION: &str = "encryption"; + pub const INTELLIGENT_TIERING: &str = "intelligent-tiering"; + pub const INVENTORY: &str = "inventory"; + pub const LEGAL_HOLD: &str = "legal-hold"; + pub const LIFECYCLE: &str = "lifecycle"; + pub const LOCATION: &str = "location"; + pub const LOGGING: &str = "logging"; + pub const METRICS: &str = "metrics"; + pub const NOTIFICATION: &str = "notification"; + pub const OBJECT_LOCK: &str = "object-lock"; + pub const OWNERSHIP_CONTROLS: &str = "ownershipControls"; + pub const POLICY: &str = "policy"; + pub const POLICY_STATUS: &str = "policyStatus"; + pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock"; + pub const REPLICATION: &str = "replication"; + pub const REQUEST_PAYMENT: &str = "requestPayment"; + pub const RESTORE: &str = "restore"; + pub const RETENTION: &str = "retention"; + pub const SELECT: &str = "select"; + pub const TAGGING: &str = "tagging"; + pub const TORRENT: &str = "torrent"; + pub const UPLOADS: &str = "uploads"; + pub const VERSIONING: &str = "versioning"; + pub const VERSIONS: &str = "versions"; + pub const WEBSITE: &str = "website"; +} + +#[cfg(test)] +mod tests { + use super::*; + + fn parse( + method: &str, + uri: &str, + bucket: Option, + header: Option<(&str, &str)>, + ) -> (Endpoint, Option) { + let mut req = Request::builder().method(method).uri(uri); + if let Some((k, v)) = header { + req = req.header(k, v) + } + let req = req.body(()).unwrap(); + + Endpoint::from_request(&req, bucket).unwrap() + } + + macro_rules! test_cases { + ($($method:ident $uri:expr => $variant:ident )*) => {{ + $( + assert!( + matches!( + parse(test_cases!{@actual_method $method}, $uri, Some("my_bucket".to_owned()), None).0, + Endpoint::$variant { .. } + ) + ); + assert!( + matches!( + parse(test_cases!{@actual_method $method}, concat!("/my_bucket", $uri), None, None).0, + Endpoint::$variant { .. 
} + ) + ); + + test_cases!{@auth $method $uri} + )* + }}; + + (@actual_method HEAD) => {{ "HEAD" }}; + (@actual_method GET) => {{ "GET" }}; + (@actual_method OWNER_GET) => {{ "GET" }}; + (@actual_method PUT) => {{ "PUT" }}; + (@actual_method OWNER_PUT) => {{ "PUT" }}; + (@actual_method POST) => {{ "POST" }}; + (@actual_method DELETE) => {{ "DELETE" }}; + (@actual_method OWNER_DELETE) => {{ "DELETE" }}; + + (@auth HEAD $uri:expr) => {{ + assert_eq!(parse("HEAD", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Read) + }}; + (@auth GET $uri:expr) => {{ + assert_eq!(parse("GET", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Read) + }}; + (@auth OWNER_GET $uri:expr) => {{ + assert_eq!(parse("GET", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Owner) + }}; + (@auth PUT $uri:expr) => {{ + assert_eq!(parse("PUT", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Write) + }}; + (@auth OWNER_PUT $uri:expr) => {{ + assert_eq!(parse("PUT", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Owner) + }}; + (@auth POST $uri:expr) => {{ + assert_eq!(parse("POST", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Write) + }}; + (@auth DELETE $uri:expr) => {{ + assert_eq!(parse("DELETE", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Write) + }}; + (@auth OWNER_DELETE $uri:expr) => {{ + assert_eq!(parse("DELETE", concat!("/my_bucket", $uri), None, None).0.authorization_type(), + Authorization::Owner) + }}; + } + + #[test] + fn test_bucket_extraction() { + assert_eq!( + parse("GET", "/my/key", Some("my_bucket".to_owned()), None).1, + parse("GET", "/my_bucket/my/key", None, None).1 + ); + assert_eq!( + parse("GET", "/my_bucket/my/key", None, None).1.unwrap(), + "my_bucket" + ); + assert!(parse("GET", "/", None, None).1.is_none()); + } + + #[test] + fn test_key() { + assert_eq!( + parse("GET", "/my/key", Some("my_bucket".to_owned()), None) + .0 + .get_key(), + parse("GET", "/my_bucket/my/key", None, None).0.get_key() + ); + assert_eq!( + parse("GET", "/my_bucket/my/key", None, None) + .0 + .get_key() + .unwrap(), + "my/key" + ); + assert_eq!( + parse("GET", "/my_bucket/my/key?acl", None, None) + .0 + .get_key() + .unwrap(), + "my/key" + ); + assert!(parse("GET", "/my_bucket/?list-type=2", None, None) + .0 + .get_key() + .is_none()); + + assert_eq!( + parse("GET", "/my_bucket/%26%2B%3F%25%C3%A9/something", None, None) + .0 + .get_key() + .unwrap(), + "&+?%é/something" + ); + + /* + * this case is failing. 
We should verify how clients encode spaces in URLs + assert_eq!( + parse("GET", "/my_bucket/+", None, None).get_key().unwrap(), + " "); + */ + } + + #[test] + fn invalid_endpoint() { + let req = Request::builder() + .method("GET") + .uri("/bucket/key?website") + .body(()) + .unwrap(); + + assert!(Endpoint::from_request(&req, None).is_err()) + } + + #[test] + fn test_aws_doc_examples() { + test_cases!( + DELETE "/example-object?uploadId=VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZ" => AbortMultipartUpload + DELETE "/Key+?uploadId=UploadId" => AbortMultipartUpload + POST "/example-object?uploadId=AAAsb2FkIElEIGZvciBlbHZpbmcncyWeeS1tb3ZpZS5tMnRzIRRwbG9hZA" => CompleteMultipartUpload + POST "/Key+?uploadId=UploadId" => CompleteMultipartUpload + PUT "/" => CreateBucket + POST "/example-object?uploads" => CreateMultipartUpload + POST "/{Key+}?uploads" => CreateMultipartUpload + OWNER_DELETE "/" => DeleteBucket + DELETE "/?analytics&id=list1" => DeleteBucketAnalyticsConfiguration + DELETE "/?analytics&id=Id" => DeleteBucketAnalyticsConfiguration + OWNER_DELETE "/?cors" => DeleteBucketCors + DELETE "/?encryption" => DeleteBucketEncryption + DELETE "/?intelligent-tiering&id=Id" => DeleteBucketIntelligentTieringConfiguration + DELETE "/?inventory&id=list1" => DeleteBucketInventoryConfiguration + DELETE "/?inventory&id=Id" => DeleteBucketInventoryConfiguration + DELETE "/?lifecycle" => DeleteBucketLifecycle + DELETE "/?metrics&id=ExampleMetrics" => DeleteBucketMetricsConfiguration + DELETE "/?metrics&id=Id" => DeleteBucketMetricsConfiguration + DELETE "/?ownershipControls" => DeleteBucketOwnershipControls + DELETE "/?policy" => DeleteBucketPolicy + DELETE "/?replication" => DeleteBucketReplication + DELETE "/?tagging" => DeleteBucketTagging + OWNER_DELETE "/?website" => DeleteBucketWebsite + DELETE "/my-second-image.jpg" => DeleteObject + DELETE "/my-third-image.jpg?versionId=UIORUnfndfiufdisojhr398493jfdkjFJjkndnqUifhnw89493jJFJ" => DeleteObject + DELETE "/Key+?versionId=VersionId" => DeleteObject + POST "/?delete" => DeleteObjects + DELETE "/exampleobject?tagging" => DeleteObjectTagging + DELETE "/{Key+}?tagging&versionId=VersionId" => DeleteObjectTagging + DELETE "/?publicAccessBlock" => DeletePublicAccessBlock + GET "/?accelerate" => GetBucketAccelerateConfiguration + GET "/?acl" => GetBucketAcl + GET "/?analytics&id=Id" => GetBucketAnalyticsConfiguration + OWNER_GET "/?cors" => GetBucketCors + GET "/?encryption" => GetBucketEncryption + GET "/?intelligent-tiering&id=Id" => GetBucketIntelligentTieringConfiguration + GET "/?inventory&id=list1" => GetBucketInventoryConfiguration + GET "/?inventory&id=Id" => GetBucketInventoryConfiguration + GET "/?lifecycle" => GetBucketLifecycleConfiguration + GET "/?location" => GetBucketLocation + GET "/?logging" => GetBucketLogging + GET "/?metrics&id=Documents" => GetBucketMetricsConfiguration + GET "/?metrics&id=Id" => GetBucketMetricsConfiguration + GET "/?notification" => GetBucketNotificationConfiguration + GET "/?ownershipControls" => GetBucketOwnershipControls + GET "/?policy" => GetBucketPolicy + GET "/?policyStatus" => GetBucketPolicyStatus + GET "/?replication" => GetBucketReplication + GET "/?requestPayment" => GetBucketRequestPayment + GET "/?tagging" => GetBucketTagging + GET "/?versioning" => GetBucketVersioning + OWNER_GET "/?website" => GetBucketWebsite + GET "/my-image.jpg" => GetObject + GET "/myObject?versionId=3/L4kqtJlcpXroDTDmpUMLUo" => GetObject + GET
"/Junk3.txt?response-cache-control=No-cache&response-content-disposition=attachment%3B%20filename%3Dtesting.txt&response-content-encoding=x-gzip&response-content-language=mi%2C%20en&response-expires=Thu%2C%2001%20Dec%201994%2016:00:00%20GMT" => GetObject + GET "/Key+?partNumber=1&response-cache-control=ResponseCacheControl&response-content-disposition=ResponseContentDisposition&response-content-encoding=ResponseContentEncoding&response-content-language=ResponseContentLanguage&response-content-type=ResponseContentType&response-expires=ResponseExpires&versionId=VersionId" => GetObject + GET "/my-image.jpg?acl" => GetObjectAcl + GET "/my-image.jpg?versionId=3/L4kqtJlcpXroDVBH40Nr8X8gdRQBpUMLUo&acl" => GetObjectAcl + GET "/{Key+}?acl&versionId=VersionId" => GetObjectAcl + GET "/{Key+}?legal-hold&versionId=VersionId" => GetObjectLegalHold + GET "/?object-lock" => GetObjectLockConfiguration + GET "/{Key+}?retention&versionId=VersionId" => GetObjectRetention + GET "/example-object?tagging" => GetObjectTagging + GET "/{Key+}?tagging&versionId=VersionId" => GetObjectTagging + GET "/quotes/Nelson?torrent" => GetObjectTorrent + GET "/{Key+}?torrent" => GetObjectTorrent + GET "/?publicAccessBlock" => GetPublicAccessBlock + HEAD "/" => HeadBucket + HEAD "/my-image.jpg" => HeadObject + HEAD "/my-image.jpg?versionId=3HL4kqCxf3vjVBH40Nrjfkd" => HeadObject + HEAD "/Key+?partNumber=3&versionId=VersionId" => HeadObject + GET "/?analytics" => ListBucketAnalyticsConfigurations + GET "/?analytics&continuation-token=ContinuationToken" => ListBucketAnalyticsConfigurations + GET "/?intelligent-tiering" => ListBucketIntelligentTieringConfigurations + GET "/?intelligent-tiering&continuation-token=ContinuationToken" => ListBucketIntelligentTieringConfigurations + GET "/?inventory" => ListBucketInventoryConfigurations + GET "/?inventory&continuation-token=ContinuationToken" => ListBucketInventoryConfigurations + GET "/?metrics" => ListBucketMetricsConfigurations + GET "/?metrics&continuation-token=ContinuationToken" => ListBucketMetricsConfigurations + GET "/?uploads&max-uploads=3" => ListMultipartUploads + GET "/?uploads&delimiter=/" => ListMultipartUploads + GET "/?uploads&delimiter=/&prefix=photos/2006/" => ListMultipartUploads + GET "/?uploads&delimiter=D&encoding-type=EncodingType&key-marker=KeyMarker&max-uploads=1&prefix=Prefix&upload-id-marker=UploadIdMarker" => ListMultipartUploads + GET "/" => ListObjects + GET "/?prefix=N&marker=Ned&max-keys=40" => ListObjects + GET "/?delimiter=/" => ListObjects + GET "/?prefix=photos/2006/&delimiter=/" => ListObjects + + GET "/?delimiter=D&encoding-type=EncodingType&marker=Marker&max-keys=1&prefix=Prefix" => ListObjects + GET "/?list-type=2" => ListObjectsV2 + GET "/?list-type=2&max-keys=3&prefix=E&start-after=ExampleGuide.pdf" => ListObjectsV2 + GET "/?list-type=2&delimiter=/" => ListObjectsV2 + GET "/?list-type=2&prefix=photos/2006/&delimiter=/" => ListObjectsV2 + GET "/?list-type=2" => ListObjectsV2 + GET "/?list-type=2&continuation-token=1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=" => ListObjectsV2 + GET "/?list-type=2&continuation-token=ContinuationToken&delimiter=D&encoding-type=EncodingType&fetch-owner=true&max-keys=1&prefix=Prefix&start-after=StartAfter" => ListObjectsV2 + GET "/?versions" => ListObjectVersions + GET "/?versions&key-marker=key2" => ListObjectVersions + GET "/?versions&key-marker=key3&version-id-marker=t46ZenlYTZBnj" => ListObjectVersions + GET "/?versions&key-marker=key3&version-id-marker=t46Z0menlYTZBnj&max-keys=3" => ListObjectVersions + GET 
"/?versions&delimiter=/" => ListObjectVersions + GET "/?versions&prefix=photos/2006/&delimiter=/" => ListObjectVersions + GET "/?versions&delimiter=D&encoding-type=EncodingType&key-marker=KeyMarker&max-keys=2&prefix=Prefix&version-id-marker=VersionIdMarker" => ListObjectVersions + GET "/example-object?uploadId=XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA&max-parts=2&part-number-marker=1" => ListParts + GET "/Key+?max-parts=2&part-number-marker=2&uploadId=UploadId" => ListParts + PUT "/?accelerate" => PutBucketAccelerateConfiguration + PUT "/?acl" => PutBucketAcl + PUT "/?analytics&id=report1" => PutBucketAnalyticsConfiguration + PUT "/?analytics&id=Id" => PutBucketAnalyticsConfiguration + OWNER_PUT "/?cors" => PutBucketCors + PUT "/?encryption" => PutBucketEncryption + PUT "/?intelligent-tiering&id=Id" => PutBucketIntelligentTieringConfiguration + PUT "/?inventory&id=report1" => PutBucketInventoryConfiguration + PUT "/?inventory&id=Id" => PutBucketInventoryConfiguration + PUT "/?lifecycle" => PutBucketLifecycleConfiguration + PUT "/?logging" => PutBucketLogging + PUT "/?metrics&id=EntireBucket" => PutBucketMetricsConfiguration + PUT "/?metrics&id=Id" => PutBucketMetricsConfiguration + PUT "/?notification" => PutBucketNotificationConfiguration + PUT "/?ownershipControls" => PutBucketOwnershipControls + PUT "/?policy" => PutBucketPolicy + PUT "/?replication" => PutBucketReplication + PUT "/?requestPayment" => PutBucketRequestPayment + PUT "/?tagging" => PutBucketTagging + PUT "/?versioning" => PutBucketVersioning + OWNER_PUT "/?website" => PutBucketWebsite + PUT "/my-image.jpg" => PutObject + PUT "/Key+" => PutObject + PUT "/my-image.jpg?acl" => PutObjectAcl + PUT "/my-image.jpg?acl&versionId=3HL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nrjfkd" => PutObjectAcl + PUT "/{Key+}?acl&versionId=VersionId" => PutObjectAcl + PUT "/{Key+}?legal-hold&versionId=VersionId" => PutObjectLegalHold + PUT "/?object-lock" => PutObjectLockConfiguration + PUT "/{Key+}?retention&versionId=VersionId" => PutObjectRetention + PUT "/object-key?tagging" => PutObjectTagging + PUT "/{Key+}?tagging&versionId=VersionId" => PutObjectTagging + PUT "/?publicAccessBlock" => PutPublicAccessBlock + POST "/object-one.csv?restore" => RestoreObject + POST "/{Key+}?restore&versionId=VersionId" => RestoreObject + PUT "/my-movie.m2ts?partNumber=1&uploadId=VCVsb2FkIElEIGZvciBlbZZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZR" => UploadPart + PUT "/Key+?partNumber=2&uploadId=UploadId" => UploadPart + POST "/" => PostObject + ); + // no bucket, won't work with the rest of the test suite + assert!(matches!( + parse("GET", "/", None, None).0, + Endpoint::ListBuckets { .. } + )); + assert!(matches!( + parse("GET", "/", None, None).0.authorization_type(), + Authorization::None + )); + + // require a header + assert!(matches!( + parse( + "PUT", + "/Key+", + Some("my_bucket".to_owned()), + Some(("x-amz-copy-source", "some/key")) + ) + .0, + Endpoint::CopyObject { .. } + )); + assert!(matches!( + parse( + "PUT", + "/my_bucket/Key+", + None, + Some(("x-amz-copy-source", "some/key")) + ) + .0, + Endpoint::CopyObject { .. } + )); + assert!(matches!( + parse( + "PUT", + "/my_bucket/Key+", + None, + Some(("x-amz-copy-source", "some/key")) + ) + .0 + .authorization_type(), + Authorization::Write + )); + + // require a header + assert!(matches!( + parse( + "PUT", + "/Key+?partNumber=2&uploadId=UploadId", + Some("my_bucket".to_owned()), + Some(("x-amz-copy-source", "some/key")) + ) + .0, + Endpoint::UploadPartCopy { .. 
} + )); + assert!(matches!( + parse( + "PUT", + "/my_bucket/Key+?partNumber=2&uploadId=UploadId", + None, + Some(("x-amz-copy-source", "some/key")) + ) + .0, + Endpoint::UploadPartCopy { .. } + )); + assert!(matches!( + parse( + "PUT", + "/my_bucket/Key+?partNumber=2&uploadId=UploadId", + None, + Some(("x-amz-copy-source", "some/key")) + ) + .0 + .authorization_type(), + Authorization::Write + )); + + // POST request, but with GET semantics for permission purposes + assert!(matches!( + parse( + "POST", + "/{Key+}?select&select-type=2", + Some("my_bucket".to_owned()), + None + ) + .0, + Endpoint::SelectObjectContent { .. } + )); + assert!(matches!( + parse("POST", "/my_bucket/{Key+}?select&select-type=2", None, None).0, + Endpoint::SelectObjectContent { .. } + )); + assert!(matches!( + parse("POST", "/my_bucket/{Key+}?select&select-type=2", None, None) + .0 + .authorization_type(), + Authorization::Read + )); + } +} diff --git a/src/api/s3/website.rs b/src/api/s3/website.rs new file mode 100644 index 00000000..561130dc --- /dev/null +++ b/src/api/s3/website.rs @@ -0,0 +1,369 @@ +use quick_xml::de::from_reader; +use std::sync::Arc; + +use hyper::{Body, Request, Response, StatusCode}; +use serde::{Deserialize, Serialize}; + +use crate::error::*; +use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; +use crate::signature::verify_signed_content; + +use garage_model::bucket_table::*; +use garage_model::garage::Garage; +use garage_table::*; +use garage_util::data::*; + +pub async fn handle_get_website(bucket: &Bucket) -> Result, Error> { + let param = bucket + .params() + .ok_or_internal_error("Bucket should not be deleted at this point")?; + + if let Some(website) = param.website_config.get() { + let wc = WebsiteConfiguration { + xmlns: (), + error_document: website.error_document.as_ref().map(|v| Key { + key: Value(v.to_string()), + }), + index_document: Some(Suffix { + suffix: Value(website.index_document.to_string()), + }), + redirect_all_requests_to: None, + routing_rules: None, + }; + let xml = to_xml_with_header(&wc)?; + Ok(Response::builder() + .status(StatusCode::OK) + .header(http::header::CONTENT_TYPE, "application/xml") + .body(Body::from(xml))?) + } else { + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) + } +} + +pub async fn handle_delete_website( + garage: Arc, + bucket_id: Uuid, +) -> Result, Error> { + let mut bucket = garage + .bucket_table + .get(&EmptyKey, &bucket_id) + .await? + .ok_or(Error::NoSuchBucket)?; + + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; + + param.website_config.update(None); + garage.bucket_table.insert(&bucket).await?; + + Ok(Response::builder() + .status(StatusCode::NO_CONTENT) + .body(Body::empty())?) +} + +pub async fn handle_put_website( + garage: Arc, + bucket_id: Uuid, + req: Request, + content_sha256: Option, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let mut bucket = garage + .bucket_table + .get(&EmptyKey, &bucket_id) + .await?
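For reference, a minimal sketch (element values illustrative) of a request body that handle_put_website accepts, parsed with the same quick_xml deserializer exercised in the tests below:

    use quick_xml::de::from_str;

    let payload = r#"<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <IndexDocument><Suffix>index.html</Suffix></IndexDocument>
        <ErrorDocument><Key>error.html</Key></ErrorDocument>
    </WebsiteConfiguration>"#;
    let conf: WebsiteConfiguration = from_str(payload).unwrap();
    conf.validate().unwrap();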
+ .ok_or(Error::NoSuchBucket)?; + + let param = bucket + .params_mut() + .ok_or_internal_error("Bucket should not be deleted at this point")?; + + let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; + conf.validate()?; + + param + .website_config + .update(Some(conf.into_garage_website_config()?)); + garage.bucket_table.insert(&bucket).await?; + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::empty())?) +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct WebsiteConfiguration { + #[serde(serialize_with = "xmlns_tag", skip_deserializing)] + pub xmlns: (), + #[serde(rename = "ErrorDocument")] + pub error_document: Option, + #[serde(rename = "IndexDocument")] + pub index_document: Option, + #[serde(rename = "RedirectAllRequestsTo")] + pub redirect_all_requests_to: Option, + #[serde(rename = "RoutingRules")] + pub routing_rules: Option>, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct RoutingRule { + #[serde(rename = "RoutingRule")] + pub inner: RoutingRuleInner, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct RoutingRuleInner { + #[serde(rename = "Condition")] + pub condition: Option, + #[serde(rename = "Redirect")] + pub redirect: Redirect, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Key { + #[serde(rename = "Key")] + pub key: Value, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Suffix { + #[serde(rename = "Suffix")] + pub suffix: Value, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Target { + #[serde(rename = "HostName")] + pub hostname: Value, + #[serde(rename = "Protocol")] + pub protocol: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Condition { + #[serde(rename = "HttpErrorCodeReturnedEquals")] + pub http_error_code: Option, + #[serde(rename = "KeyPrefixEquals")] + pub prefix: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Redirect { + #[serde(rename = "HostName")] + pub hostname: Option, + #[serde(rename = "Protocol")] + pub protocol: Option, + #[serde(rename = "HttpRedirectCode")] + pub http_redirect_code: Option, + #[serde(rename = "ReplaceKeyPrefixWith")] + pub replace_prefix: Option, + #[serde(rename = "ReplaceKeyWith")] + pub replace_full: Option, +} + +impl WebsiteConfiguration { + pub fn validate(&self) -> Result<(), Error> { + if self.redirect_all_requests_to.is_some() + && (self.error_document.is_some() + || self.index_document.is_some() + || self.routing_rules.is_some()) + { + return Err(Error::BadRequest( + "Bad XML: can't have RedirectAllRequestsTo and other fields".to_owned(), + )); + } + if let Some(ref ed) = self.error_document { + ed.validate()?; + } + if let Some(ref id) = self.index_document { + id.validate()?; + } + if let Some(ref rart) = self.redirect_all_requests_to { + rart.validate()?; + } + if let Some(ref rrs) = self.routing_rules { + for rr in rrs { + rr.inner.validate()?; + } + } + + Ok(()) + } + + pub fn into_garage_website_config(self) -> Result { + if self.redirect_all_requests_to.is_some() { + Err(Error::NotImplemented( + "S3 website redirects are not currently implemented in Garage.".into(), + )) + } else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) { + Err(Error::NotImplemented( + "S3 routing rules are not currently implemented in Garage.".into(), + 
)) + } else { + Ok(WebsiteConfig { + index_document: self + .index_document + .map(|x| x.suffix.0) + .unwrap_or_else(|| "index.html".to_string()), + error_document: self.error_document.map(|x| x.key.0), + }) + } + } +} + +impl Key { + pub fn validate(&self) -> Result<(), Error> { + if self.key.0.is_empty() { + Err(Error::BadRequest( + "Bad XML: error document specified but empty".to_owned(), + )) + } else { + Ok(()) + } + } +} + +impl Suffix { + pub fn validate(&self) -> Result<(), Error> { + if self.suffix.0.is_empty() | self.suffix.0.contains('/') { + Err(Error::BadRequest( + "Bad XML: index document is empty or contains /".to_owned(), + )) + } else { + Ok(()) + } + } +} + +impl Target { + pub fn validate(&self) -> Result<(), Error> { + if let Some(ref protocol) = self.protocol { + if protocol.0 != "http" && protocol.0 != "https" { + return Err(Error::BadRequest("Bad XML: invalid protocol".to_owned())); + } + } + Ok(()) + } +} + +impl RoutingRuleInner { + pub fn validate(&self) -> Result<(), Error> { + let has_prefix = self + .condition + .as_ref() + .and_then(|c| c.prefix.as_ref()) + .is_some(); + self.redirect.validate(has_prefix) + } +} + +impl Redirect { + pub fn validate(&self, has_prefix: bool) -> Result<(), Error> { + if self.replace_prefix.is_some() { + if self.replace_full.is_some() { + return Err(Error::BadRequest( + "Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set".to_owned(), + )); + } + if !has_prefix { + return Err(Error::BadRequest( + "Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't".to_owned(), + )); + } + } + if let Some(ref protocol) = self.protocol { + if protocol.0 != "http" && protocol.0 != "https" { + return Err(Error::BadRequest("Bad XML: invalid protocol".to_owned())); + } + } + // TODO there are probably more invalid cases, but which ones? + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use quick_xml::de::from_str; + + #[test] + fn test_deserialize() -> Result<(), Error> { + let message = r#"<?xml version="1.0" encoding="UTF-8"?> + <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <ErrorDocument> + <Key>my-error-doc</Key> + </ErrorDocument> + <IndexDocument> + <Suffix>my-index</Suffix> + </IndexDocument> + <RedirectAllRequestsTo> + <HostName>garage.tld</HostName> + <Protocol>https</Protocol> + </RedirectAllRequestsTo> + <RoutingRules> + <RoutingRule> + <Condition> + <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals> + <KeyPrefixEquals>prefix1</KeyPrefixEquals> + </Condition> + <Redirect> + <HostName>gara.ge</HostName> + <Protocol>http</Protocol> + <HttpRedirectCode>303</HttpRedirectCode> + <ReplaceKeyPrefixWith>prefix2</ReplaceKeyPrefixWith> + <ReplaceKeyWith>fullkey</ReplaceKeyWith> + </Redirect> + </RoutingRule> + </RoutingRules> +</WebsiteConfiguration>"#; + let conf: WebsiteConfiguration = from_str(message).unwrap(); + let ref_value = WebsiteConfiguration { + xmlns: (), + error_document: Some(Key { + key: Value("my-error-doc".to_owned()), + }), + index_document: Some(Suffix { + suffix: Value("my-index".to_owned()), + }), + redirect_all_requests_to: Some(Target { + hostname: Value("garage.tld".to_owned()), + protocol: Some(Value("https".to_owned())), + }), + routing_rules: Some(vec![RoutingRule { + inner: RoutingRuleInner { + condition: Some(Condition { + http_error_code: Some(IntValue(404)), + prefix: Some(Value("prefix1".to_owned())), + }), + redirect: Redirect { + hostname: Some(Value("gara.ge".to_owned())), + protocol: Some(Value("http".to_owned())), + http_redirect_code: Some(IntValue(303)), + replace_prefix: Some(Value("prefix2".to_owned())), + replace_full: Some(Value("fullkey".to_owned())), + }, + }, + }]), + }; + assert_eq!
{ + ref_value, + conf + } + + let message2 = to_xml_with_header(&ref_value)?; + + let cleanup = |c: &str| c.replace(char::is_whitespace, ""); + assert_eq!(cleanup(message), cleanup(&message2)); + + Ok(()) + } +} diff --git a/src/api/s3/xml.rs b/src/api/s3/xml.rs new file mode 100644 index 00000000..75ec4559 --- /dev/null +++ b/src/api/s3/xml.rs @@ -0,0 +1,844 @@ +use quick_xml::se::to_string; +use serde::{Deserialize, Serialize, Serializer}; + +use crate::Error as ApiError; + +pub fn to_xml_with_header<T: Serialize>(x: &T) -> Result<String, ApiError> { + let mut xml = r#"<?xml version="1.0" encoding="UTF-8"?>"#.to_string(); + xml.push_str(&to_string(x)?); + Ok(xml) +} + +pub fn xmlns_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> { + s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/") +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Value(#[serde(rename = "$value")] pub String); + +impl From<&str> for Value { + fn from(s: &str) -> Value { + Value(s.to_string()) + } +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct IntValue(#[serde(rename = "$value")] pub i64); + +#[derive(Debug, Serialize, PartialEq)] +pub struct Bucket { + #[serde(rename = "CreationDate")] + pub creation_date: Value, + #[serde(rename = "Name")] + pub name: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct Owner { + #[serde(rename = "DisplayName")] + pub display_name: Value, + #[serde(rename = "ID")] + pub id: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct BucketList { + #[serde(rename = "Bucket")] + pub entries: Vec<Bucket>, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct ListAllMyBucketsResult { + #[serde(rename = "Buckets")] + pub buckets: BucketList, + #[serde(rename = "Owner")] + pub owner: Owner, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct LocationConstraint { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "$value")] + pub region: String, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct Deleted { + #[serde(rename = "Key")] + pub key: Value, + #[serde(rename = "VersionId")] + pub version_id: Value, + #[serde(rename = "DeleteMarkerVersionId")] + pub delete_marker_version_id: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct Error { + #[serde(rename = "Code")] + pub code: Value, + #[serde(rename = "Message")] + pub message: Value, + #[serde(rename = "Resource")] + pub resource: Option<Value>, + #[serde(rename = "Region")] + pub region: Option<Value>, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct DeleteError { + #[serde(rename = "Code")] + pub code: Value, + #[serde(rename = "Key")] + pub key: Option<Value>, + #[serde(rename = "Message")] + pub message: Value, + #[serde(rename = "VersionId")] + pub version_id: Option<Value>, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct DeleteResult { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Deleted")] + pub deleted: Vec<Deleted>, + #[serde(rename = "Error")] + pub errors: Vec<DeleteError>, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct InitiateMultipartUploadResult { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Bucket")] + pub bucket: Value, + #[serde(rename = "Key")] + pub key: Value, + #[serde(rename = "UploadId")] + pub upload_id: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct CompleteMultipartUploadResult { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Location")] + pub location: Option<Value>, + #[serde(rename = "Bucket")] + pub bucket: Value, + #[serde(rename = "Key")] + pub key: Value, +
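As a usage note for to_xml_with_header, a short sketch of serializing one of these structs; the expected string mirrors the get_bucket_location_result test further down:

    let loc = LocationConstraint {
        xmlns: (),
        region: "garage".to_string(),
    };
    let xml = to_xml_with_header(&loc)?;
    assert_eq!(
        xml,
        "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
         <LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">garage</LocationConstraint>"
    );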
#[serde(rename = "ETag")] + pub etag: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct Initiator { + #[serde(rename = "DisplayName")] + pub display_name: Value, + #[serde(rename = "ID")] + pub id: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct ListMultipartItem { + #[serde(rename = "Initiated")] + pub initiated: Value, + #[serde(rename = "Initiator")] + pub initiator: Initiator, + #[serde(rename = "Key")] + pub key: Value, + #[serde(rename = "UploadId")] + pub upload_id: Value, + #[serde(rename = "Owner")] + pub owner: Owner, + #[serde(rename = "StorageClass")] + pub storage_class: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct ListMultipartUploadsResult { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Bucket")] + pub bucket: Value, + #[serde(rename = "KeyMarker")] + pub key_marker: Option, + #[serde(rename = "UploadIdMarker")] + pub upload_id_marker: Option, + #[serde(rename = "NextKeyMarker")] + pub next_key_marker: Option, + #[serde(rename = "NextUploadIdMarker")] + pub next_upload_id_marker: Option, + #[serde(rename = "Prefix")] + pub prefix: Value, + #[serde(rename = "Delimiter")] + pub delimiter: Option, + #[serde(rename = "MaxUploads")] + pub max_uploads: IntValue, + #[serde(rename = "IsTruncated")] + pub is_truncated: Value, + #[serde(rename = "Upload")] + pub upload: Vec, + #[serde(rename = "CommonPrefixes")] + pub common_prefixes: Vec, + #[serde(rename = "EncodingType")] + pub encoding_type: Option, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct PartItem { + #[serde(rename = "ETag")] + pub etag: Value, + #[serde(rename = "LastModified")] + pub last_modified: Value, + #[serde(rename = "PartNumber")] + pub part_number: IntValue, + #[serde(rename = "Size")] + pub size: IntValue, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct ListPartsResult { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Bucket")] + pub bucket: Value, + #[serde(rename = "Key")] + pub key: Value, + #[serde(rename = "UploadId")] + pub upload_id: Value, + #[serde(rename = "PartNumberMarker")] + pub part_number_marker: Option, + #[serde(rename = "NextPartNumberMarker")] + pub next_part_number_marker: Option, + #[serde(rename = "MaxParts")] + pub max_parts: IntValue, + #[serde(rename = "IsTruncated")] + pub is_truncated: Value, + #[serde(rename = "Part", default)] + pub parts: Vec, + #[serde(rename = "Initiator")] + pub initiator: Initiator, + #[serde(rename = "Owner")] + pub owner: Owner, + #[serde(rename = "StorageClass")] + pub storage_class: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct ListBucketItem { + #[serde(rename = "Key")] + pub key: Value, + #[serde(rename = "LastModified")] + pub last_modified: Value, + #[serde(rename = "ETag")] + pub etag: Value, + #[serde(rename = "Size")] + pub size: IntValue, + #[serde(rename = "StorageClass")] + pub storage_class: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct CommonPrefix { + #[serde(rename = "Prefix")] + pub prefix: Value, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct ListBucketResult { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Name")] + pub name: Value, + #[serde(rename = "Prefix")] + pub prefix: Value, + #[serde(rename = "Marker")] + pub marker: Option, + #[serde(rename = "NextMarker")] + pub next_marker: Option, + #[serde(rename = "StartAfter")] + pub start_after: Option, + #[serde(rename = "ContinuationToken")] + pub continuation_token: Option, + 
#[serde(rename = "NextContinuationToken")] + pub next_continuation_token: Option, + #[serde(rename = "KeyCount")] + pub key_count: Option, + #[serde(rename = "MaxKeys")] + pub max_keys: IntValue, + #[serde(rename = "Delimiter")] + pub delimiter: Option, + #[serde(rename = "EncodingType")] + pub encoding_type: Option, + #[serde(rename = "IsTruncated")] + pub is_truncated: Value, + #[serde(rename = "Contents")] + pub contents: Vec, + #[serde(rename = "CommonPrefixes")] + pub common_prefixes: Vec, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct VersioningConfiguration { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Status")] + pub status: Option, +} + +#[derive(Debug, Serialize, PartialEq)] +pub struct PostObject { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Location")] + pub location: Value, + #[serde(rename = "Bucket")] + pub bucket: Value, + #[serde(rename = "Key")] + pub key: Value, + #[serde(rename = "ETag")] + pub etag: Value, +} + +#[cfg(test)] +mod tests { + use super::*; + + use garage_util::time::*; + + #[test] + fn error_message() -> Result<(), ApiError> { + let error = Error { + code: Value("TestError".to_string()), + message: Value("A dummy error message".to_string()), + resource: Some(Value("/bucket/a/plop".to_string())), + region: Some(Value("garage".to_string())), + }; + assert_eq!( + to_xml_with_header(&error)?, + "\ +\ + TestError\ + A dummy error message\ + /bucket/a/plop\ + garage\ +" + ); + Ok(()) + } + + #[test] + fn list_all_my_buckets_result() -> Result<(), ApiError> { + let list_buckets = ListAllMyBucketsResult { + owner: Owner { + display_name: Value("owner_name".to_string()), + id: Value("qsdfjklm".to_string()), + }, + buckets: BucketList { + entries: vec![ + Bucket { + creation_date: Value(msec_to_rfc3339(0)), + name: Value("bucket_A".to_string()), + }, + Bucket { + creation_date: Value(msec_to_rfc3339(3600 * 24 * 1000)), + name: Value("bucket_B".to_string()), + }, + ], + }, + }; + assert_eq!( + to_xml_with_header(&list_buckets)?, + "\ +\ + \ + \ + 1970-01-01T00:00:00.000Z\ + bucket_A\ + \ + \ + 1970-01-02T00:00:00.000Z\ + bucket_B\ + \ + \ + \ + owner_name\ + qsdfjklm\ + \ +" + ); + Ok(()) + } + + #[test] + fn get_bucket_location_result() -> Result<(), ApiError> { + let get_bucket_location = LocationConstraint { + xmlns: (), + region: "garage".to_string(), + }; + assert_eq!( + to_xml_with_header(&get_bucket_location)?, + "\ +garage" + ); + Ok(()) + } + + #[test] + fn get_bucket_versioning_result() -> Result<(), ApiError> { + let get_bucket_versioning = VersioningConfiguration { + xmlns: (), + status: None, + }; + assert_eq!( + to_xml_with_header(&get_bucket_versioning)?, + "\ +" + ); + let get_bucket_versioning2 = VersioningConfiguration { + xmlns: (), + status: Some(Value("Suspended".to_string())), + }; + assert_eq!( + to_xml_with_header(&get_bucket_versioning2)?, + "\ +Suspended" + ); + + Ok(()) + } + + #[test] + fn delete_result() -> Result<(), ApiError> { + let delete_result = DeleteResult { + xmlns: (), + deleted: vec![ + Deleted { + key: Value("a/plop".to_string()), + version_id: Value("qsdfjklm".to_string()), + delete_marker_version_id: Value("wxcvbn".to_string()), + }, + Deleted { + key: Value("b/plip".to_string()), + version_id: Value("1234".to_string()), + delete_marker_version_id: Value("4321".to_string()), + }, + ], + errors: vec![ + DeleteError { + code: Value("NotFound".to_string()), + key: Some(Value("c/plap".to_string())), + message: Value("Object c/plap not 
found".to_string()), + version_id: None, + }, + DeleteError { + code: Value("Forbidden".to_string()), + key: Some(Value("d/plep".to_string())), + message: Value("Not authorized".to_string()), + version_id: Some(Value("789".to_string())), + }, + ], + }; + assert_eq!( + to_xml_with_header(&delete_result)?, + "\ +\ + \ + a/plop\ + qsdfjklm\ + wxcvbn\ + \ + \ + b/plip\ + 1234\ + 4321\ + \ + \ + NotFound\ + c/plap\ + Object c/plap not found\ + \ + \ + Forbidden\ + d/plep\ + Not authorized\ + 789\ + \ +" + ); + Ok(()) + } + + #[test] + fn initiate_multipart_upload_result() -> Result<(), ApiError> { + let result = InitiateMultipartUploadResult { + xmlns: (), + bucket: Value("mybucket".to_string()), + key: Value("a/plop".to_string()), + upload_id: Value("azerty".to_string()), + }; + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + mybucket\ + a/plop\ + azerty\ +" + ); + Ok(()) + } + + #[test] + fn complete_multipart_upload_result() -> Result<(), ApiError> { + let result = CompleteMultipartUploadResult { + xmlns: (), + location: Some(Value("https://garage.tld/mybucket/a/plop".to_string())), + bucket: Value("mybucket".to_string()), + key: Value("a/plop".to_string()), + etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()), + }; + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + https://garage.tld/mybucket/a/plop\ + mybucket\ + a/plop\ + "3858f62230ac3c915f300c664312c11f-9"\ +" + ); + Ok(()) + } + + #[test] + fn list_multipart_uploads_result() -> Result<(), ApiError> { + let result = ListMultipartUploadsResult { + xmlns: (), + bucket: Value("example-bucket".to_string()), + key_marker: None, + next_key_marker: None, + upload_id_marker: None, + encoding_type: None, + next_upload_id_marker: None, + upload: vec![], + delimiter: Some(Value("/".to_string())), + prefix: Value("photos/2006/".to_string()), + max_uploads: IntValue(1000), + is_truncated: Value("false".to_string()), + common_prefixes: vec![ + CommonPrefix { + prefix: Value("photos/2006/February/".to_string()), + }, + CommonPrefix { + prefix: Value("photos/2006/January/".to_string()), + }, + CommonPrefix { + prefix: Value("photos/2006/March/".to_string()), + }, + ], + }; + + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + example-bucket\ + photos/2006/\ + /\ + 1000\ + false\ + \ + photos/2006/February/\ + \ + \ + photos/2006/January/\ + \ + \ + photos/2006/March/\ + \ +" + ); + + Ok(()) + } + + #[test] + fn list_objects_v1_1() -> Result<(), ApiError> { + let result = ListBucketResult { + xmlns: (), + name: Value("example-bucket".to_string()), + prefix: Value("".to_string()), + marker: Some(Value("".to_string())), + next_marker: None, + start_after: None, + continuation_token: None, + next_continuation_token: None, + key_count: None, + max_keys: IntValue(1000), + encoding_type: None, + delimiter: Some(Value("/".to_string())), + is_truncated: Value("false".to_string()), + contents: vec![ListBucketItem { + key: Value("sample.jpg".to_string()), + last_modified: Value(msec_to_rfc3339(0)), + etag: Value("\"bf1d737a4d46a19f3bced6905cc8b902\"".to_string()), + size: IntValue(142863), + storage_class: Value("STANDARD".to_string()), + }], + common_prefixes: vec![CommonPrefix { + prefix: Value("photos/".to_string()), + }], + }; + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + example-bucket\ + \ + \ + 1000\ + /\ + false\ + \ + sample.jpg\ + 1970-01-01T00:00:00.000Z\ + "bf1d737a4d46a19f3bced6905cc8b902"\ + 142863\ + STANDARD\ + \ + \ + photos/\ + \ +" + ); + Ok(()) + } + + #[test] + fn list_objects_v1_2() -> Result<(), 
ApiError> { + let result = ListBucketResult { + xmlns: (), + name: Value("example-bucket".to_string()), + prefix: Value("photos/2006/".to_string()), + marker: Some(Value("".to_string())), + next_marker: None, + start_after: None, + continuation_token: None, + next_continuation_token: None, + key_count: None, + max_keys: IntValue(1000), + delimiter: Some(Value("/".to_string())), + encoding_type: None, + is_truncated: Value("false".to_string()), + contents: vec![], + common_prefixes: vec![ + CommonPrefix { + prefix: Value("photos/2006/February/".to_string()), + }, + CommonPrefix { + prefix: Value("photos/2006/January/".to_string()), + }, + ], + }; + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + example-bucket\ + photos/2006/\ + \ + 1000\ + /\ + false\ + \ + photos/2006/February/\ + \ + \ + photos/2006/January/\ + \ +" + ); + Ok(()) + } + + #[test] + fn list_objects_v2_1() -> Result<(), ApiError> { + let result = ListBucketResult { + xmlns: (), + name: Value("quotes".to_string()), + prefix: Value("E".to_string()), + marker: None, + next_marker: None, + start_after: Some(Value("ExampleGuide.pdf".to_string())), + continuation_token: None, + next_continuation_token: None, + key_count: None, + max_keys: IntValue(3), + delimiter: None, + encoding_type: None, + is_truncated: Value("false".to_string()), + contents: vec![ListBucketItem { + key: Value("ExampleObject.txt".to_string()), + last_modified: Value(msec_to_rfc3339(0)), + etag: Value("\"599bab3ed2c697f1d26842727561fd94\"".to_string()), + size: IntValue(857), + storage_class: Value("REDUCED_REDUNDANCY".to_string()), + }], + common_prefixes: vec![], + }; + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + quotes\ + E\ + ExampleGuide.pdf\ + 3\ + false\ + \ + ExampleObject.txt\ + 1970-01-01T00:00:00.000Z\ + "599bab3ed2c697f1d26842727561fd94"\ + 857\ + REDUCED_REDUNDANCY\ + \ +" + ); + Ok(()) + } + + #[test] + fn list_objects_v2_2() -> Result<(), ApiError> { + let result = ListBucketResult { + xmlns: (), + name: Value("bucket".to_string()), + prefix: Value("".to_string()), + marker: None, + next_marker: None, + start_after: None, + continuation_token: Some(Value( + "1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=".to_string(), + )), + next_continuation_token: Some(Value("qsdfjklm".to_string())), + key_count: Some(IntValue(112)), + max_keys: IntValue(1000), + delimiter: None, + encoding_type: None, + is_truncated: Value("false".to_string()), + contents: vec![ListBucketItem { + key: Value("happyfacex.jpg".to_string()), + last_modified: Value(msec_to_rfc3339(0)), + etag: Value("\"70ee1738b6b21e2c8a43f3a5ab0eee71\"".to_string()), + size: IntValue(1111), + storage_class: Value("STANDARD".to_string()), + }], + common_prefixes: vec![], + }; + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + bucket\ + \ + 1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=\ + qsdfjklm\ + 112\ + 1000\ + false\ + \ + happyfacex.jpg\ + 1970-01-01T00:00:00.000Z\ + "70ee1738b6b21e2c8a43f3a5ab0eee71"\ + 1111\ + STANDARD\ + \ +" + ); + Ok(()) + } + + #[test] + fn list_parts() -> Result<(), ApiError> { + let result = ListPartsResult { + xmlns: (), + bucket: Value("example-bucket".to_string()), + key: Value("example-object".to_string()), + upload_id: Value( + "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA".to_string(), + ), + part_number_marker: Some(IntValue(1)), + next_part_number_marker: Some(IntValue(3)), + max_parts: IntValue(2), + is_truncated: Value("true".to_string()), + parts: vec![ + PartItem { + etag: 
Value("\"7778aef83f66abc1fa1e8477f296d394\"".to_string()), + last_modified: Value("2010-11-10T20:48:34.000Z".to_string()), + part_number: IntValue(2), + size: IntValue(10485760), + }, + PartItem { + etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()), + last_modified: Value("2010-11-10T20:48:33.000Z".to_string()), + part_number: IntValue(3), + size: IntValue(10485760), + }, + ], + initiator: Initiator { + display_name: Value("umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx".to_string()), + id: Value( + "arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx" + .to_string(), + ), + }, + owner: Owner { + display_name: Value("someName".to_string()), + id: Value( + "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a".to_string(), + ), + }, + storage_class: Value("STANDARD".to_string()), + }; + + assert_eq!( + to_xml_with_header(&result)?, + "\ +\ + example-bucket\ + example-object\ + XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA\ + 1\ + 3\ + 2\ + true\ + \ + "7778aef83f66abc1fa1e8477f296d394"\ + 2010-11-10T20:48:34.000Z\ + 2\ + 10485760\ + \ + \ + "aaaa18db4cc2f85cedef654fccc4a4x8"\ + 2010-11-10T20:48:33.000Z\ + 3\ + 10485760\ + \ + \ + umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx\ + arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx\ + \ + \ + someName\ + 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a\ + \ + STANDARD\ +" + ); + + Ok(()) + } +} diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs deleted file mode 100644 index 8a5407d3..00000000 --- a/src/api/s3_bucket.rs +++ /dev/null @@ -1,352 +0,0 @@ -use std::collections::HashMap; -use std::sync::Arc; - -use hyper::{Body, Request, Response, StatusCode}; - -use garage_model::bucket_alias_table::*; -use garage_model::bucket_table::Bucket; -use garage_model::garage::Garage; -use garage_model::key_table::Key; -use garage_model::object_table::ObjectFilter; -use garage_model::permission::BucketKeyPerm; -use garage_table::util::*; -use garage_util::crdt::*; -use garage_util::data::*; -use garage_util::time::*; - -use crate::error::*; -use crate::s3_xml; -use crate::signature::verify_signed_content; - -pub fn handle_get_bucket_location(garage: Arc) -> Result, Error> { - let loc = s3_xml::LocationConstraint { - xmlns: (), - region: garage.config.s3_api.s3_region.to_string(), - }; - let xml = s3_xml::to_xml_with_header(&loc)?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml.into_bytes()))?) -} - -pub fn handle_get_bucket_versioning() -> Result, Error> { - let versioning = s3_xml::VersioningConfiguration { - xmlns: (), - status: None, - }; - - let xml = s3_xml::to_xml_with_header(&versioning)?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml.into_bytes()))?) 
-} - -pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result, Error> { - let key_p = api_key.params().ok_or_internal_error( - "Key should not be in deleted state at this point (in handle_list_buckets)", - )?; - - // Collect buckets user has access to - let ids = api_key - .state - .as_option() - .unwrap() - .authorized_buckets - .items() - .iter() - .filter(|(_, perms)| perms.is_any()) - .map(|(id, _)| *id) - .collect::>(); - - let mut buckets_by_id = HashMap::new(); - let mut aliases = HashMap::new(); - - for bucket_id in ids.iter() { - let bucket = garage.bucket_table.get(&EmptyKey, bucket_id).await?; - if let Some(bucket) = bucket { - for (alias, _, _active) in bucket.aliases().iter().filter(|(_, _, active)| *active) { - let alias_opt = garage.bucket_alias_table.get(&EmptyKey, alias).await?; - if let Some(alias_ent) = alias_opt { - if *alias_ent.state.get() == Some(*bucket_id) { - aliases.insert(alias_ent.name().to_string(), *bucket_id); - } - } - } - if let Deletable::Present(param) = bucket.state { - buckets_by_id.insert(bucket_id, param); - } - } - } - - for (alias, _, id_opt) in key_p.local_aliases.items() { - if let Some(id) = id_opt { - aliases.insert(alias.clone(), *id); - } - } - - // Generate response - let list_buckets = s3_xml::ListAllMyBucketsResult { - owner: s3_xml::Owner { - display_name: s3_xml::Value(key_p.name.get().to_string()), - id: s3_xml::Value(api_key.key_id.to_string()), - }, - buckets: s3_xml::BucketList { - entries: aliases - .iter() - .filter_map(|(name, id)| buckets_by_id.get(id).map(|p| (name, id, p))) - .map(|(name, _id, param)| s3_xml::Bucket { - creation_date: s3_xml::Value(msec_to_rfc3339(param.creation_date)), - name: s3_xml::Value(name.to_string()), - }) - .collect(), - }, - }; - - let xml = s3_xml::to_xml_with_header(&list_buckets)?; - trace!("xml: {}", xml); - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml))?) -} - -pub async fn handle_create_bucket( - garage: &Garage, - req: Request, - content_sha256: Option, - api_key: Key, - bucket_name: String, -) -> Result, Error> { - let body = hyper::body::to_bytes(req.into_body()).await?; - - if let Some(content_sha256) = content_sha256 { - verify_signed_content(content_sha256, &body[..])?; - } - - let cmd = - parse_create_bucket_xml(&body[..]).ok_or_bad_request("Invalid create bucket XML query")?; - - if let Some(location_constraint) = cmd { - if location_constraint != garage.config.s3_api.s3_region { - return Err(Error::BadRequest(format!( - "Cannot satisfy location constraint `{}`: buckets can only be created in region `{}`", - location_constraint, - garage.config.s3_api.s3_region - ))); - } - } - - let key_params = api_key - .params() - .ok_or_internal_error("Key should not be deleted at this point")?; - - let existing_bucket = if let Some(Some(bucket_id)) = key_params.local_aliases.get(&bucket_name) - { - Some(*bucket_id) - } else { - garage - .bucket_helper() - .resolve_global_bucket_name(&bucket_name) - .await? - }; - - if let Some(bucket_id) = existing_bucket { - // Check we have write or owner permission on the bucket, - // in that case it's fine, return 200 OK, bucket exists; - // otherwise return a forbidden error. - let kp = api_key.bucket_permissions(&bucket_id); - if !(kp.allow_write || kp.allow_owner) { - return Err(Error::BucketAlreadyExists); - } - } else { - // Create the bucket! 
- if !is_valid_bucket_name(&bucket_name) { - return Err(Error::BadRequest(format!( - "{}: {}", - bucket_name, INVALID_BUCKET_NAME_MESSAGE - ))); - } - - let bucket = Bucket::new(); - garage.bucket_table.insert(&bucket).await?; - - garage - .bucket_helper() - .set_bucket_key_permissions(bucket.id, &api_key.key_id, BucketKeyPerm::ALL_PERMISSIONS) - .await?; - - garage - .bucket_helper() - .set_local_bucket_alias(bucket.id, &api_key.key_id, &bucket_name) - .await?; - } - - Ok(Response::builder() - .header("Location", format!("/{}", bucket_name)) - .body(Body::empty()) - .unwrap()) -} - -pub async fn handle_delete_bucket( - garage: &Garage, - bucket_id: Uuid, - bucket_name: String, - api_key: Key, -) -> Result, Error> { - let key_params = api_key - .params() - .ok_or_internal_error("Key should not be deleted at this point")?; - - let is_local_alias = matches!(key_params.local_aliases.get(&bucket_name), Some(Some(_))); - - let mut bucket = garage - .bucket_helper() - .get_existing_bucket(bucket_id) - .await?; - let bucket_state = bucket.state.as_option().unwrap(); - - // If the bucket has no other aliases, this is a true deletion. - // Otherwise, it is just an alias removal. - - let has_other_global_aliases = bucket_state - .aliases - .items() - .iter() - .filter(|(_, _, active)| *active) - .any(|(n, _, _)| is_local_alias || (*n != bucket_name)); - - let has_other_local_aliases = bucket_state - .local_aliases - .items() - .iter() - .filter(|(_, _, active)| *active) - .any(|((k, n), _, _)| !is_local_alias || *n != bucket_name || *k != api_key.key_id); - - if !has_other_global_aliases && !has_other_local_aliases { - // Delete bucket - - // Check bucket is empty - let objects = garage - .object_table - .get_range(&bucket_id, None, Some(ObjectFilter::IsData), 10) - .await?; - if !objects.is_empty() { - return Err(Error::BucketNotEmpty); - } - - // --- done checking, now commit --- - // 1. delete bucket alias - if is_local_alias { - garage - .bucket_helper() - .unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name) - .await?; - } else { - garage - .bucket_helper() - .unset_global_bucket_alias(bucket_id, &bucket_name) - .await?; - } - - // 2. delete authorization from keys that had access - for (key_id, _) in bucket.authorized_keys() { - garage - .bucket_helper() - .set_bucket_key_permissions(bucket.id, key_id, BucketKeyPerm::NO_PERMISSIONS) - .await?; - } - - // 3. delete bucket - bucket.state = Deletable::delete(); - garage.bucket_table.insert(&bucket).await?; - } else if is_local_alias { - // Just unalias - garage - .bucket_helper() - .unset_local_bucket_alias(bucket_id, &api_key.key_id, &bucket_name) - .await?; - } else { - // Just unalias (but from global namespace) - garage - .bucket_helper() - .unset_global_bucket_alias(bucket_id, &bucket_name) - .await?; - } - - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) 
-} - -fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option> { - // Returns None if invalid data - // Returns Some(None) if no location constraint is given - // Returns Some(Some("xxxx")) where xxxx is the given location constraint - - let xml_str = std::str::from_utf8(xml_bytes).ok()?; - if xml_str.trim_matches(char::is_whitespace).is_empty() { - return Some(None); - } - - let xml = roxmltree::Document::parse(xml_str).ok()?; - - let cbc = xml.root().first_child()?; - if !cbc.has_tag_name("CreateBucketConfiguration") { - return None; - } - - let mut ret = None; - for item in cbc.children() { - println!("{:?}", item); - if item.has_tag_name("LocationConstraint") { - if ret != None { - return None; - } - ret = Some(item.text()?.to_string()); - } else if !item.is_text() { - return None; - } - } - - Some(ret) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn create_bucket() { - assert_eq!(parse_create_bucket_xml(br#""#), Some(None)); - assert_eq!( - parse_create_bucket_xml( - br#" - - - "# - ), - Some(None) - ); - assert_eq!( - parse_create_bucket_xml( - br#" - - Europe - - "# - ), - Some(Some("Europe".into())) - ); - assert_eq!( - parse_create_bucket_xml( - br#" - - - "# - ), - None - ); - } -} diff --git a/src/api/s3_copy.rs b/src/api/s3_copy.rs deleted file mode 100644 index fc4707e2..00000000 --- a/src/api/s3_copy.rs +++ /dev/null @@ -1,660 +0,0 @@ -use std::pin::Pin; -use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -use futures::{stream, stream::Stream, StreamExt, TryFutureExt}; -use md5::{Digest as Md5Digest, Md5}; - -use hyper::{Body, Request, Response}; -use serde::Serialize; - -use garage_table::*; -use garage_util::data::*; -use garage_util::time::*; - -use garage_model::block_ref_table::*; -use garage_model::garage::Garage; -use garage_model::key_table::Key; -use garage_model::object_table::*; -use garage_model::version_table::*; - -use crate::api_server::{parse_bucket_key, resolve_bucket}; -use crate::error::*; -use crate::s3_put::{decode_upload_id, get_headers}; -use crate::s3_xml::{self, xmlns_tag}; - -pub async fn handle_copy( - garage: Arc, - api_key: &Key, - req: &Request, - dest_bucket_id: Uuid, - dest_key: &str, -) -> Result, Error> { - let copy_precondition = CopyPreconditionHeaders::parse(req)?; - - let source_object = get_copy_source(&garage, api_key, req).await?; - - let (source_version, source_version_data, source_version_meta) = - extract_source_info(&source_object)?; - - // Check precondition, e.g. 
x-amz-copy-source-if-match - copy_precondition.check(source_version, &source_version_meta.etag)?; - - // Generate parameters for copied object - let new_uuid = gen_uuid(); - let new_timestamp = now_msec(); - - // Implement x-amz-metadata-directive: REPLACE - let new_meta = match req.headers().get("x-amz-metadata-directive") { - Some(v) if v == hyper::header::HeaderValue::from_static("REPLACE") => ObjectVersionMeta { - headers: get_headers(req.headers())?, - size: source_version_meta.size, - etag: source_version_meta.etag.clone(), - }, - _ => source_version_meta.clone(), - }; - - let etag = new_meta.etag.to_string(); - - // Save object copy - match source_version_data { - ObjectVersionData::DeleteMarker => unreachable!(), - ObjectVersionData::Inline(_meta, bytes) => { - let dest_object_version = ObjectVersion { - uuid: new_uuid, - timestamp: new_timestamp, - state: ObjectVersionState::Complete(ObjectVersionData::Inline( - new_meta, - bytes.clone(), - )), - }; - let dest_object = Object::new( - dest_bucket_id, - dest_key.to_string(), - vec![dest_object_version], - ); - garage.object_table.insert(&dest_object).await?; - } - ObjectVersionData::FirstBlock(_meta, first_block_hash) => { - // Get block list from source version - let source_version = garage - .version_table - .get(&source_version.uuid, &EmptyKey) - .await?; - let source_version = source_version.ok_or(Error::NoSuchKey)?; - - // Write an "uploading" marker in Object table - // This holds a reference to the object in the Version table - // so that it won't be deleted, e.g. by repair_versions. - let tmp_dest_object_version = ObjectVersion { - uuid: new_uuid, - timestamp: new_timestamp, - state: ObjectVersionState::Uploading(new_meta.headers.clone()), - }; - let tmp_dest_object = Object::new( - dest_bucket_id, - dest_key.to_string(), - vec![tmp_dest_object_version], - ); - garage.object_table.insert(&tmp_dest_object).await?; - - // Write version in the version table. Even with empty block list, - // this means that the BlockRef entries linked to this version cannot be - // marked as deleted (they are marked as deleted only if the Version - // doesn't exist or is marked as deleted). - let mut dest_version = - Version::new(new_uuid, dest_bucket_id, dest_key.to_string(), false); - garage.version_table.insert(&dest_version).await?; - - // Fill in block list for version and insert block refs - for (bk, bv) in source_version.blocks.items().iter() { - dest_version.blocks.put(*bk, *bv); - } - let dest_block_refs = dest_version - .blocks - .items() - .iter() - .map(|b| BlockRef { - block: b.1.hash, - version: new_uuid, - deleted: false.into(), - }) - .collect::>(); - futures::try_join!( - garage.version_table.insert(&dest_version), - garage.block_ref_table.insert_many(&dest_block_refs[..]), - )?; - - // Insert final object - // We do this last because otherwise there is a race condition in the case where - // the copy call has the same source and destination (this happens, rclone does - // it to update the modification timestamp for instance). If we did this concurrently - // with the stuff before, the block's reference counts could be decremented before - // they are incremented again for the new version, leading to data being deleted. 
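Earlier in this handler, the `x-amz-metadata-directive` header decides whether the copy keeps the source's metadata or takes fresh headers from the request; either way, `size` and `etag` still describe the unchanged source data. A minimal stand-alone sketch of that selection (the simplified `Meta` type and the `new_meta` helper are illustrative; the real code matches on a `HeaderValue`):

```rust
#[derive(Clone, Debug)]
struct Meta {
    content_type: String, // stands in for the full set of object headers
    size: u64,
    etag: String,
}

// `directive` models the raw x-amz-metadata-directive value, if present.
fn new_meta(directive: Option<&str>, req_meta: Meta, source_meta: &Meta) -> Meta {
    match directive {
        // REPLACE: take metadata from the copy request itself, but size and
        // etag still describe the (unchanged) source data.
        Some("REPLACE") => Meta {
            content_type: req_meta.content_type,
            size: source_meta.size,
            etag: source_meta.etag.clone(),
        },
        // COPY, absent, or anything else: keep the source metadata untouched.
        _ => source_meta.clone(),
    }
}

fn main() {
    let src = Meta { content_type: "text/plain".into(), size: 4, etag: "abcd".into() };
    let req = Meta { content_type: "application/json".into(), size: 0, etag: String::new() };
    let out = new_meta(Some("REPLACE"), req, &src);
    assert_eq!(out.content_type, "application/json");
    assert_eq!(out.size, 4); // size and etag always follow the source data
    println!("{:?}", out);
}
```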
- let dest_object_version = ObjectVersion { - uuid: new_uuid, - timestamp: new_timestamp, - state: ObjectVersionState::Complete(ObjectVersionData::FirstBlock( - new_meta, - *first_block_hash, - )), - }; - let dest_object = Object::new( - dest_bucket_id, - dest_key.to_string(), - vec![dest_object_version], - ); - garage.object_table.insert(&dest_object).await?; - } - } - - let last_modified = msec_to_rfc3339(new_timestamp); - let result = CopyObjectResult { - last_modified: s3_xml::Value(last_modified), - etag: s3_xml::Value(format!("\"{}\"", etag)), - }; - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .header("x-amz-version-id", hex::encode(new_uuid)) - .header( - "x-amz-copy-source-version-id", - hex::encode(source_version.uuid), - ) - .body(Body::from(xml))?) -} - -pub async fn handle_upload_part_copy( - garage: Arc, - api_key: &Key, - req: &Request, - dest_bucket_id: Uuid, - dest_key: &str, - part_number: u64, - upload_id: &str, -) -> Result, Error> { - let copy_precondition = CopyPreconditionHeaders::parse(req)?; - - let dest_version_uuid = decode_upload_id(upload_id)?; - - let dest_key = dest_key.to_string(); - let (source_object, dest_object) = futures::try_join!( - get_copy_source(&garage, api_key, req), - garage - .object_table - .get(&dest_bucket_id, &dest_key) - .map_err(Error::from), - )?; - let dest_object = dest_object.ok_or(Error::NoSuchKey)?; - - let (source_object_version, source_version_data, source_version_meta) = - extract_source_info(&source_object)?; - - // Check precondition on source, e.g. x-amz-copy-source-if-match - copy_precondition.check(source_object_version, &source_version_meta.etag)?; - - // Check source range is valid - let source_range = match req.headers().get("x-amz-copy-source-range") { - Some(range) => { - let range_str = range.to_str()?; - let mut ranges = http_range::HttpRange::parse(range_str, source_version_meta.size) - .map_err(|e| (e, source_version_meta.size))?; - if ranges.len() != 1 { - return Err(Error::BadRequest( - "Invalid x-amz-copy-source-range header: exactly 1 range must be given".into(), - )); - } else { - ranges.pop().unwrap() - } - } - None => http_range::HttpRange { - start: 0, - length: source_version_meta.size, - }, - }; - - // Check destination version is indeed in uploading state - if !dest_object - .versions() - .iter() - .any(|v| v.uuid == dest_version_uuid && v.is_uploading()) - { - return Err(Error::NoSuchUpload); - } - - // Check source version is not inlined - match source_version_data { - ObjectVersionData::DeleteMarker => unreachable!(), - ObjectVersionData::Inline(_meta, _bytes) => { - // This is only for small files, we don't bother handling this. 
- // (in AWS, UploadPartCopy works for parts of at least 5MB, which - // is never the case for an inline object) - return Err(Error::BadRequest( - "Source object is too small (minimum part size is 5MB)".into(), - )); - } - ObjectVersionData::FirstBlock(_meta, _first_block_hash) => (), - }; - - // Fetch the source version with its block list, - // and the destination version to check that the part hasn't yet been uploaded - let (source_version, dest_version) = futures::try_join!( - garage - .version_table - .get(&source_object_version.uuid, &EmptyKey), - garage.version_table.get(&dest_version_uuid, &EmptyKey), - )?; - let source_version = source_version.ok_or(Error::NoSuchKey)?; - - // Check this part number hasn't yet been uploaded - if let Some(dv) = dest_version { - if dv.has_part_number(part_number) { - return Err(Error::BadRequest(format!( - "Part number {} has already been uploaded", - part_number - ))); - } - } - - // We want to reuse blocks from the source version as much as possible. - // However, we still need to get the data from these blocks - // because we need to know it to calculate the MD5sum of the part - // which is used as its ETag. - - // First, calculate what blocks we want to keep, - // and the subrange of the block to take, if the bounds of the - // requested range are in the middle. - let (range_begin, range_end) = (source_range.start, source_range.start + source_range.length); - - let mut blocks_to_copy = vec![]; - let mut current_offset = 0; - for (_bk, block) in source_version.blocks.items().iter() { - let (block_begin, block_end) = (current_offset, current_offset + block.size); - - if block_begin < range_end && block_end > range_begin { - let subrange_begin = if block_begin < range_begin { - Some(range_begin - block_begin) - } else { - None - }; - let subrange_end = if block_end > range_end { - Some(range_end - block_begin) - } else { - None - }; - let range_to_copy = match (subrange_begin, subrange_end) { - (Some(b), Some(e)) => Some(b as usize..e as usize), - (None, Some(e)) => Some(0..e as usize), - (Some(b), None) => Some(b as usize..block.size as usize), - (None, None) => None, - }; - - blocks_to_copy.push((block.hash, range_to_copy)); - } - - current_offset = block_end; - } - - // Now, actually copy the blocks - let mut md5hasher = Md5::new(); - - // First, create a stream that is able to read the source blocks - // and extract the subrange if necessary. - // The second returned value is an Option<Hash> that is Some - // if and only if the block returned is a block that already existed - // in the Garage data store (thus we don't need to save it again). - let garage2 = garage.clone(); - let source_blocks = stream::iter(blocks_to_copy) - .flat_map(|(block_hash, range_to_copy)| { - let garage3 = garage2.clone(); - stream::once(async move { - let data = garage3.block_manager.rpc_get_block(&block_hash).await?; - match range_to_copy { - Some(r) => Ok((data[r].to_vec(), None)), - None => Ok((data, Some(block_hash))), - } - }) - }) - .peekable(); - - // The defragmenter is a custom stream (defined below) that concatenates - // consecutive block parts when they are too small. - // It returns a series of (Vec<u8>, Option<Hash>). - // When it is done, it returns an empty vec. - // As with the previous iterator, the Option is Some(_) if and only if - // it's an existing block of the Garage data store.
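The merging policy described in the comment above can be illustrated with a synchronous, std-only analogue of the `Defragmenter` (the real one wraps an async peekable stream; the chunk "hashes" are plain labels here):

```rust
// Greedily merge consecutive chunks until adding the next one would exceed
// `block_size`. A merged chunk loses its known hash (a plain label here),
// exactly like the `hash: None` case in the async version.
fn defragment(
    chunks: Vec<(Vec<u8>, Option<&'static str>)>,
    block_size: usize,
) -> Vec<(Vec<u8>, Option<&'static str>)> {
    let mut out = Vec::new();
    let mut iter = chunks.into_iter().peekable();
    while let Some((mut buf, mut hash)) = iter.next() {
        while let Some((next, _)) = iter.peek() {
            if buf.len() + next.len() > block_size {
                break; // the next chunk would overflow: emit what we have
            }
            let (next, _) = iter.next().unwrap();
            buf.extend(next);
            hash = None; // concatenation invalidates the original block hash
        }
        out.push((buf, hash));
    }
    out
}

fn main() {
    let chunks = vec![
        (vec![0u8; 10], Some("h1")),
        (vec![0u8; 10], Some("h2")),
        (vec![0u8; 100], Some("h3")),
    ];
    let out = defragment(chunks, 64);
    // The two 10-byte chunks are merged (losing their hashes);
    // the 100-byte chunk is passed through with its hash intact.
    assert_eq!(out.len(), 2);
    assert_eq!((out[0].0.len(), out[0].1), (20, None));
    assert_eq!((out[1].0.len(), out[1].1), (100, Some("h3")));
    println!("ok");
}
```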
- let mut defragmenter = Defragmenter::new(garage.config.block_size, Box::pin(source_blocks)); - - let mut current_offset = 0; - let mut next_block = defragmenter.next().await?; - - loop { - let (data, existing_block_hash) = next_block; - if data.is_empty() { - break; - } - - md5hasher.update(&data[..]); - - let must_upload = existing_block_hash.is_none(); - let final_hash = existing_block_hash.unwrap_or_else(|| blake2sum(&data[..])); - - let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false); - version.blocks.put( - VersionBlockKey { - part_number, - offset: current_offset, - }, - VersionBlock { - hash: final_hash, - size: data.len() as u64, - }, - ); - current_offset += data.len() as u64; - - let block_ref = BlockRef { - block: final_hash, - version: dest_version_uuid, - deleted: false.into(), - }; - - let garage2 = garage.clone(); - let res = futures::try_join!( - // Thing 1: if the block is not exactly a block that existed before, - // we need to insert that data as a new block. - async move { - if must_upload { - garage2.block_manager.rpc_put_block(final_hash, data).await - } else { - Ok(()) - } - }, - // Thing 2: we need to insert the block in the version - garage.version_table.insert(&version), - // Thing 3: we need to add a block reference - garage.block_ref_table.insert(&block_ref), - // Thing 4: we need to prefetch the next block - defragmenter.next(), - )?; - next_block = res.3; - } - - let data_md5sum = md5hasher.finalize(); - let etag = hex::encode(data_md5sum); - - // Put the part's ETag in the Versiontable - let mut version = Version::new(dest_version_uuid, dest_bucket_id, dest_key.clone(), false); - version.parts_etags.put(part_number, etag.clone()); - garage.version_table.insert(&version).await?; - - // LGTM - let resp_xml = s3_xml::to_xml_with_header(&CopyPartResult { - xmlns: (), - etag: s3_xml::Value(format!("\"{}\"", etag)), - last_modified: s3_xml::Value(msec_to_rfc3339(source_object_version.timestamp)), - })?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .header( - "x-amz-copy-source-version-id", - hex::encode(source_object_version.uuid), - ) - .body(Body::from(resp_xml))?) -} - -async fn get_copy_source( - garage: &Garage, - api_key: &Key, - req: &Request, -) -> Result { - let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?; - let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?; - - let (source_bucket, source_key) = parse_bucket_key(©_source, None)?; - let source_bucket_id = resolve_bucket(garage, &source_bucket.to_string(), api_key).await?; - - if !api_key.allow_read(&source_bucket_id) { - return Err(Error::Forbidden(format!( - "Reading from bucket {} not allowed for this key", - source_bucket - ))); - } - - let source_key = source_key.ok_or_bad_request("No source key specified")?; - - let source_object = garage - .object_table - .get(&source_bucket_id, &source_key.to_string()) - .await? 
- .ok_or(Error::NoSuchKey)?; - - Ok(source_object) -} - -fn extract_source_info( - source_object: &Object, -) -> Result<(&ObjectVersion, &ObjectVersionData, &ObjectVersionMeta), Error> { - let source_version = source_object - .versions() - .iter() - .rev() - .find(|v| v.is_complete()) - .ok_or(Error::NoSuchKey)?; - - let source_version_data = match &source_version.state { - ObjectVersionState::Complete(x) => x, - _ => unreachable!(), - }; - - let source_version_meta = match source_version_data { - ObjectVersionData::DeleteMarker => { - return Err(Error::NoSuchKey); - } - ObjectVersionData::Inline(meta, _bytes) => meta, - ObjectVersionData::FirstBlock(meta, _fbh) => meta, - }; - - Ok((source_version, source_version_data, source_version_meta)) -} - -struct CopyPreconditionHeaders { - copy_source_if_match: Option>, - copy_source_if_modified_since: Option, - copy_source_if_none_match: Option>, - copy_source_if_unmodified_since: Option, -} - -impl CopyPreconditionHeaders { - fn parse(req: &Request) -> Result { - Ok(Self { - copy_source_if_match: req - .headers() - .get("x-amz-copy-source-if-match") - .map(|x| x.to_str()) - .transpose()? - .map(|x| { - x.split(',') - .map(|m| m.trim().trim_matches('"').to_string()) - .collect::>() - }), - copy_source_if_modified_since: req - .headers() - .get("x-amz-copy-source-if-modified-since") - .map(|x| x.to_str()) - .transpose()? - .map(httpdate::parse_http_date) - .transpose() - .ok_or_bad_request("Invalid date in x-amz-copy-source-if-modified-since")?, - copy_source_if_none_match: req - .headers() - .get("x-amz-copy-source-if-none-match") - .map(|x| x.to_str()) - .transpose()? - .map(|x| { - x.split(',') - .map(|m| m.trim().trim_matches('"').to_string()) - .collect::>() - }), - copy_source_if_unmodified_since: req - .headers() - .get("x-amz-copy-source-if-unmodified-since") - .map(|x| x.to_str()) - .transpose()? - .map(httpdate::parse_http_date) - .transpose() - .ok_or_bad_request("Invalid date in x-amz-copy-source-if-unmodified-since")?, - }) - } - - fn check(&self, v: &ObjectVersion, etag: &str) -> Result<(), Error> { - let v_date = UNIX_EPOCH + Duration::from_millis(v.timestamp); - - let ok = match ( - &self.copy_source_if_match, - &self.copy_source_if_unmodified_since, - &self.copy_source_if_none_match, - &self.copy_source_if_modified_since, - ) { - // TODO I'm not sure all of the conditions are evaluated correctly here - - // If we have both if-match and if-unmodified-since, - // basically we don't care about if-unmodified-since, - // because in the spec it says that if if-match evaluates to - // true but if-unmodified-since evaluates to false, - // the copy is still done. 
- (Some(im), _, None, None) => im.iter().any(|x| x == etag || x == "*"), - (None, Some(ius), None, None) => v_date <= *ius, - - // If we have both if-none-match and if-modified-since, - // then both of the two conditions must evaluate to true - (None, None, Some(inm), Some(ims)) => { - !inm.iter().any(|x| x == etag || x == "*") && v_date > *ims - } - (None, None, Some(inm), None) => !inm.iter().any(|x| x == etag || x == "*"), - (None, None, None, Some(ims)) => v_date > *ims, - (None, None, None, None) => true, - _ => { - return Err(Error::BadRequest( - "Invalid combination of x-amz-copy-source-if-xxxxx headers".into(), - )) - } - }; - - if ok { - Ok(()) - } else { - Err(Error::PreconditionFailed) - } - } -} - -type BlockStreamItemOk = (Vec, Option); -type BlockStreamItem = Result; - -struct Defragmenter> { - block_size: usize, - block_stream: Pin>>, - buffer: Vec, - hash: Option, -} - -impl> Defragmenter { - fn new(block_size: usize, block_stream: Pin>>) -> Self { - Self { - block_size, - block_stream, - buffer: vec![], - hash: None, - } - } - - async fn next(&mut self) -> BlockStreamItem { - // Fill buffer while we can - while let Some(res) = self.block_stream.as_mut().peek().await { - let (peeked_next_block, _) = match res { - Ok(t) => t, - Err(_) => { - self.block_stream.next().await.unwrap()?; - unreachable!() - } - }; - - if self.buffer.is_empty() { - let (next_block, next_block_hash) = self.block_stream.next().await.unwrap()?; - self.buffer = next_block; - self.hash = next_block_hash; - } else if self.buffer.len() + peeked_next_block.len() > self.block_size { - break; - } else { - let (next_block, _) = self.block_stream.next().await.unwrap()?; - self.buffer.extend(next_block); - self.hash = None; - } - } - - Ok((std::mem::take(&mut self.buffer), self.hash.take())) - } -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct CopyObjectResult { - #[serde(rename = "LastModified")] - pub last_modified: s3_xml::Value, - #[serde(rename = "ETag")] - pub etag: s3_xml::Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct CopyPartResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "LastModified")] - pub last_modified: s3_xml::Value, - #[serde(rename = "ETag")] - pub etag: s3_xml::Value, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::s3_xml::to_xml_with_header; - - #[test] - fn copy_object_result() -> Result<(), Error> { - let copy_result = CopyObjectResult { - last_modified: s3_xml::Value(msec_to_rfc3339(0)), - etag: s3_xml::Value("\"9b2cf535f27731c974343645a3985328\"".to_string()), - }; - assert_eq!( - to_xml_with_header(©_result)?, - "\ -\ - 1970-01-01T00:00:00.000Z\ - "9b2cf535f27731c974343645a3985328"\ -\ - " - ); - Ok(()) - } - - #[test] - fn serialize_copy_part_result() -> Result<(), Error> { - let expected_retval = "\ -\ - 2011-04-11T20:34:56.000Z\ - "9b2cf535f27731c974343645a3985328"\ -"; - let v = CopyPartResult { - xmlns: (), - last_modified: s3_xml::Value("2011-04-11T20:34:56.000Z".into()), - etag: s3_xml::Value("\"9b2cf535f27731c974343645a3985328\"".into()), - }; - println!("{}", to_xml_with_header(&v)?); - - assert_eq!(to_xml_with_header(&v)?, expected_retval); - - Ok(()) - } -} diff --git a/src/api/s3_cors.rs b/src/api/s3_cors.rs deleted file mode 100644 index ab77e23a..00000000 --- a/src/api/s3_cors.rs +++ /dev/null @@ -1,442 +0,0 @@ -use quick_xml::de::from_reader; -use std::sync::Arc; - -use http::header::{ - ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, - 
ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD, -}; -use hyper::{header::HeaderName, Body, Method, Request, Response, StatusCode}; - -use serde::{Deserialize, Serialize}; - -use crate::error::*; -use crate::s3_xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; -use crate::signature::verify_signed_content; - -use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule}; -use garage_model::garage::Garage; -use garage_table::*; -use garage_util::data::*; - -pub async fn handle_get_cors(bucket: &Bucket) -> Result, Error> { - let param = bucket - .params() - .ok_or_internal_error("Bucket should not be deleted at this point")?; - - if let Some(cors) = param.cors_config.get() { - let wc = CorsConfiguration { - xmlns: (), - cors_rules: cors - .iter() - .map(CorsRule::from_garage_cors_rule) - .collect::>(), - }; - let xml = to_xml_with_header(&wc)?; - Ok(Response::builder() - .status(StatusCode::OK) - .header(http::header::CONTENT_TYPE, "application/xml") - .body(Body::from(xml))?) - } else { - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) - } -} - -pub async fn handle_delete_cors( - garage: Arc, - bucket_id: Uuid, -) -> Result, Error> { - let mut bucket = garage - .bucket_table - .get(&EmptyKey, &bucket_id) - .await? - .ok_or(Error::NoSuchBucket)?; - - let param = bucket - .params_mut() - .ok_or_internal_error("Bucket should not be deleted at this point")?; - - param.cors_config.update(None); - garage.bucket_table.insert(&bucket).await?; - - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) -} - -pub async fn handle_put_cors( - garage: Arc, - bucket_id: Uuid, - req: Request, - content_sha256: Option, -) -> Result, Error> { - let body = hyper::body::to_bytes(req.into_body()).await?; - - if let Some(content_sha256) = content_sha256 { - verify_signed_content(content_sha256, &body[..])?; - } - - let mut bucket = garage - .bucket_table - .get(&EmptyKey, &bucket_id) - .await? - .ok_or(Error::NoSuchBucket)?; - - let param = bucket - .params_mut() - .ok_or_internal_error("Bucket should not be deleted at this point")?; - - let conf: CorsConfiguration = from_reader(&body as &[u8])?; - conf.validate()?; - - param - .cors_config - .update(Some(conf.into_garage_cors_config()?)); - garage.bucket_table.insert(&bucket).await?; - - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::empty())?) -} - -pub async fn handle_options_s3api( - garage: Arc, - req: &Request, - bucket_name: Option, -) -> Result, Error> { - // FIXME: CORS rules of buckets with local aliases are - // not taken into account. - - // If the bucket name is a global bucket name, - // we try to apply the CORS rules of that bucket. - // If a user has a local bucket name that has - // the same name, its CORS rules won't be applied - // and will be shadowed by the rules of the globally - // existing bucket (but this is inevitable because - // OPTIONS calls are not auhtenticated). - if let Some(bn) = bucket_name { - let helper = garage.bucket_helper(); - let bucket_id = helper.resolve_global_bucket_name(&bn).await?; - if let Some(id) = bucket_id { - let bucket = garage - .bucket_table - .get(&EmptyKey, &id) - .await? - .filter(|b| !b.state.is_deleted()) - .ok_or(Error::NoSuchBucket)?; - handle_options_for_bucket(req, &bucket) - } else { - // If there is a bucket name in the request, but that name - // does not correspond to a global alias for a bucket, - // then it's either a non-existing bucket or a local bucket. 
- // We have no way of knowing, because the request is not - // authenticated and thus we can't resolve local aliases. - // We take the permissive approach of allowing everything, - // because we don't want to prevent web apps that use - // local bucket names from making API calls. - Ok(Response::builder() - .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .header(ACCESS_CONTROL_ALLOW_METHODS, "*") - .status(StatusCode::OK) - .body(Body::empty())?) - } - } else { - // If there is no bucket name in the request, - // we are doing a ListBuckets call, which we want to allow - // for all origins. - Ok(Response::builder() - .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .header(ACCESS_CONTROL_ALLOW_METHODS, "GET") - .status(StatusCode::OK) - .body(Body::empty())?) - } -} - -pub fn handle_options_for_bucket( - req: &Request, - bucket: &Bucket, -) -> Result, Error> { - let origin = req - .headers() - .get("Origin") - .ok_or_bad_request("Missing Origin header")? - .to_str()?; - let request_method = req - .headers() - .get(ACCESS_CONTROL_REQUEST_METHOD) - .ok_or_bad_request("Missing Access-Control-Request-Method header")? - .to_str()?; - let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) { - Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::>(), - None => vec![], - }; - - if let Some(cors_config) = bucket.params().unwrap().cors_config.get() { - let matching_rule = cors_config - .iter() - .find(|rule| cors_rule_matches(rule, origin, request_method, request_headers.iter())); - if let Some(rule) = matching_rule { - let mut resp = Response::builder() - .status(StatusCode::OK) - .body(Body::empty())?; - add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?; - return Ok(resp); - } - } - - Err(Error::Forbidden("This CORS request is not allowed.".into())) -} - -pub fn find_matching_cors_rule<'a>( - bucket: &'a Bucket, - req: &Request, -) -> Result, Error> { - if let Some(cors_config) = bucket.params().unwrap().cors_config.get() { - if let Some(origin) = req.headers().get("Origin") { - let origin = origin.to_str()?; - let request_headers = match req.headers().get(ACCESS_CONTROL_REQUEST_HEADERS) { - Some(h) => h.to_str()?.split(',').map(|h| h.trim()).collect::>(), - None => vec![], - }; - return Ok(cors_config.iter().find(|rule| { - cors_rule_matches(rule, origin, req.method().as_ref(), request_headers.iter()) - })); - } - } - Ok(None) -} - -fn cors_rule_matches<'a, HI, S>( - rule: &GarageCorsRule, - origin: &'a str, - method: &'a str, - mut request_headers: HI, -) -> bool -where - HI: Iterator, - S: AsRef, -{ - rule.allow_origins.iter().any(|x| x == "*" || x == origin) - && rule.allow_methods.iter().any(|x| x == "*" || x == method) - && request_headers.all(|h| { - rule.allow_headers - .iter() - .any(|x| x == "*" || x == h.as_ref()) - }) -} - -pub fn add_cors_headers( - resp: &mut Response, - rule: &GarageCorsRule, -) -> Result<(), http::header::InvalidHeaderValue> { - let h = resp.headers_mut(); - h.insert( - ACCESS_CONTROL_ALLOW_ORIGIN, - rule.allow_origins.join(", ").parse()?, - ); - h.insert( - ACCESS_CONTROL_ALLOW_METHODS, - rule.allow_methods.join(", ").parse()?, - ); - h.insert( - ACCESS_CONTROL_ALLOW_HEADERS, - rule.allow_headers.join(", ").parse()?, - ); - h.insert( - ACCESS_CONTROL_EXPOSE_HEADERS, - rule.expose_headers.join(", ").parse()?, - ); - Ok(()) -} - -// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ---- - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -#[serde(rename = "CORSConfiguration")] -pub 
struct CorsConfiguration { - #[serde(serialize_with = "xmlns_tag", skip_deserializing)] - pub xmlns: (), - #[serde(rename = "CORSRule")] - pub cors_rules: Vec<CorsRule>, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct CorsRule { - #[serde(rename = "ID")] - pub id: Option<Value>, - #[serde(rename = "MaxAgeSeconds")] - pub max_age_seconds: Option<IntValue>, - #[serde(rename = "AllowedOrigin")] - pub allowed_origins: Vec<Value>, - #[serde(rename = "AllowedMethod")] - pub allowed_methods: Vec<Value>, - #[serde(rename = "AllowedHeader", default)] - pub allowed_headers: Vec<Value>, - #[serde(rename = "ExposeHeader", default)] - pub expose_headers: Vec<Value>, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct AllowedMethod { - #[serde(rename = "AllowedMethod")] - pub allowed_method: Value, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct AllowedHeader { - #[serde(rename = "AllowedHeader")] - pub allowed_header: Value, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct ExposeHeader { - #[serde(rename = "ExposeHeader")] - pub expose_header: Value, -} - -impl CorsConfiguration { - pub fn validate(&self) -> Result<(), Error> { - for r in self.cors_rules.iter() { - r.validate()?; - } - Ok(()) - } - - pub fn into_garage_cors_config(self) -> Result<Vec<GarageCorsRule>, Error> { - Ok(self - .cors_rules - .iter() - .map(CorsRule::to_garage_cors_rule) - .collect()) - } -} - -impl CorsRule { - pub fn validate(&self) -> Result<(), Error> { - for method in self.allowed_methods.iter() { - method - .0 - .parse::<Method>() - .ok_or_bad_request("Invalid CORSRule method")?; - } - for header in self - .allowed_headers - .iter() - .chain(self.expose_headers.iter()) - { - header - .0 - .parse::<HeaderName>() - .ok_or_bad_request("Invalid HTTP header name")?; - } - Ok(()) - } - - pub fn to_garage_cors_rule(&self) -> GarageCorsRule { - let convert_vec = - |vval: &[Value]| vval.iter().map(|x| x.0.to_owned()).collect::<Vec<String>>(); - GarageCorsRule { - id: self.id.as_ref().map(|x| x.0.to_owned()), - max_age_seconds: self.max_age_seconds.as_ref().map(|x| x.0 as u64), - allow_origins: convert_vec(&self.allowed_origins), - allow_methods: convert_vec(&self.allowed_methods), - allow_headers: convert_vec(&self.allowed_headers), - expose_headers: convert_vec(&self.expose_headers), - } - } - - pub fn from_garage_cors_rule(rule: &GarageCorsRule) -> Self { - let convert_vec = |vval: &[String]| { - vval.iter() - .map(|x| Value(x.clone())) - .collect::<Vec<Value>>() - }; - Self { - id: rule.id.as_ref().map(|x| Value(x.clone())), - max_age_seconds: rule.max_age_seconds.map(|x| IntValue(x as i64)), - allowed_origins: convert_vec(&rule.allow_origins), - allowed_methods: convert_vec(&rule.allow_methods), - allowed_headers: convert_vec(&rule.allow_headers), - expose_headers: convert_vec(&rule.expose_headers), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use quick_xml::de::from_str; - - #[test] - fn test_deserialize() -> Result<(), Error> { - let message = r#" -<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> - <CORSRule> - <AllowedOrigin>http://www.example.com</AllowedOrigin> - <AllowedMethod>PUT</AllowedMethod> - <AllowedMethod>POST</AllowedMethod> - <AllowedMethod>DELETE</AllowedMethod> - <AllowedHeader>*</AllowedHeader> - </CORSRule> - <CORSRule> - <AllowedOrigin>*</AllowedOrigin> - <AllowedMethod>GET</AllowedMethod> - </CORSRule> - <CORSRule> - <ID>qsdfjklm</ID> - <MaxAgeSeconds>12345</MaxAgeSeconds> - <AllowedOrigin>https://perdu.com</AllowedOrigin> - <AllowedMethod>GET</AllowedMethod> - <AllowedMethod>DELETE</AllowedMethod> - <AllowedHeader>*</AllowedHeader> - <ExposeHeader>*</ExposeHeader> - </CORSRule> -</CORSConfiguration>"#; - let conf: CorsConfiguration = from_str(message).unwrap(); - let ref_value = CorsConfiguration { - xmlns: (), - cors_rules: vec![ - CorsRule { - id: None, - max_age_seconds: None, - allowed_origins: vec!["http://www.example.com".into()], - allowed_methods: vec!["PUT".into(), "POST".into(), "DELETE".into()], - allowed_headers: vec!["*".into()], - expose_headers: vec![], -
}, - CorsRule { - id: None, - max_age_seconds: None, - allowed_origins: vec!["*".into()], - allowed_methods: vec!["GET".into()], - allowed_headers: vec![], - expose_headers: vec![], - }, - CorsRule { - id: Some("qsdfjklm".into()), - max_age_seconds: Some(IntValue(12345)), - allowed_origins: vec!["https://perdu.com".into()], - allowed_methods: vec!["GET".into(), "DELETE".into()], - allowed_headers: vec!["*".into()], - expose_headers: vec!["*".into()], - }, - ], - }; - assert_eq! { - ref_value, - conf - }; - - let message2 = to_xml_with_header(&ref_value)?; - - let cleanup = |c: &str| c.replace(char::is_whitespace, ""); - assert_eq!(cleanup(message), cleanup(&message2)); - - Ok(()) - } -} diff --git a/src/api/s3_delete.rs b/src/api/s3_delete.rs deleted file mode 100644 index b243d982..00000000 --- a/src/api/s3_delete.rs +++ /dev/null @@ -1,170 +0,0 @@ -use std::sync::Arc; - -use hyper::{Body, Request, Response, StatusCode}; - -use garage_util::data::*; -use garage_util::time::*; - -use garage_model::garage::Garage; -use garage_model::object_table::*; - -use crate::error::*; -use crate::s3_xml; -use crate::signature::verify_signed_content; - -async fn handle_delete_internal( - garage: &Garage, - bucket_id: Uuid, - key: &str, -) -> Result<(Uuid, Uuid), Error> { - let object = garage - .object_table - .get(&bucket_id, &key.to_string()) - .await? - .ok_or(Error::NoSuchKey)?; // No need to delete - - let interesting_versions = object.versions().iter().filter(|v| { - !matches!( - v.state, - ObjectVersionState::Aborted - | ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) - ) - }); - - let mut version_to_delete = None; - let mut timestamp = now_msec(); - for v in interesting_versions { - if v.timestamp + 1 > timestamp || version_to_delete.is_none() { - version_to_delete = Some(v.uuid); - } - timestamp = std::cmp::max(timestamp, v.timestamp + 1); - } - - let deleted_version = version_to_delete.ok_or(Error::NoSuchKey)?; - - let version_uuid = gen_uuid(); - - let object = Object::new( - bucket_id, - key.into(), - vec![ObjectVersion { - uuid: version_uuid, - timestamp, - state: ObjectVersionState::Complete(ObjectVersionData::DeleteMarker), - }], - ); - - garage.object_table.insert(&object).await?; - - Ok((deleted_version, version_uuid)) -} - -pub async fn handle_delete( - garage: Arc, - bucket_id: Uuid, - key: &str, -) -> Result, Error> { - let (_deleted_version, delete_marker_version) = - handle_delete_internal(&garage, bucket_id, key).await?; - - Ok(Response::builder() - .header("x-amz-version-id", hex::encode(delete_marker_version)) - .status(StatusCode::NO_CONTENT) - .body(Body::from(vec![])) - .unwrap()) -} - -pub async fn handle_delete_objects( - garage: Arc, - bucket_id: Uuid, - req: Request, - content_sha256: Option, -) -> Result, Error> { - let body = hyper::body::to_bytes(req.into_body()).await?; - - if let Some(content_sha256) = content_sha256 { - verify_signed_content(content_sha256, &body[..])?; - } - - let cmd_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; - let cmd = parse_delete_objects_xml(&cmd_xml).ok_or_bad_request("Invalid delete XML query")?; - - let mut ret_deleted = Vec::new(); - let mut ret_errors = Vec::new(); - - for obj in cmd.objects.iter() { - match handle_delete_internal(&garage, bucket_id, &obj.key).await { - Ok((deleted_version, delete_marker_version)) => { - if cmd.quiet { - continue; - } - ret_deleted.push(s3_xml::Deleted { - key: s3_xml::Value(obj.key.clone()), - version_id: s3_xml::Value(hex::encode(deleted_version)), - 
delete_marker_version_id: s3_xml::Value(hex::encode(delete_marker_version)), - }); - } - Err(e) => { - ret_errors.push(s3_xml::DeleteError { - code: s3_xml::Value(e.aws_code().to_string()), - key: Some(s3_xml::Value(obj.key.clone())), - message: s3_xml::Value(format!("{}", e)), - version_id: None, - }); - } - } - } - - let xml = s3_xml::to_xml_with_header(&s3_xml::DeleteResult { - xmlns: (), - deleted: ret_deleted, - errors: ret_errors, - })?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml))?) -} - -struct DeleteRequest { - quiet: bool, - objects: Vec, -} - -struct DeleteObject { - key: String, -} - -fn parse_delete_objects_xml(xml: &roxmltree::Document) -> Option { - let mut ret = DeleteRequest { - quiet: false, - objects: vec![], - }; - - let root = xml.root(); - let delete = root.first_child()?; - - if !delete.has_tag_name("Delete") { - return None; - } - - for item in delete.children() { - if item.has_tag_name("Object") { - let key = item.children().find(|e| e.has_tag_name("Key"))?; - let key_str = key.text()?; - ret.objects.push(DeleteObject { - key: key_str.to_string(), - }); - } else if item.has_tag_name("Quiet") { - if item.text()? == "true" { - ret.quiet = true; - } else { - ret.quiet = false; - } - } else { - return None; - } - } - - Some(ret) -} diff --git a/src/api/s3_get.rs b/src/api/s3_get.rs deleted file mode 100644 index 7f647e15..00000000 --- a/src/api/s3_get.rs +++ /dev/null @@ -1,461 +0,0 @@ -//! Function related to GET and HEAD requests -use std::sync::Arc; -use std::time::{Duration, UNIX_EPOCH}; - -use futures::stream::*; -use http::header::{ - ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, IF_MODIFIED_SINCE, - IF_NONE_MATCH, LAST_MODIFIED, RANGE, -}; -use hyper::body::Bytes; -use hyper::{Body, Request, Response, StatusCode}; - -use garage_table::EmptyKey; -use garage_util::data::*; - -use garage_model::garage::Garage; -use garage_model::object_table::*; -use garage_model::version_table::*; - -use crate::error::*; - -const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count"; - -fn object_headers( - version: &ObjectVersion, - version_meta: &ObjectVersionMeta, -) -> http::response::Builder { - debug!("Version meta: {:?}", version_meta); - - let date = UNIX_EPOCH + Duration::from_millis(version.timestamp); - let date_str = httpdate::fmt_http_date(date); - - let mut resp = Response::builder() - .header(CONTENT_TYPE, version_meta.headers.content_type.to_string()) - .header(LAST_MODIFIED, date_str) - .header(ACCEPT_RANGES, "bytes".to_string()); - - if !version_meta.etag.is_empty() { - resp = resp.header(ETAG, format!("\"{}\"", version_meta.etag)); - } - - for (k, v) in version_meta.headers.other.iter() { - resp = resp.header(k, v.to_string()); - } - - resp -} - -fn try_answer_cached( - version: &ObjectVersion, - version_meta: &ObjectVersionMeta, - req: &Request, -) -> Option> { - // It is possible, and is even usually the case, [that both If-None-Match and - // If-Modified-Since] are present in a request. In this situation If-None-Match takes - // precedence and If-Modified-Since is ignored (as per 6.Precedence from rfc7232). 
The rationale is - that etag-based matching is more accurate: it has no issue with sub-second precision, - for instance in the case of very fast updates. - let cached = if let Some(none_match) = req.headers().get(IF_NONE_MATCH) { - let none_match = none_match.to_str().ok()?; - let expected = format!("\"{}\"", version_meta.etag); - let found = none_match - .split(',') - .map(str::trim) - .any(|etag| etag == expected || etag == "\"*\""); - found - } else if let Some(modified_since) = req.headers().get(IF_MODIFIED_SINCE) { - let modified_since = modified_since.to_str().ok()?; - let client_date = httpdate::parse_http_date(modified_since).ok()?; - let server_date = UNIX_EPOCH + Duration::from_millis(version.timestamp); - client_date >= server_date - } else { - false - }; - - if cached { - Some( - Response::builder() - .status(StatusCode::NOT_MODIFIED) - .body(Body::empty()) - .unwrap(), - ) - } else { - None - } -} - -/// Handle HEAD request -pub async fn handle_head( - garage: Arc<Garage>, - req: &Request<Body>, - bucket_id: Uuid, - key: &str, - part_number: Option<u64>, -) -> Result<Response<Body>, Error> { - let object = garage - .object_table - .get(&bucket_id, &key.to_string()) - .await? - .ok_or(Error::NoSuchKey)?; - - let object_version = object - .versions() - .iter() - .rev() - .find(|v| v.is_data()) - .ok_or(Error::NoSuchKey)?; - - let version_data = match &object_version.state { - ObjectVersionState::Complete(c) => c, - _ => unreachable!(), - }; - - let version_meta = match version_data { - ObjectVersionData::Inline(meta, _) => meta, - ObjectVersionData::FirstBlock(meta, _) => meta, - _ => unreachable!(), - }; - - if let Some(cached) = try_answer_cached(object_version, version_meta, req) { - return Ok(cached); - } - - if let Some(pn) = part_number { - match version_data { - ObjectVersionData::Inline(_, bytes) => { - if pn != 1 { - return Err(Error::InvalidPart); - } - Ok(object_headers(object_version, version_meta) - .header(CONTENT_LENGTH, format!("{}", bytes.len())) - .header( - CONTENT_RANGE, - format!("bytes 0-{}/{}", bytes.len() - 1, bytes.len()), - ) - .header(X_AMZ_MP_PARTS_COUNT, "1") - .status(StatusCode::PARTIAL_CONTENT) - .body(Body::empty())?) - } - ObjectVersionData::FirstBlock(_, _) => { - let version = garage - .version_table - .get(&object_version.uuid, &EmptyKey) - .await? - .ok_or(Error::NoSuchKey)?; - - let (part_offset, part_end) = - calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; - let n_parts = version.parts_etags.items().len(); - - Ok(object_headers(object_version, version_meta) - .header(CONTENT_LENGTH, format!("{}", part_end - part_offset)) - .header( - CONTENT_RANGE, - format!( - "bytes {}-{}/{}", - part_offset, - part_end - 1, - version_meta.size - ), - ) - .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts)) - .status(StatusCode::PARTIAL_CONTENT) - .body(Body::empty())?) - } - _ => unreachable!(), - } - } else { - Ok(object_headers(object_version, version_meta) - .header(CONTENT_LENGTH, format!("{}", version_meta.size)) - .status(StatusCode::OK) - .body(Body::empty())?) - } -} - -/// Handle GET request -pub async fn handle_get( - garage: Arc<Garage>, - req: &Request<Body>, - bucket_id: Uuid, - key: &str, - part_number: Option<u64>, -) -> Result<Response<Body>, Error> { - let object = garage - .object_table - .get(&bucket_id, &key.to_string()) - .await?
- .ok_or(Error::NoSuchKey)?; - - let last_v = object - .versions() - .iter() - .rev() - .find(|v| v.is_complete()) - .ok_or(Error::NoSuchKey)?; - - let last_v_data = match &last_v.state { - ObjectVersionState::Complete(x) => x, - _ => unreachable!(), - }; - let last_v_meta = match last_v_data { - ObjectVersionData::DeleteMarker => return Err(Error::NoSuchKey), - ObjectVersionData::Inline(meta, _) => meta, - ObjectVersionData::FirstBlock(meta, _) => meta, - }; - - if let Some(cached) = try_answer_cached(last_v, last_v_meta, req) { - return Ok(cached); - } - - match (part_number, parse_range_header(req, last_v_meta.size)?) { - (Some(_), Some(_)) => { - return Err(Error::BadRequest( - "Cannot specify both partNumber and Range header".into(), - )); - } - (Some(pn), None) => { - return handle_get_part(garage, last_v, last_v_data, last_v_meta, pn).await; - } - (None, Some(range)) => { - return handle_get_range( - garage, - last_v, - last_v_data, - last_v_meta, - range.start, - range.start + range.length, - ) - .await; - } - (None, None) => (), - } - - let resp_builder = object_headers(last_v, last_v_meta) - .header(CONTENT_LENGTH, format!("{}", last_v_meta.size)) - .status(StatusCode::OK); - - match &last_v_data { - ObjectVersionData::DeleteMarker => unreachable!(), - ObjectVersionData::Inline(_, bytes) => { - let body: Body = Body::from(bytes.to_vec()); - Ok(resp_builder.body(body)?) - } - ObjectVersionData::FirstBlock(_, first_block_hash) => { - let read_first_block = garage.block_manager.rpc_get_block(first_block_hash); - let get_next_blocks = garage.version_table.get(&last_v.uuid, &EmptyKey); - - let (first_block, version) = futures::try_join!(read_first_block, get_next_blocks)?; - let version = version.ok_or(Error::NoSuchKey)?; - - let mut blocks = version - .blocks - .items() - .iter() - .map(|(_, vb)| (vb.hash, None)) - .collect::>(); - blocks[0].1 = Some(first_block); - - let body_stream = futures::stream::iter(blocks) - .map(move |(hash, data_opt)| { - let garage = garage.clone(); - async move { - if let Some(data) = data_opt { - Ok(Bytes::from(data)) - } else { - garage - .block_manager - .rpc_get_block(&hash) - .await - .map(Bytes::from) - } - } - }) - .buffered(2); - - let body = hyper::body::Body::wrap_stream(body_stream); - Ok(resp_builder.body(body)?) - } - } -} - -async fn handle_get_range( - garage: Arc, - version: &ObjectVersion, - version_data: &ObjectVersionData, - version_meta: &ObjectVersionMeta, - begin: u64, - end: u64, -) -> Result, Error> { - let resp_builder = object_headers(version, version_meta) - .header(CONTENT_LENGTH, format!("{}", end - begin)) - .header( - CONTENT_RANGE, - format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), - ) - .status(StatusCode::PARTIAL_CONTENT); - - match &version_data { - ObjectVersionData::DeleteMarker => unreachable!(), - ObjectVersionData::Inline(_meta, bytes) => { - if end as usize <= bytes.len() { - let body: Body = Body::from(bytes[begin as usize..end as usize].to_vec()); - Ok(resp_builder.body(body)?) - } else { - None.ok_or_internal_error( - "Requested range not present in inline bytes when it should have been", - ) - } - } - ObjectVersionData::FirstBlock(_meta, _first_block_hash) => { - let version = garage - .version_table - .get(&version.uuid, &EmptyKey) - .await? - .ok_or(Error::NoSuchKey)?; - - let body = body_from_blocks_range(garage, version.blocks.items(), begin, end); - Ok(resp_builder.body(body)?) 
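Both the range and the part handlers ultimately reduce to one computation: which stored blocks intersect the requested byte range, and which sub-slice of each block to send. A self-contained sketch of that intersection logic (the `Block` type and the `blocks_to_send` name are illustrative):

```rust
// Each stored block covers [offset, offset + len) of the object.
#[derive(Debug)]
struct Block {
    len: u64,
}

// Returns, for each block, the sub-range to send (in block-local
// coordinates), or None when the block is entirely outside [begin, end).
fn blocks_to_send(blocks: &[Block], begin: u64, end: u64) -> Vec<Option<(u64, u64)>> {
    let mut out = Vec::with_capacity(blocks.len());
    let mut offset = 0;
    for b in blocks {
        let (b_begin, b_end) = (offset, offset + b.len);
        if b_begin < end && b_end > begin {
            // Clamp the requested range to this block's extent,
            // then convert to block-local coordinates.
            let start_in_block = begin.max(b_begin) - b_begin;
            let end_in_block = end.min(b_end) - b_begin;
            out.push(Some((start_in_block, end_in_block)));
        } else {
            out.push(None);
        }
        offset = b_end;
    }
    out
}

fn main() {
    // Three 100-byte blocks; request bytes 150..250.
    let blocks = [Block { len: 100 }, Block { len: 100 }, Block { len: 100 }];
    let plan = blocks_to_send(&blocks, 150, 250);
    assert_eq!(plan, vec![None, Some((50, 100)), Some((0, 50))]);
    println!("{:?}", plan);
}
```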
- } - } -} - -async fn handle_get_part( - garage: Arc, - object_version: &ObjectVersion, - version_data: &ObjectVersionData, - version_meta: &ObjectVersionMeta, - part_number: u64, -) -> Result, Error> { - let resp_builder = - object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT); - - match version_data { - ObjectVersionData::Inline(_, bytes) => { - if part_number != 1 { - return Err(Error::InvalidPart); - } - Ok(resp_builder - .header(CONTENT_LENGTH, format!("{}", bytes.len())) - .header( - CONTENT_RANGE, - format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()), - ) - .header(X_AMZ_MP_PARTS_COUNT, "1") - .body(Body::from(bytes.to_vec()))?) - } - ObjectVersionData::FirstBlock(_, _) => { - let version = garage - .version_table - .get(&object_version.uuid, &EmptyKey) - .await? - .ok_or(Error::NoSuchKey)?; - - let (begin, end) = - calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; - let n_parts = version.parts_etags.items().len(); - - let body = body_from_blocks_range(garage, version.blocks.items(), begin, end); - - Ok(resp_builder - .header(CONTENT_LENGTH, format!("{}", end - begin)) - .header( - CONTENT_RANGE, - format!("bytes {}-{}/{}", begin, end - 1, version_meta.size), - ) - .header(X_AMZ_MP_PARTS_COUNT, format!("{}", n_parts)) - .body(body)?) - } - _ => unreachable!(), - } -} - -fn parse_range_header( - req: &Request, - total_size: u64, -) -> Result, Error> { - let range = match req.headers().get(RANGE) { - Some(range) => { - let range_str = range.to_str()?; - let mut ranges = - http_range::HttpRange::parse(range_str, total_size).map_err(|e| (e, total_size))?; - if ranges.len() > 1 { - // garage does not support multi-range requests yet, so we respond with the entire - // object when multiple ranges are requested - None - } else { - ranges.pop() - } - } - None => None, - }; - Ok(range) -} - -fn calculate_part_bounds(v: &Version, part_number: u64) -> Option<(u64, u64)> { - let mut offset = 0; - for (i, (bk, bv)) in v.blocks.items().iter().enumerate() { - if bk.part_number == part_number { - let size: u64 = v.blocks.items()[i..] 
- .iter() - .take_while(|(k, _)| k.part_number == part_number) - .map(|(_, v)| v.size) - .sum(); - return Some((offset, offset + size)); - } - offset += bv.size; - } - None -} - -fn body_from_blocks_range( - garage: Arc, - all_blocks: &[(VersionBlockKey, VersionBlock)], - begin: u64, - end: u64, -) -> Body { - // We will store here the list of blocks that have an intersection with the requested - // range, as well as their "true offset", which is their actual offset in the complete - // file (whereas block.offset designates the offset of the block WITHIN THE PART - // block.part_number, which is not the same in the case of a multipart upload) - let mut blocks: Vec<(VersionBlock, u64)> = Vec::with_capacity(std::cmp::min( - all_blocks.len(), - 4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size as u64, 1024)) as usize, - )); - let mut true_offset = 0; - for (_, b) in all_blocks.iter() { - if true_offset >= end { - break; - } - // Keep only blocks that have an intersection with the requested range - if true_offset < end && true_offset + b.size > begin { - blocks.push((*b, true_offset)); - } - true_offset += b.size; - } - - let body_stream = futures::stream::iter(blocks) - .map(move |(block, true_offset)| { - let garage = garage.clone(); - async move { - let data = garage.block_manager.rpc_get_block(&block.hash).await?; - let data = Bytes::from(data); - let start_in_block = if true_offset > begin { - 0 - } else { - begin - true_offset - }; - let end_in_block = if true_offset + block.size < end { - block.size - } else { - end - true_offset - }; - Result::::Ok( - data.slice(start_in_block as usize..end_in_block as usize), - ) - } - }) - .buffered(2); - - hyper::body::Body::wrap_stream(body_stream) -} diff --git a/src/api/s3_list.rs b/src/api/s3_list.rs deleted file mode 100644 index 5852fc1b..00000000 --- a/src/api/s3_list.rs +++ /dev/null @@ -1,1383 +0,0 @@ -use std::cmp::Ordering; -use std::collections::{BTreeMap, BTreeSet}; -use std::iter::{Iterator, Peekable}; -use std::sync::Arc; - -use hyper::{Body, Response}; - -use garage_util::data::*; -use garage_util::error::Error as GarageError; -use garage_util::time::*; - -use garage_model::garage::Garage; -use garage_model::object_table::*; -use garage_model::version_table::Version; - -use garage_table::EmptyKey; - -use crate::encoding::*; -use crate::error::*; -use crate::s3_put; -use crate::s3_xml; - -const DUMMY_NAME: &str = "Dummy Key"; -const DUMMY_KEY: &str = "GKDummyKey"; - -#[derive(Debug)] -pub struct ListQueryCommon { - pub bucket_name: String, - pub bucket_id: Uuid, - pub delimiter: Option, - pub page_size: usize, - pub prefix: String, - pub urlencode_resp: bool, -} - -#[derive(Debug)] -pub struct ListObjectsQuery { - pub is_v2: bool, - pub marker: Option, - pub continuation_token: Option, - pub start_after: Option, - pub common: ListQueryCommon, -} - -#[derive(Debug)] -pub struct ListMultipartUploadsQuery { - pub key_marker: Option, - pub upload_id_marker: Option, - pub common: ListQueryCommon, -} - -#[derive(Debug)] -pub struct ListPartsQuery { - pub bucket_name: String, - pub bucket_id: Uuid, - pub key: String, - pub upload_id: String, - pub part_number_marker: Option, - pub max_parts: u64, -} - -pub async fn handle_list( - garage: Arc, - query: &ListObjectsQuery, -) -> Result, Error> { - let io = |bucket, key, count| { - let t = &garage.object_table; - async move { - t.get_range(&bucket, key, Some(ObjectFilter::IsData), count) - .await - } - }; - - debug!("ListObjects {:?}", query); - let mut acc = query.build_accumulator(); - 
let pagination = fetch_list_entries(&query.common, query.begin()?, &mut acc, &io).await?; - - let result = s3_xml::ListBucketResult { - xmlns: (), - // Sending back request information - name: s3_xml::Value(query.common.bucket_name.to_string()), - prefix: uriencode_maybe(&query.common.prefix, query.common.urlencode_resp), - max_keys: s3_xml::IntValue(query.common.page_size as i64), - delimiter: query - .common - .delimiter - .as_ref() - .map(|x| uriencode_maybe(x, query.common.urlencode_resp)), - encoding_type: match query.common.urlencode_resp { - true => Some(s3_xml::Value("url".to_string())), - false => None, - }, - marker: match (!query.is_v2, &query.marker) { - (true, Some(k)) => Some(uriencode_maybe(k, query.common.urlencode_resp)), - _ => None, - }, - start_after: match (query.is_v2, &query.start_after) { - (true, Some(sa)) => Some(uriencode_maybe(sa, query.common.urlencode_resp)), - _ => None, - }, - continuation_token: match (query.is_v2, &query.continuation_token) { - (true, Some(ct)) => Some(s3_xml::Value(ct.to_string())), - _ => None, - }, - - // Pagination - is_truncated: s3_xml::Value(format!("{}", pagination.is_some())), - key_count: Some(s3_xml::IntValue( - acc.keys.len() as i64 + acc.common_prefixes.len() as i64, - )), - next_marker: match (!query.is_v2, &pagination) { - (true, Some(RangeBegin::AfterKey { key: k })) - | ( - true, - Some(RangeBegin::IncludingKey { - fallback_key: Some(k), - .. - }), - ) => Some(uriencode_maybe(k, query.common.urlencode_resp)), - _ => None, - }, - next_continuation_token: match (query.is_v2, &pagination) { - (true, Some(RangeBegin::AfterKey { key })) => Some(s3_xml::Value(format!( - "]{}", - base64::encode(key.as_bytes()) - ))), - (true, Some(RangeBegin::IncludingKey { key, .. })) => Some(s3_xml::Value(format!( - "[{}", - base64::encode(key.as_bytes()) - ))), - _ => None, - }, - - // Body - contents: acc - .keys - .iter() - .map(|(key, info)| s3_xml::ListBucketItem { - key: uriencode_maybe(key, query.common.urlencode_resp), - last_modified: s3_xml::Value(msec_to_rfc3339(info.last_modified)), - size: s3_xml::IntValue(info.size as i64), - etag: s3_xml::Value(format!("\"{}\"", info.etag)), - storage_class: s3_xml::Value("STANDARD".to_string()), - }) - .collect(), - common_prefixes: acc - .common_prefixes - .iter() - .map(|pfx| s3_xml::CommonPrefix { - prefix: uriencode_maybe(pfx, query.common.urlencode_resp), - }) - .collect(), - }; - - let xml = s3_xml::to_xml_with_header(&result)?; - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml.into_bytes()))?) 
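The opaque V2 continuation token built above is simply a one-character inclusive/exclusive flag followed by the base64-encoded key. A round-trip sketch, assuming the `base64` 0.x free functions this code already uses (`Cursor`, `encode_token`, and `decode_token` are illustrative names):

```rust
enum Cursor {
    AfterKey(String),     // ']' prefix: resume strictly after this key
    IncludingKey(String), // '[' prefix: resume at this key, inclusive
}

fn encode_token(c: &Cursor) -> String {
    match c {
        Cursor::AfterKey(k) => format!("]{}", base64::encode(k.as_bytes())),
        Cursor::IncludingKey(k) => format!("[{}", base64::encode(k.as_bytes())),
    }
}

fn decode_token(token: &str) -> Option<Cursor> {
    if token.len() < 2 {
        return None;
    }
    let key = String::from_utf8(base64::decode(token[1..].as_bytes()).ok()?).ok()?;
    match &token[..1] {
        "]" => Some(Cursor::AfterKey(key)),
        "[" => Some(Cursor::IncludingKey(key)),
        _ => None,
    }
}

fn main() {
    let token = encode_token(&Cursor::AfterKey("photos/cat.jpg".into()));
    match decode_token(&token) {
        Some(Cursor::AfterKey(key)) => assert_eq!(key, "photos/cat.jpg"),
        _ => panic!("round-trip failed"),
    }
    println!("token: {}", token);
}
```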
-} - -pub async fn handle_list_multipart_upload( - garage: Arc, - query: &ListMultipartUploadsQuery, -) -> Result, Error> { - let io = |bucket, key, count| { - let t = &garage.object_table; - async move { - t.get_range(&bucket, key, Some(ObjectFilter::IsUploading), count) - .await - } - }; - - debug!("ListMultipartUploads {:?}", query); - let mut acc = query.build_accumulator(); - let pagination = fetch_list_entries(&query.common, query.begin()?, &mut acc, &io).await?; - - let result = s3_xml::ListMultipartUploadsResult { - xmlns: (), - - // Sending back some information about the request - bucket: s3_xml::Value(query.common.bucket_name.to_string()), - prefix: uriencode_maybe(&query.common.prefix, query.common.urlencode_resp), - delimiter: query - .common - .delimiter - .as_ref() - .map(|d| uriencode_maybe(d, query.common.urlencode_resp)), - max_uploads: s3_xml::IntValue(query.common.page_size as i64), - key_marker: query - .key_marker - .as_ref() - .map(|m| uriencode_maybe(m, query.common.urlencode_resp)), - upload_id_marker: query - .upload_id_marker - .as_ref() - .map(|m| s3_xml::Value(m.to_string())), - encoding_type: match query.common.urlencode_resp { - true => Some(s3_xml::Value("url".to_string())), - false => None, - }, - - // Handling pagination - is_truncated: s3_xml::Value(format!("{}", pagination.is_some())), - next_key_marker: match &pagination { - None => None, - Some(RangeBegin::AfterKey { key }) - | Some(RangeBegin::AfterUpload { key, .. }) - | Some(RangeBegin::IncludingKey { key, .. }) => { - Some(uriencode_maybe(key, query.common.urlencode_resp)) - } - }, - next_upload_id_marker: match pagination { - Some(RangeBegin::AfterUpload { upload, .. }) => { - Some(s3_xml::Value(hex::encode(upload))) - } - Some(RangeBegin::IncludingKey { .. }) => Some(s3_xml::Value("include".to_string())), - _ => None, - }, - - // Result body - upload: acc - .keys - .iter() - .map(|(uuid, info)| s3_xml::ListMultipartItem { - initiated: s3_xml::Value(msec_to_rfc3339(info.timestamp)), - key: uriencode_maybe(&info.key, query.common.urlencode_resp), - upload_id: s3_xml::Value(hex::encode(uuid)), - storage_class: s3_xml::Value("STANDARD".to_string()), - initiator: s3_xml::Initiator { - display_name: s3_xml::Value(DUMMY_NAME.to_string()), - id: s3_xml::Value(DUMMY_KEY.to_string()), - }, - owner: s3_xml::Owner { - display_name: s3_xml::Value(DUMMY_NAME.to_string()), - id: s3_xml::Value(DUMMY_KEY.to_string()), - }, - }) - .collect(), - common_prefixes: acc - .common_prefixes - .iter() - .map(|c| s3_xml::CommonPrefix { - prefix: s3_xml::Value(c.to_string()), - }) - .collect(), - }; - - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml.into_bytes()))?) 
-} - -pub async fn handle_list_parts( - garage: Arc, - query: &ListPartsQuery, -) -> Result, Error> { - debug!("ListParts {:?}", query); - - let upload_id = s3_put::decode_upload_id(&query.upload_id)?; - - let (object, version) = futures::try_join!( - garage.object_table.get(&query.bucket_id, &query.key), - garage.version_table.get(&upload_id, &EmptyKey), - )?; - - let (info, next) = fetch_part_info(query, object, version, upload_id)?; - - let result = s3_xml::ListPartsResult { - xmlns: (), - bucket: s3_xml::Value(query.bucket_name.to_string()), - key: s3_xml::Value(query.key.to_string()), - upload_id: s3_xml::Value(query.upload_id.to_string()), - part_number_marker: query.part_number_marker.map(|e| s3_xml::IntValue(e as i64)), - next_part_number_marker: next.map(|e| s3_xml::IntValue(e as i64)), - max_parts: s3_xml::IntValue(query.max_parts as i64), - is_truncated: s3_xml::Value(next.map(|_| "true").unwrap_or("false").to_string()), - parts: info - .iter() - .map(|part| s3_xml::PartItem { - etag: s3_xml::Value(format!("\"{}\"", part.etag)), - last_modified: s3_xml::Value(msec_to_rfc3339(part.timestamp)), - part_number: s3_xml::IntValue(part.part_number as i64), - size: s3_xml::IntValue(part.size as i64), - }) - .collect(), - initiator: s3_xml::Initiator { - display_name: s3_xml::Value(DUMMY_NAME.to_string()), - id: s3_xml::Value(DUMMY_KEY.to_string()), - }, - owner: s3_xml::Owner { - display_name: s3_xml::Value(DUMMY_NAME.to_string()), - id: s3_xml::Value(DUMMY_KEY.to_string()), - }, - storage_class: s3_xml::Value("STANDARD".to_string()), - }; - - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::builder() - .header("Content-Type", "application/xml") - .body(Body::from(xml.into_bytes()))?) -} - -/* - * Private enums and structs - */ - -#[derive(Debug)] -struct ObjectInfo { - last_modified: u64, - size: u64, - etag: String, -} - -#[derive(Debug, PartialEq)] -struct UploadInfo { - key: String, - timestamp: u64, -} - -#[derive(Debug, PartialEq)] -struct PartInfo { - etag: String, - timestamp: u64, - part_number: u64, - size: u64, -} - -enum ExtractionResult { - NoMore, - Filled, - FilledAtUpload { - key: String, - upload: Uuid, - }, - Extracted { - key: String, - }, - // Fallback key is used for legacy APIs that only support - // exlusive pagination (and not inclusive one). - SkipTo { - key: String, - fallback_key: Option, - }, -} - -#[derive(PartialEq, Clone, Debug)] -enum RangeBegin { - // Fallback key is used for legacy APIs that only support - // exlusive pagination (and not inclusive one). - IncludingKey { - key: String, - fallback_key: Option, - }, - AfterKey { - key: String, - }, - AfterUpload { - key: String, - upload: Uuid, - }, -} -type Pagination = Option; - -/* - * Fetch list entries - */ - -async fn fetch_list_entries( - query: &ListQueryCommon, - begin: RangeBegin, - acc: &mut impl ExtractAccumulator, - mut io: F, -) -> Result -where - R: futures::Future, GarageError>>, - F: FnMut(Uuid, Option, usize) -> R, -{ - let mut cursor = begin; - // +1 is needed as we may need to skip the 1st key - // (range is inclusive while most S3 requests are exclusive) - let count = query.page_size + 1; - - loop { - let start_key = match cursor { - RangeBegin::AfterKey { ref key } - | RangeBegin::AfterUpload { ref key, .. } - | RangeBegin::IncludingKey { ref key, .. 
} => Some(key.clone()), - }; - - // Fetch objects - let objects = io(query.bucket_id, start_key.clone(), count).await?; - - debug!( - "List: get range {:?} (max {}), results: {}", - start_key, - count, - objects.len() - ); - let server_more = objects.len() >= count; - - let prev_req_cursor = cursor.clone(); - let mut iter = objects.iter().peekable(); - - // Drop the first key if needed - // Only AfterKey requires it according to the S3 spec and our implem. - match (&cursor, iter.peek()) { - (RangeBegin::AfterKey { key }, Some(object)) if &object.key == key => iter.next(), - (_, _) => None, - }; - - while let Some(object) = iter.peek() { - if !object.key.starts_with(&query.prefix) { - // If the key is not in the requested prefix, we're done - return Ok(None); - } - - cursor = match acc.extract(query, &cursor, &mut iter) { - ExtractionResult::Extracted { key } => RangeBegin::AfterKey { key }, - ExtractionResult::SkipTo { key, fallback_key } => { - RangeBegin::IncludingKey { key, fallback_key } - } - ExtractionResult::FilledAtUpload { key, upload } => { - return Ok(Some(RangeBegin::AfterUpload { key, upload })) - } - ExtractionResult::Filled => return Ok(Some(cursor)), - ExtractionResult::NoMore => return Ok(None), - }; - } - - if !server_more { - // We did not fully fill the accumulator despite exhausting all the data we have, - // we're done - return Ok(None); - } - - if prev_req_cursor == cursor { - unreachable!("No progress has been done in the loop. This is a bug, please report it."); - } - } -} - -fn fetch_part_info( - query: &ListPartsQuery, - object: Option, - version: Option, - upload_id: Uuid, -) -> Result<(Vec, Option), Error> { - // Check results - let object = object.ok_or(Error::NoSuchKey)?; - - let obj_version = object - .versions() - .iter() - .find(|v| v.uuid == upload_id && v.is_uploading()) - .ok_or(Error::NoSuchUpload)?; - - let version = version.ok_or(Error::NoSuchKey)?; - - // Cut the beginning of our 2 vectors if required - let (etags, blocks) = match &query.part_number_marker { - Some(marker) => { - let next = marker + 1; - - let part_idx = into_ok_or_err( - version - .parts_etags - .items() - .binary_search_by(|(part_num, _)| part_num.cmp(&next)), - ); - let parts = &version.parts_etags.items()[part_idx..]; - - let block_idx = into_ok_or_err( - version - .blocks - .items() - .binary_search_by(|(vkey, _)| vkey.part_number.cmp(&next)), - ); - let blocks = &version.blocks.items()[block_idx..]; - - (parts, blocks) - } - None => (version.parts_etags.items(), version.blocks.items()), - }; - - // Use the block vector to compute a (part_number, size) vector - let mut size = Vec::<(u64, u64)>::new(); - blocks.iter().for_each(|(key, val)| { - let mut new_size = val.size; - match size.pop() { - Some((part_number, size)) if part_number == key.part_number => new_size += size, - Some(v) => size.push(v), - None => (), - } - size.push((key.part_number, new_size)) - }); - - // Merge the etag vector and size vector to build a PartInfo vector - let max_parts = query.max_parts as usize; - let (mut etag_iter, mut size_iter) = (etags.iter().peekable(), size.iter().peekable()); - - let mut info = Vec::::with_capacity(max_parts); - - while info.len() < max_parts { - match (etag_iter.peek(), size_iter.peek()) { - (Some((ep, etag)), Some((sp, size))) => match ep.cmp(sp) { - Ordering::Less => { - debug!("ETag information ignored due to missing corresponding block information. 
Query: {:?}", query); - etag_iter.next(); - } - Ordering::Equal => { - info.push(PartInfo { - etag: etag.to_string(), - timestamp: obj_version.timestamp, - part_number: *ep, - size: *size, - }); - etag_iter.next(); - size_iter.next(); - } - Ordering::Greater => { - debug!("Block information ignored due to missing corresponding ETag information. Query: {:?}", query); - size_iter.next(); - } - }, - (None, None) => return Ok((info, None)), - _ => { - debug!( - "Additional block or ETag information ignored. Query: {:?}", - query - ); - return Ok((info, None)); - } - } - } - - match info.last() { - Some(part_info) => { - let pagination = Some(part_info.part_number); - Ok((info, pagination)) - } - None => Ok((info, None)), - } -} - -/* - * ListQuery logic - */ - -/// Determine the key from which we want to start fetching objects from the database -/// -/// We choose whether the object at this key must -/// be included or excluded from the response. -/// This key can be the prefix in the base case, or intermediate -/// points in the dataset if we are continuing a previous listing. -impl ListObjectsQuery { - fn build_accumulator(&self) -> Accumulator<String, ObjectInfo> { - Accumulator::<String, ObjectInfo>::new(self.common.page_size) - } - - fn begin(&self) -> Result<RangeBegin, Error> { - if self.is_v2 { - match (&self.continuation_token, &self.start_after) { - // In V2 mode, the continuation token is defined as an opaque - // string in the spec, so we can do whatever we want with it. - // In our case, it is defined as either [ or ] (for inclusive - // or exclusive), followed by a base64-encoded string - // representing the key to start with. - (Some(token), _) => match &token[..1] { - "[" => Ok(RangeBegin::IncludingKey { - key: String::from_utf8(base64::decode(token[1..].as_bytes())?)?, - fallback_key: None, - }), - "]" => Ok(RangeBegin::AfterKey { - key: String::from_utf8(base64::decode(token[1..].as_bytes())?)?, - }), - _ => Err(Error::BadRequest("Invalid continuation token".to_string())), - }, - - // StartAfter has defined semantics in the spec: - // start listing at the first key immediately after. - (_, Some(key)) => Ok(RangeBegin::AfterKey { - key: key.to_string(), - }), - - // In the case where neither is specified, we start - // listing at the specified prefix. If an object has this - // exact same key, we include it. (@TODO is this correct?) - _ => Ok(RangeBegin::IncludingKey { - key: self.common.prefix.to_string(), - fallback_key: None, - }), - } - } else { - match &self.marker { - // In V1 mode, the spec defines the Marker value to mean - // the same thing as the StartAfter value in V2 mode. - Some(key) => Ok(RangeBegin::AfterKey { - key: key.to_string(), - }), - _ => Ok(RangeBegin::IncludingKey { - key: self.common.prefix.to_string(), - fallback_key: None, - }), - } - } - } -} - -impl ListMultipartUploadsQuery { - fn build_accumulator(&self) -> Accumulator<Uuid, UploadInfo> { - Accumulator::<Uuid, UploadInfo>::new(self.common.page_size) - } - - fn begin(&self) -> Result<RangeBegin, Error> { - match (&self.upload_id_marker, &self.key_marker) { - // If both the upload id marker and the key marker are set, - // the spec specifies that we must start listing uploads INCLUDING the given key, - // AFTER the specified upload id (sorted in lexicographic order). - // To enable some optimisations, we emulate "IncludingKey" by extending the upload id - // semantic. We base our reasoning on the hypothesis that S3's upload ids are opaque, - // while Garage's are 32 bytes, hex-encoded, which enables us to extend this query - // with a specific "include" upload id. - (Some(up_marker), Some(key_marker)) => match &up_marker[..]
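// A worked example of the sentinel mechanism (values hypothetical): a
// previous ListMultipartUploads response that stopped at the beginning of
// key "b" sent back next_key_marker = "b" together with
// next_upload_id_marker = "include" (see handle_list_multipart_upload
// above); when the client echoes both markers, the match below turns them
// into RangeBegin::IncludingKey { key: "b", fallback_key: None }.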
{ - "include" => Ok(RangeBegin::IncludingKey { - key: key_marker.to_string(), - fallback_key: None, - }), - uuid => Ok(RangeBegin::AfterUpload { - key: key_marker.to_string(), - upload: s3_put::decode_upload_id(uuid)?, - }), - }, - - // If only the key marker is specified, the spec says that we must start listing - // uploads AFTER the specified key. - (None, Some(key_marker)) => Ok(RangeBegin::AfterKey { - key: key_marker.to_string(), - }), - _ => Ok(RangeBegin::IncludingKey { - key: self.common.prefix.to_string(), - fallback_key: None, - }), - } - } -} - -/* - * Accumulator logic - */ - -trait ExtractAccumulator { - fn extract<'a>( - &mut self, - query: &ListQueryCommon, - cursor: &RangeBegin, - iter: &mut Peekable<Iter<'a, Object>>, - ) -> ExtractionResult; -} - -struct Accumulator<K, V> { - common_prefixes: BTreeSet<String>, - keys: BTreeMap<K, V>, - max_capacity: usize, -} - -type ObjectAccumulator = Accumulator<String, ObjectInfo>; -type UploadAccumulator = Accumulator<Uuid, UploadInfo>; - -impl<K: std::cmp::Ord, V> Accumulator<K, V> { - fn new(page_size: usize) -> Accumulator<K, V> { - Accumulator { - common_prefixes: BTreeSet::<String>::new(), - keys: BTreeMap::<K, V>::new(), - max_capacity: page_size, - } - } - - /// Observe the Object iterator and try to extract a single common prefix - /// - /// This function can consume an arbitrary number of items as long as they share the same - /// common prefix. - fn extract_common_prefix<'a>( - &mut self, - objects: &mut Peekable<Iter<'a, Object>>, - query: &ListQueryCommon, - ) -> Option<ExtractionResult> { - // Get the next object from the iterator - let object = objects.peek().expect("This iterator cannot be empty, as this is checked earlier in the code. This is a logic bug, please report it."); - - // Check if this is a common prefix (this requires a delimiter to have been passed, and for its value to appear in the key) - let pfx = match common_prefix(object, query) { - Some(p) => p, - None => return None, - }; - - // Try to register this prefix - // If this is not possible, we can return early - if !self.try_insert_common_prefix(pfx.to_string()) { - return Some(ExtractionResult::Filled); - } - - // We consume the whole common prefix from the iterator - let mut last_pfx_key = &object.key; - loop { - last_pfx_key = match objects.peek() { - Some(o) if o.key.starts_with(pfx) => &o.key, - Some(_) => { - return Some(ExtractionResult::Extracted { - key: last_pfx_key.to_owned(), - }) - } - None => { - return match key_after_prefix(pfx) { - Some(next) => Some(ExtractionResult::SkipTo { - key: next, - fallback_key: Some(last_pfx_key.to_owned()), - }), - None => Some(ExtractionResult::NoMore), - } - } - }; - - objects.next(); - } - } - - fn is_full(&mut self) -> bool { - self.keys.len() + self.common_prefixes.len() >= self.max_capacity - } - - fn try_insert_common_prefix(&mut self, key: String) -> bool { - // If we already have an entry, we can continue - if self.common_prefixes.contains(&key) { - return true; - } - - // Otherwise, we need to check if we can add it - match self.is_full() { - true => false, - false => { - self.common_prefixes.insert(key); - true - } - } - } - - fn try_insert_entry(&mut self, key: K, value: V) -> bool { - // It is impossible to add the same key twice; doing so is an error - assert!(!self.keys.contains_key(&key)); - - match self.is_full() { - true => false, - false => { - self.keys.insert(key, value); - true - } - } - } -} - -impl ExtractAccumulator for ObjectAccumulator { - fn extract<'a>( - &mut self, - query: &ListQueryCommon, - _cursor: &RangeBegin, - objects: &mut Peekable<Iter<'a, Object>>, - ) -> ExtractionResult { - if let Some(e) = self.extract_common_prefix(objects, query) { - return e; - } - - let object =
objects.next().expect("This iterator cannot be empty, as this is checked earlier in the code. This is a logic bug, please report it."); - - let version = match object.versions().iter().find(|x| x.is_data()) { - Some(v) => v, - None => unreachable!( - "Expected objects to have data due to earlier filtering. This is a logic bug." - ), - }; - - let meta = match &version.state { - ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta, - ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => meta, - _ => unreachable!(), - }; - let info = ObjectInfo { - last_modified: version.timestamp, - size: meta.size, - etag: meta.etag.to_string(), - }; - - match self.try_insert_entry(object.key.clone(), info) { - true => ExtractionResult::Extracted { - key: object.key.clone(), - }, - false => ExtractionResult::Filled, - } - } -} - -impl ExtractAccumulator for UploadAccumulator { - /// Observe the iterator, process a single key, and try to extract one or more upload entries - /// - /// This function processes a single object from the iterator, which can contain an arbitrary - /// number of versions, and thus "uploads". - fn extract<'a>( - &mut self, - query: &ListQueryCommon, - cursor: &RangeBegin, - objects: &mut Peekable<Iter<'a, Object>>, - ) -> ExtractionResult { - if let Some(e) = self.extract_common_prefix(objects, query) { - return e; - } - - // Get the next object from the iterator - let object = objects.next().expect("This iterator cannot be empty, as this is checked earlier in the code. This is a logic bug, please report it."); - - let mut uploads_for_key = object - .versions() - .iter() - .filter(|x| x.is_uploading()) - .collect::<Vec<&ObjectVersion>>(); - - // S3 logic requires lexicographically sorted upload ids. - uploads_for_key.sort_unstable_by_key(|e| e.uuid); - - // Skip results if an upload marker is provided - if let RangeBegin::AfterUpload { upload, .. } = cursor { - // Because our data is sorted, we can use a binary search to find the UUID, - // or to find the position where it would have been inserted. Once this position is found, - // we use it to discard the first part of the array. - let idx = match uploads_for_key.binary_search_by(|e| e.uuid.cmp(upload)) { - // We start after the found UUID, so we need to discard the value it points to. - // In the worst case, the UUID is the last element, which leads us to an empty array, - // but we never go out of bounds. - Ok(i) => i + 1, - // If the UUID is not found, the upload may have been deleted between the two requests; - // this function returns the position where it would have been inserted, - // so the value it points to is greater than our marker and we need to keep it. - Err(i) => i, - }; - uploads_for_key = uploads_for_key[idx..].to_vec(); - } - - let mut iter = uploads_for_key.iter(); - - // The first entry is a special case, - // as it changes our result enum type - let first_upload = match iter.next() { - Some(u) => u, - None => { - return ExtractionResult::Extracted { - key: object.key.clone(), - } - } - }; - let first_up_info = UploadInfo { - key: object.key.to_string(), - timestamp: first_upload.timestamp, - }; - if !self.try_insert_entry(first_upload.uuid, first_up_info) { - return ExtractionResult::Filled; - } - - // We can then collect the remaining uploads in a loop - let mut prev_uuid = first_upload.uuid; - for upload in iter { - let up_info = UploadInfo { - key: object.key.to_string(), - timestamp: upload.timestamp, - }; - - // Insert data in our accumulator. - // If it is full, return information to paginate.
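// Worked example, mirroring test_extract_upload below: with a capacity of
// 2, uploads 0x01 < 0x80 < 0x8f < 0xdd under key "b", and a cursor
// AfterUpload { upload: 0x01 }, the entries 0x80 and 0x8f are stored;
// inserting 0xdd then fails because the accumulator is full, so we return
// FilledAtUpload { key: "b", upload: 0x8f } and the next page resumes
// right after upload 0x8f.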
- if !self.try_insert_entry(upload.uuid, up_info) { - return ExtractionResult::FilledAtUpload { - key: object.key.clone(), - upload: prev_uuid, - }; - } - // Update our last added UUID - prev_uuid = upload.uuid; - } - - // We successfully collected all the uploads - ExtractionResult::Extracted { - key: object.key.clone(), - } - } -} - -/* - * Utility functions - */ - -/// This is a stub for Result::into_ok_or_err that is not yet in Rust stable -fn into_ok_or_err(r: Result) -> T { - match r { - Ok(r) => r, - Err(r) => r, - } -} - -/// Returns the common prefix of the object given the query prefix and delimiter -fn common_prefix<'a>(object: &'a Object, query: &ListQueryCommon) -> Option<&'a str> { - match &query.delimiter { - Some(delimiter) => object.key[query.prefix.len()..] - .find(delimiter) - .map(|i| &object.key[..query.prefix.len() + i + delimiter.len()]), - None => None, - } -} - -/// URIencode a value if needed -fn uriencode_maybe(s: &str, yes: bool) -> s3_xml::Value { - if yes { - s3_xml::Value(uri_encode(s, true)) - } else { - s3_xml::Value(s.to_string()) - } -} - -const UTF8_BEFORE_LAST_CHAR: char = '\u{10FFFE}'; - -/// Compute the key after the prefix -fn key_after_prefix(pfx: &str) -> Option { - let mut next = pfx.to_string(); - while !next.is_empty() { - let tail = next.pop().unwrap(); - if tail >= char::MAX { - continue; - } - - // Circumvent a limitation of RangeFrom that overflow earlier than needed - // See: https://doc.rust-lang.org/core/ops/struct.RangeFrom.html - let new_tail = if tail == UTF8_BEFORE_LAST_CHAR { - char::MAX - } else { - (tail..).nth(1).unwrap() - }; - - next.push(new_tail); - return Some(next); - } - - None -} - -/* - * Unit tests of this module - */ -#[cfg(test)] -mod tests { - use super::*; - use garage_model::version_table::*; - use garage_util::*; - use std::iter::FromIterator; - - const TS: u64 = 1641394898314; - - fn bucket() -> Uuid { - Uuid::from([0x42; 32]) - } - - fn query() -> ListMultipartUploadsQuery { - ListMultipartUploadsQuery { - common: ListQueryCommon { - prefix: "".to_string(), - delimiter: Some("/".to_string()), - page_size: 1000, - urlencode_resp: false, - bucket_name: "a".to_string(), - bucket_id: Uuid::from([0x00; 32]), - }, - key_marker: None, - upload_id_marker: None, - } - } - - fn objs() -> Vec { - vec![ - Object::new( - bucket(), - "a/b/c".to_string(), - vec![objup_version([0x01; 32])], - ), - Object::new(bucket(), "d".to_string(), vec![objup_version([0x01; 32])]), - ] - } - - fn objup_version(uuid: [u8; 32]) -> ObjectVersion { - ObjectVersion { - uuid: Uuid::from(uuid), - timestamp: TS, - state: ObjectVersionState::Uploading(ObjectVersionHeaders { - content_type: "text/plain".to_string(), - other: BTreeMap::::new(), - }), - } - } - - #[test] - fn test_key_after_prefix() { - assert_eq!(UTF8_BEFORE_LAST_CHAR as u32, (char::MAX as u32) - 1); - assert_eq!(key_after_prefix("a/b/").unwrap().as_str(), "a/b0"); - assert_eq!(key_after_prefix("€").unwrap().as_str(), "₭"); - assert_eq!( - key_after_prefix("􏿽").unwrap().as_str(), - String::from(char::from_u32(0x10FFFE).unwrap()) - ); - - // When the last character is the biggest UTF8 char - let a = String::from_iter(['a', char::MAX].iter()); - assert_eq!(key_after_prefix(a.as_str()).unwrap().as_str(), "b"); - - // When all characters are the biggest UTF8 char - let b = String::from_iter([char::MAX; 3].iter()); - assert!(key_after_prefix(b.as_str()).is_none()); - - // Check utf8 surrogates - let c = String::from('\u{D7FF}'); - assert_eq!( - 
key_after_prefix(c.as_str()).unwrap().as_str(), - String::from('\u{E000}') - ); - - // Check the character before the biggest one - let d = String::from('\u{10FFFE}'); - assert_eq!( - key_after_prefix(d.as_str()).unwrap().as_str(), - String::from(char::MAX) - ); - } - - #[test] - fn test_common_prefixes() { - let mut query = query(); - let objs = objs(); - - query.common.prefix = "a/".to_string(); - assert_eq!( - common_prefix(objs.get(0).unwrap(), &query.common), - Some("a/b/") - ); - - query.common.prefix = "a/b/".to_string(); - assert_eq!(common_prefix(objs.get(0).unwrap(), &query.common), None); - } - - #[test] - fn test_extract_common_prefix() { - let mut query = query(); - query.common.prefix = "a/".to_string(); - let objs = objs(); - let mut acc = UploadAccumulator::new(query.common.page_size); - - let mut iter = objs.iter().peekable(); - match acc.extract_common_prefix(&mut iter, &query.common) { - Some(ExtractionResult::Extracted { key }) => assert_eq!(key, "a/b/c".to_string()), - _ => panic!("wrong result"), - } - assert_eq!(acc.common_prefixes.len(), 1); - assert_eq!(acc.common_prefixes.iter().next().unwrap(), "a/b/"); - } - - #[test] - fn test_extract_upload() { - let objs = vec![ - Object::new( - bucket(), - "b".to_string(), - vec![ - objup_version([0x01; 32]), - objup_version([0x80; 32]), - objup_version([0x8f; 32]), - objup_version([0xdd; 32]), - ], - ), - Object::new(bucket(), "c".to_string(), vec![]), - ]; - - let mut acc = UploadAccumulator::new(2); - let mut start = RangeBegin::AfterUpload { - key: "b".to_string(), - upload: Uuid::from([0x01; 32]), - }; - - let mut iter = objs.iter().peekable(); - - // Check the case where we skip some uploads - match acc.extract(&(query().common), &start, &mut iter) { - ExtractionResult::FilledAtUpload { key, upload } => { - assert_eq!(key, "b"); - assert_eq!(upload, Uuid::from([0x8f; 32])); - } - _ => panic!("wrong result"), - }; - - assert_eq!(acc.keys.len(), 2); - assert_eq!( - acc.keys.get(&Uuid::from([0x80; 32])).unwrap(), - &UploadInfo { - timestamp: TS, - key: "b".to_string() - } - ); - assert_eq!( - acc.keys.get(&Uuid::from([0x8f; 32])).unwrap(), - &UploadInfo { - timestamp: TS, - key: "b".to_string() - } - ); - - acc = UploadAccumulator::new(2); - start = RangeBegin::AfterUpload { - key: "b".to_string(), - upload: Uuid::from([0xff; 32]), - }; - iter = objs.iter().peekable(); - - // Check the case where we skip all the uploads - match acc.extract(&(query().common), &start, &mut iter) { - ExtractionResult::Extracted { key } if key.as_str() == "b" => (), - _ => panic!("wrong result"), - }; - } - - #[tokio::test] - async fn test_fetch_uploads_no_result() -> Result<(), Error> { - let query = query(); - let mut acc = query.build_accumulator(); - let page = fetch_list_entries( - &query.common, - query.begin()?, - &mut acc, - |_, _, _| async move { Ok(vec![]) }, - ) - .await?; - assert_eq!(page, None); - assert_eq!(acc.common_prefixes.len(), 0); - assert_eq!(acc.keys.len(), 0); - - Ok(()) - } - - #[tokio::test] - async fn test_fetch_uploads_basic() -> Result<(), Error> { - let query = query(); - let mut acc = query.build_accumulator(); - let mut fake_io = |_, _, _| async move { Ok(objs()) }; - let page = - fetch_list_entries(&query.common, query.begin()?, &mut acc, &mut fake_io).await?; - assert_eq!(page, None); - assert_eq!(acc.common_prefixes.len(), 1); - assert_eq!(acc.keys.len(), 1); - assert!(acc.common_prefixes.contains("a/")); - - Ok(()) - } - - #[tokio::test] - async fn test_fetch_uploads_advanced() -> Result<(), Error> { - let 
mut query = query(); - query.common.page_size = 2; - - let mut fake_io = |_, k: Option, _| async move { - Ok(match k.as_deref() { - Some("") => vec![ - Object::new(bucket(), "b/a".to_string(), vec![objup_version([0x01; 32])]), - Object::new(bucket(), "b/b".to_string(), vec![objup_version([0x01; 32])]), - Object::new(bucket(), "b/c".to_string(), vec![objup_version([0x01; 32])]), - ], - Some("b0") => vec![ - Object::new(bucket(), "c/a".to_string(), vec![objup_version([0x01; 32])]), - Object::new(bucket(), "c/b".to_string(), vec![objup_version([0x01; 32])]), - Object::new(bucket(), "c/c".to_string(), vec![objup_version([0x02; 32])]), - ], - Some("c0") => vec![Object::new( - bucket(), - "d".to_string(), - vec![objup_version([0x01; 32])], - )], - _ => panic!("wrong value {:?}", k), - }) - }; - - let mut acc = query.build_accumulator(); - let page = - fetch_list_entries(&query.common, query.begin()?, &mut acc, &mut fake_io).await?; - assert_eq!( - page, - Some(RangeBegin::IncludingKey { - key: "c0".to_string(), - fallback_key: Some("c/c".to_string()) - }) - ); - assert_eq!(acc.common_prefixes.len(), 2); - assert_eq!(acc.keys.len(), 0); - assert!(acc.common_prefixes.contains("b/")); - assert!(acc.common_prefixes.contains("c/")); - - Ok(()) - } - - fn version() -> Version { - let uuid = Uuid::from([0x08; 32]); - - let blocks = vec![ - ( - VersionBlockKey { - part_number: 1, - offset: 1, - }, - VersionBlock { - hash: uuid, - size: 3, - }, - ), - ( - VersionBlockKey { - part_number: 1, - offset: 2, - }, - VersionBlock { - hash: uuid, - size: 2, - }, - ), - ( - VersionBlockKey { - part_number: 2, - offset: 1, - }, - VersionBlock { - hash: uuid, - size: 8, - }, - ), - ( - VersionBlockKey { - part_number: 5, - offset: 1, - }, - VersionBlock { - hash: uuid, - size: 7, - }, - ), - ( - VersionBlockKey { - part_number: 8, - offset: 1, - }, - VersionBlock { - hash: uuid, - size: 5, - }, - ), - ]; - let etags = vec![ - (1, "etag1".to_string()), - (3, "etag2".to_string()), - (5, "etag3".to_string()), - (8, "etag4".to_string()), - (9, "etag5".to_string()), - ]; - - Version { - bucket_id: uuid, - key: "a".to_string(), - uuid, - deleted: false.into(), - blocks: crdt::Map::::from_iter(blocks), - parts_etags: crdt::Map::::from_iter(etags), - } - } - - fn obj() -> Object { - Object::new(bucket(), "d".to_string(), vec![objup_version([0x08; 32])]) - } - - #[test] - fn test_fetch_part_info() -> Result<(), Error> { - let uuid = Uuid::from([0x08; 32]); - let mut query = ListPartsQuery { - bucket_name: "a".to_string(), - bucket_id: uuid, - key: "a".to_string(), - upload_id: "xx".to_string(), - part_number_marker: None, - max_parts: 2, - }; - - assert!( - fetch_part_info(&query, None, None, uuid).is_err(), - "No object and version should fail" - ); - assert!( - fetch_part_info(&query, Some(obj()), None, uuid).is_err(), - "No version should faild" - ); - assert!( - fetch_part_info(&query, None, Some(version()), uuid).is_err(), - "No object should fail" - ); - - // Start from the beginning but with limited size to trigger pagination - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; - assert_eq!(pagination.unwrap(), 5); - assert_eq!( - info, - vec![ - PartInfo { - etag: "etag1".to_string(), - timestamp: TS, - part_number: 1, - size: 5 - }, - PartInfo { - etag: "etag3".to_string(), - timestamp: TS, - part_number: 5, - size: 7 - }, - ] - ); - - // Use previous pagination to make a new request - query.part_number_marker = Some(pagination.unwrap()); - let (info, pagination) = 
fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; - assert!(pagination.is_none()); - assert_eq!( - info, - vec![PartInfo { - etag: "etag4".to_string(), - timestamp: TS, - part_number: 8, - size: 5 - },] - ); - - // Trying to access a part that is way larger than registered ones - query.part_number_marker = Some(9999); - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; - assert!(pagination.is_none()); - assert_eq!(info, vec![]); - - // Try without any limitation - query.max_parts = 1000; - query.part_number_marker = None; - let (info, pagination) = fetch_part_info(&query, Some(obj()), Some(version()), uuid)?; - assert!(pagination.is_none()); - assert_eq!( - info, - vec![ - PartInfo { - etag: "etag1".to_string(), - timestamp: TS, - part_number: 1, - size: 5 - }, - PartInfo { - etag: "etag3".to_string(), - timestamp: TS, - part_number: 5, - size: 7 - }, - PartInfo { - etag: "etag4".to_string(), - timestamp: TS, - part_number: 8, - size: 5 - }, - ] - ); - - Ok(()) - } -} diff --git a/src/api/s3_post_object.rs b/src/api/s3_post_object.rs deleted file mode 100644 index 585e0304..00000000 --- a/src/api/s3_post_object.rs +++ /dev/null @@ -1,499 +0,0 @@ -use std::collections::HashMap; -use std::convert::TryInto; -use std::ops::RangeInclusive; -use std::sync::Arc; -use std::task::{Context, Poll}; - -use bytes::Bytes; -use chrono::{DateTime, Duration, Utc}; -use futures::{Stream, StreamExt}; -use hyper::header::{self, HeaderMap, HeaderName, HeaderValue}; -use hyper::{Body, Request, Response, StatusCode}; -use multer::{Constraints, Multipart, SizeLimit}; -use serde::Deserialize; - -use garage_model::garage::Garage; - -use crate::api_server::resolve_bucket; -use crate::error::*; -use crate::s3_put::{get_headers, save_stream}; -use crate::s3_xml; -use crate::signature::payload::{parse_date, verify_v4}; - -pub async fn handle_post_object( - garage: Arc<Garage>, - req: Request<Body>, - bucket: String, -) -> Result<Response<Body>, Error> { - let boundary = req - .headers() - .get(header::CONTENT_TYPE) - .and_then(|ct| ct.to_str().ok()) - .and_then(|ct| multer::parse_boundary(ct).ok()) - .ok_or_bad_request("Could not get multipart boundary")?; - - // 16k seems plenty for a header. 5G is the max size of a single part, so it seems reasonable - // for a PostObject - let constraints = Constraints::new().size_limit( - SizeLimit::new() - .per_field(16 * 1024) - .for_field("file", 5 * 1024 * 1024 * 1024), - ); - - let (head, body) = req.into_parts(); - let mut multipart = Multipart::with_constraints(body, boundary, constraints); - - let mut params = HeaderMap::new(); - let field = loop { - let field = if let Some(field) = multipart.next_field().await? { - field - } else { - return Err(Error::BadRequest( - "Request did not contain a file".to_owned(), - )); - }; - let name: HeaderName = if let Some(Ok(name)) = field.name().map(TryInto::try_into) { - name - } else { - continue; - }; - if name == "file" { - break field; - } - - if let Ok(content) = HeaderValue::from_str(&field.text().await?) { - match name.as_str() { - "tag" => (/* tags need to be re-encoded, but we don't support them yet anyway */), - "acl" => { - if params.insert("x-amz-acl", content).is_some() { - return Err(Error::BadRequest( - "Field 'acl' provided more than once".to_string(), - )); - } - } - _ => { - if params.insert(&name, content).is_some() { - return Err(Error::BadRequest(format!( - "Field '{}' provided more than once", - name - ))); - } - } - } - } - }; - - // The current part is the file.
Do some checks before handing it over to the PutObject code - let key = params - .get("key") - .ok_or_bad_request("No key was provided")? - .to_str()?; - let credential = params - .get("x-amz-credential") - .ok_or_else(|| { - Error::Forbidden("Garage does not support anonymous access yet".to_string()) - })? - .to_str()?; - let policy = params - .get("policy") - .ok_or_bad_request("No policy was provided")? - .to_str()?; - let signature = params - .get("x-amz-signature") - .ok_or_bad_request("No signature was provided")? - .to_str()?; - let date = params - .get("x-amz-date") - .ok_or_bad_request("No date was provided")? - .to_str()?; - - let key = if key.contains("${filename}") { - // If no filename is provided, don't replace. This matches the behavior of AWS. - if let Some(filename) = field.file_name() { - key.replace("${filename}", filename) - } else { - key.to_owned() - } - } else { - key.to_owned() - }; - - let date = parse_date(date)?; - let api_key = verify_v4(&garage, credential, &date, signature, policy.as_bytes()).await?; - - let bucket_id = resolve_bucket(&garage, &bucket, &api_key).await?; - - if !api_key.allow_write(&bucket_id) { - return Err(Error::Forbidden( - "Operation is not allowed for this key.".to_string(), - )); - } - - let decoded_policy = base64::decode(&policy)?; - let decoded_policy: Policy = - serde_json::from_slice(&decoded_policy).ok_or_bad_request("Invalid policy")?; - - let expiration: DateTime<Utc> = DateTime::parse_from_rfc3339(&decoded_policy.expiration) - .ok_or_bad_request("Invalid expiration date")? - .into(); - if Utc::now() - expiration > Duration::zero() { - return Err(Error::BadRequest( - "Expiration date is in the past".to_string(), - )); - } - - let mut conditions = decoded_policy.into_conditions()?; - - for (param_key, value) in params.iter() { - let mut param_key = param_key.to_string(); - param_key.make_ascii_lowercase(); - match param_key.as_str() { - "policy" | "x-amz-signature" => (), // these are always accepted, as they are required to validate other fields - "content-type" => { - let conds = conditions.params.remove("content-type").ok_or_else(|| { - Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key)) - })?; - for cond in conds { - let ok = match cond { - Operation::Equal(s) => s.as_str() == value, - Operation::StartsWith(s) => { - value.to_str()?.split(',').all(|v| v.starts_with(&s)) - } - }; - if !ok { - return Err(Error::BadRequest(format!( - "Key '{}' has value not allowed in policy", - param_key - ))); - } - } - } - "key" => { - let conds = conditions.params.remove("key").ok_or_else(|| { - Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key)) - })?; - for cond in conds { - let ok = match cond { - Operation::Equal(s) => s == key, - Operation::StartsWith(s) => key.starts_with(&s), - }; - if !ok { - return Err(Error::BadRequest(format!( - "Key '{}' has value not allowed in policy", - param_key - ))); - } - } - } - _ => { - if param_key.starts_with("x-ignore-") { - // If an x-ignore- field is provided in the policy, it's not removed here, so it will be - // rejected as being provided in the policy but not in the request. As odd as it is, this is - // how AWS seems to behave.
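// As an illustration of the condition matching in the arms above and
// below (mirroring test_policy_1 at the end of this file): a policy entry
// ["starts-with", "$key", "user/eric/"] parses to
// Operation::StartsWith("user/eric/"), so a submitted key
// "user/eric/pic.jpg" is accepted while "photos/pic.jpg" is rejected
// with a "value not allowed in policy" error.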
- continue; - } - let conds = conditions.params.remove(¶m_key).ok_or_else(|| { - Error::BadRequest(format!("Key '{}' is not allowed in policy", param_key)) - })?; - for cond in conds { - let ok = match cond { - Operation::Equal(s) => s.as_str() == value, - Operation::StartsWith(s) => value.to_str()?.starts_with(s.as_str()), - }; - if !ok { - return Err(Error::BadRequest(format!( - "Key '{}' has value not allowed in policy", - param_key - ))); - } - } - } - } - } - - if let Some((param_key, _)) = conditions.params.iter().next() { - return Err(Error::BadRequest(format!( - "Key '{}' is required in policy, but no value was provided", - param_key - ))); - } - - let headers = get_headers(¶ms)?; - - let stream = field.map(|r| r.map_err(Into::into)); - let (_, md5) = save_stream( - garage, - headers, - StreamLimiter::new(stream, conditions.content_length), - bucket_id, - &key, - None, - None, - ) - .await?; - - let etag = format!("\"{}\"", md5); - - let resp = if let Some(mut target) = params - .get("success_action_redirect") - .and_then(|h| h.to_str().ok()) - .and_then(|u| url::Url::parse(u).ok()) - .filter(|u| u.scheme() == "https" || u.scheme() == "http") - { - target - .query_pairs_mut() - .append_pair("bucket", &bucket) - .append_pair("key", &key) - .append_pair("etag", &etag); - let target = target.to_string(); - Response::builder() - .status(StatusCode::SEE_OTHER) - .header(header::LOCATION, target.clone()) - .header(header::ETAG, etag) - .body(target.into())? - } else { - let path = head - .uri - .into_parts() - .path_and_query - .map(|paq| paq.path().to_string()) - .unwrap_or_else(|| "/".to_string()); - let authority = head - .headers - .get(header::HOST) - .and_then(|h| h.to_str().ok()) - .unwrap_or_default(); - let proto = if !authority.is_empty() { - "https://" - } else { - "" - }; - - let url_key: String = form_urlencoded::byte_serialize(key.as_bytes()) - .flat_map(str::chars) - .collect(); - let location = format!("{}{}{}{}", proto, authority, path, url_key); - - let action = params - .get("success_action_status") - .and_then(|h| h.to_str().ok()) - .unwrap_or("204"); - let builder = Response::builder() - .header(header::LOCATION, location.clone()) - .header(header::ETAG, etag.clone()); - match action { - "200" => builder.status(StatusCode::OK).body(Body::empty())?, - "201" => { - let xml = s3_xml::PostObject { - xmlns: (), - location: s3_xml::Value(location), - bucket: s3_xml::Value(bucket), - key: s3_xml::Value(key), - etag: s3_xml::Value(etag), - }; - let body = s3_xml::to_xml_with_header(&xml)?; - builder - .status(StatusCode::CREATED) - .body(Body::from(body.into_bytes()))? 
- } - _ => builder.status(StatusCode::NO_CONTENT).body(Body::empty())?, - } - }; - - Ok(resp) -} - -#[derive(Deserialize)] -struct Policy { - expiration: String, - conditions: Vec, -} - -impl Policy { - fn into_conditions(self) -> Result { - let mut params = HashMap::<_, Vec<_>>::new(); - - let mut length = (0, u64::MAX); - for condition in self.conditions { - match condition { - PolicyCondition::Equal(map) => { - if map.len() != 1 { - return Err(Error::BadRequest("Invalid policy item".to_owned())); - } - let (mut k, v) = map.into_iter().next().expect("size was verified"); - k.make_ascii_lowercase(); - params.entry(k).or_default().push(Operation::Equal(v)); - } - PolicyCondition::OtherOp([cond, mut key, value]) => { - if key.remove(0) != '$' { - return Err(Error::BadRequest("Invalid policy item".to_owned())); - } - key.make_ascii_lowercase(); - match cond.as_str() { - "eq" => { - params.entry(key).or_default().push(Operation::Equal(value)); - } - "starts-with" => { - params - .entry(key) - .or_default() - .push(Operation::StartsWith(value)); - } - _ => return Err(Error::BadRequest("Invalid policy item".to_owned())), - } - } - PolicyCondition::SizeRange(key, min, max) => { - if key == "content-length-range" { - length.0 = length.0.max(min); - length.1 = length.1.min(max); - } else { - return Err(Error::BadRequest("Invalid policy item".to_owned())); - } - } - } - } - Ok(Conditions { - params, - content_length: RangeInclusive::new(length.0, length.1), - }) - } -} - -/// A single condition from a policy -#[derive(Debug, Deserialize)] -#[serde(untagged)] -enum PolicyCondition { - // will contain a single key-value pair - Equal(HashMap), - OtherOp([String; 3]), - SizeRange(String, u64, u64), -} - -#[derive(Debug)] -struct Conditions { - params: HashMap>, - content_length: RangeInclusive, -} - -#[derive(Debug, PartialEq, Eq)] -enum Operation { - Equal(String), - StartsWith(String), -} - -struct StreamLimiter { - inner: T, - length: RangeInclusive, - read: u64, -} - -impl StreamLimiter { - fn new(stream: T, length: RangeInclusive) -> Self { - StreamLimiter { - inner: stream, - length, - read: 0, - } - } -} - -impl Stream for StreamLimiter -where - T: Stream> + Unpin, -{ - type Item = Result; - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - ctx: &mut Context<'_>, - ) -> Poll> { - let res = std::pin::Pin::new(&mut self.inner).poll_next(ctx); - match &res { - Poll::Ready(Some(Ok(bytes))) => { - self.read += bytes.len() as u64; - // optimization to fail early when we know before the end it's too long - if self.length.end() < &self.read { - return Poll::Ready(Some(Err(Error::BadRequest( - "File size does not match policy".to_owned(), - )))); - } - } - Poll::Ready(None) => { - if !self.length.contains(&self.read) { - return Poll::Ready(Some(Err(Error::BadRequest( - "File size does not match policy".to_owned(), - )))); - } - } - _ => {} - } - res - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_policy_1() { - let policy_json = br#" -{ "expiration": "2007-12-01T12:00:00.000Z", - "conditions": [ - {"acl": "public-read" }, - {"bucket": "johnsmith" }, - ["starts-with", "$key", "user/eric/"] - ] -} - "#; - let policy_2: Policy = serde_json::from_slice(&policy_json[..]).unwrap(); - let mut conditions = policy_2.into_conditions().unwrap(); - - assert_eq!( - conditions.params.remove(&"acl".to_string()), - Some(vec![Operation::Equal("public-read".into())]) - ); - assert_eq!( - conditions.params.remove(&"bucket".to_string()), - Some(vec![Operation::Equal("johnsmith".into())]) - ); - 
assert_eq!( - conditions.params.remove(&"key".to_string()), - Some(vec![Operation::StartsWith("user/eric/".into())]) - ); - assert!(conditions.params.is_empty()); - assert_eq!(conditions.content_length, 0..=u64::MAX); - } - - #[test] - fn test_policy_2() { - let policy_json = br#" -{ "expiration": "2007-12-01T12:00:00.000Z", - "conditions": [ - [ "eq", "$acl", "public-read" ], - ["starts-with", "$Content-Type", "image/"], - ["starts-with", "$success_action_redirect", ""], - ["content-length-range", 1048576, 10485760] - ] -} - "#; - let policy_2: Policy = serde_json::from_slice(&policy_json[..]).unwrap(); - let mut conditions = policy_2.into_conditions().unwrap(); - - assert_eq!( - conditions.params.remove(&"acl".to_string()), - Some(vec![Operation::Equal("public-read".into())]) - ); - assert_eq!( - conditions.params.remove("content-type").unwrap(), - vec![Operation::StartsWith("image/".into())] - ); - assert_eq!( - conditions - .params - .remove(&"success_action_redirect".to_string()), - Some(vec![Operation::StartsWith("".into())]) - ); - assert!(conditions.params.is_empty()); - assert_eq!(conditions.content_length, 1048576..=10485760); - } -} diff --git a/src/api/s3_put.rs b/src/api/s3_put.rs deleted file mode 100644 index ed0bf00b..00000000 --- a/src/api/s3_put.rs +++ /dev/null @@ -1,753 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet, VecDeque}; -use std::sync::Arc; - -use futures::prelude::*; -use hyper::body::{Body, Bytes}; -use hyper::header::{HeaderMap, HeaderValue}; -use hyper::{Request, Response}; -use md5::{digest::generic_array::*, Digest as Md5Digest, Md5}; -use sha2::Sha256; - -use garage_table::*; -use garage_util::data::*; -use garage_util::error::Error as GarageError; -use garage_util::time::*; - -use garage_block::manager::INLINE_THRESHOLD; -use garage_model::block_ref_table::*; -use garage_model::garage::Garage; -use garage_model::object_table::*; -use garage_model::version_table::*; - -use crate::error::*; -use crate::s3_xml; -use crate::signature::verify_signed_content; - -pub async fn handle_put( - garage: Arc, - req: Request, - bucket_id: Uuid, - key: &str, - content_sha256: Option, -) -> Result, Error> { - // Retrieve interesting headers from request - let headers = get_headers(req.headers())?; - debug!("Object headers: {:?}", headers); - - let content_md5 = match req.headers().get("content-md5") { - Some(x) => Some(x.to_str()?.to_string()), - None => None, - }; - - let (_head, body) = req.into_parts(); - let body = body.map_err(Error::from); - - save_stream( - garage, - headers, - body, - bucket_id, - key, - content_md5, - content_sha256, - ) - .await - .map(|(uuid, md5)| put_response(uuid, md5)) -} - -pub(crate) async fn save_stream> + Unpin>( - garage: Arc, - headers: ObjectVersionHeaders, - body: S, - bucket_id: Uuid, - key: &str, - content_md5: Option, - content_sha256: Option, -) -> Result<(Uuid, String), Error> { - // Generate identity of new version - let version_uuid = gen_uuid(); - let version_timestamp = now_msec(); - - let mut chunker = StreamChunker::new(body, garage.config.block_size); - let first_block = chunker.next().await?.unwrap_or_default(); - - // If body is small enough, store it directly in the object table - // as "inline data". We can then return immediately. 
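// A sketch of the two storage paths taken below (INLINE_THRESHOLD is a
// constant imported from garage_block::manager; sizes here are
// hypothetical):
//   body smaller than INLINE_THRESHOLD -> ObjectVersionData::Inline,
//     a single object_table insert and an immediate return;
//   anything larger -> an ObjectVersionState::Uploading placeholder,
//     then blocks are streamed out and the version is completed as
//     ObjectVersionData::FirstBlock.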
- if first_block.len() < INLINE_THRESHOLD { - let mut md5sum = Md5::new(); - md5sum.update(&first_block[..]); - let data_md5sum = md5sum.finalize(); - let data_md5sum_hex = hex::encode(data_md5sum); - - let data_sha256sum = sha256sum(&first_block[..]); - - ensure_checksum_matches( - data_md5sum.as_slice(), - data_sha256sum, - content_md5.as_deref(), - content_sha256, - )?; - - let object_version = ObjectVersion { - uuid: version_uuid, - timestamp: version_timestamp, - state: ObjectVersionState::Complete(ObjectVersionData::Inline( - ObjectVersionMeta { - headers, - size: first_block.len() as u64, - etag: data_md5sum_hex.clone(), - }, - first_block, - )), - }; - - let object = Object::new(bucket_id, key.into(), vec![object_version]); - garage.object_table.insert(&object).await?; - - return Ok((version_uuid, data_md5sum_hex)); - } - - // Write version identifier in object table so that we have a trace - // that we are uploading something - let mut object_version = ObjectVersion { - uuid: version_uuid, - timestamp: version_timestamp, - state: ObjectVersionState::Uploading(headers.clone()), - }; - let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); - garage.object_table.insert(&object).await?; - - // Initialize corresponding entry in version table - // Write this entry now, even with empty block list, - // to prevent block_ref entries from being deleted (they can be deleted - // if they reference a version that isn't found in the version table) - let version = Version::new(version_uuid, bucket_id, key.into(), false); - garage.version_table.insert(&version).await?; - - // Transfer data and verify checksum - let first_block_hash = blake2sum(&first_block[..]); - let tx_result = read_and_put_blocks( - &garage, - &version, - 1, - first_block, - first_block_hash, - &mut chunker, - ) - .await - .and_then(|(total_size, data_md5sum, data_sha256sum)| { - ensure_checksum_matches( - data_md5sum.as_slice(), - data_sha256sum, - content_md5.as_deref(), - content_sha256, - ) - .map(|()| (total_size, data_md5sum)) - }); - - // If something went wrong, clean up - let (total_size, md5sum_arr) = match tx_result { - Ok(rv) => rv, - Err(e) => { - // Mark the object as aborted; this will free the blocks further down - object_version.state = ObjectVersionState::Aborted; - let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); - garage.object_table.insert(&object).await?; - return Err(e); - } - }; - - // Save final object state, marked as Complete - let md5sum_hex = hex::encode(md5sum_arr); - object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( - ObjectVersionMeta { - headers, - size: total_size, - etag: md5sum_hex.clone(), - }, - first_block_hash, - )); - let object = Object::new(bucket_id, key.into(), vec![object_version]); - garage.object_table.insert(&object).await?; - - Ok((version_uuid, md5sum_hex)) -} - -/// Validate MD5 sum against content-md5 header -/// and sha256sum against signed content-sha256 -fn ensure_checksum_matches( - data_md5sum: &[u8], - data_sha256sum: garage_util::data::FixedBytes32, - content_md5: Option<&str>, - content_sha256: Option<FixedBytes32>, -) -> Result<(), Error> { - if let Some(expected_sha256) = content_sha256 { - if expected_sha256 != data_sha256sum { - return Err(Error::BadRequest( - "Unable to validate x-amz-content-sha256".to_string(), - )); - } else { - trace!("Successfully validated x-amz-content-sha256"); - } - } - if let Some(expected_md5) = content_md5 { - if expected_md5.trim_matches('"') !=
base64::encode(data_md5sum) { - return Err(Error::BadRequest( - "Unable to validate content-md5".to_string(), - )); - } else { - trace!("Successfully validated content-md5"); - } - } - Ok(()) -} - -async fn read_and_put_blocks> + Unpin>( - garage: &Garage, - version: &Version, - part_number: u64, - first_block: Vec, - first_block_hash: Hash, - chunker: &mut StreamChunker, -) -> Result<(u64, GenericArray, Hash), Error> { - let mut md5hasher = Md5::new(); - let mut sha256hasher = Sha256::new(); - md5hasher.update(&first_block[..]); - sha256hasher.update(&first_block[..]); - - let mut next_offset = first_block.len(); - let mut put_curr_version_block = put_block_meta( - garage, - version, - part_number, - 0, - first_block_hash, - first_block.len() as u64, - ); - let mut put_curr_block = garage - .block_manager - .rpc_put_block(first_block_hash, first_block); - - loop { - let (_, _, next_block) = futures::try_join!( - put_curr_block.map_err(Error::from), - put_curr_version_block.map_err(Error::from), - chunker.next(), - )?; - if let Some(block) = next_block { - md5hasher.update(&block[..]); - sha256hasher.update(&block[..]); - let block_hash = blake2sum(&block[..]); - let block_len = block.len(); - put_curr_version_block = put_block_meta( - garage, - version, - part_number, - next_offset as u64, - block_hash, - block_len as u64, - ); - put_curr_block = garage.block_manager.rpc_put_block(block_hash, block); - next_offset += block_len; - } else { - break; - } - } - - let total_size = next_offset as u64; - let data_md5sum = md5hasher.finalize(); - - let data_sha256sum = sha256hasher.finalize(); - let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap(); - - Ok((total_size, data_md5sum, data_sha256sum)) -} - -async fn put_block_meta( - garage: &Garage, - version: &Version, - part_number: u64, - offset: u64, - hash: Hash, - size: u64, -) -> Result<(), GarageError> { - let mut version = version.clone(); - version.blocks.put( - VersionBlockKey { - part_number, - offset, - }, - VersionBlock { hash, size }, - ); - - let block_ref = BlockRef { - block: hash, - version: version.uuid, - deleted: false.into(), - }; - - futures::try_join!( - garage.version_table.insert(&version), - garage.block_ref_table.insert(&block_ref), - )?; - Ok(()) -} - -struct StreamChunker>> { - stream: S, - read_all: bool, - block_size: usize, - buf: VecDeque, -} - -impl> + Unpin> StreamChunker { - fn new(stream: S, block_size: usize) -> Self { - Self { - stream, - read_all: false, - block_size, - buf: VecDeque::with_capacity(2 * block_size), - } - } - - async fn next(&mut self) -> Result>, Error> { - while !self.read_all && self.buf.len() < self.block_size { - if let Some(block) = self.stream.next().await { - let bytes = block?; - trace!("Body next: {} bytes", bytes.len()); - self.buf.extend(bytes); - } else { - self.read_all = true; - } - } - - if self.buf.is_empty() { - Ok(None) - } else if self.buf.len() <= self.block_size { - let block = self.buf.drain(..).collect::>(); - Ok(Some(block)) - } else { - let block = self.buf.drain(..self.block_size).collect::>(); - Ok(Some(block)) - } - } -} - -pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response { - Response::builder() - .header("x-amz-version-id", hex::encode(version_uuid)) - .header("ETag", format!("\"{}\"", md5sum_hex)) - .body(Body::from(vec![])) - .unwrap() -} - -pub async fn handle_create_multipart_upload( - garage: Arc, - req: &Request, - bucket_name: &str, - bucket_id: Uuid, - key: &str, -) -> Result, Error> { - let version_uuid = gen_uuid(); - 
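// Note: the upload id returned to the client below is simply
// hex::encode(version_uuid), i.e. 64 hex characters for a 32-byte UUID;
// decode_upload_id() further down reverses this mapping and rejects any
// id that does not decode to exactly 32 bytes.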
let headers = get_headers(req.headers())?; - - // Create object in object table - let object_version = ObjectVersion { - uuid: version_uuid, - timestamp: now_msec(), - state: ObjectVersionState::Uploading(headers), - }; - let object = Object::new(bucket_id, key.to_string(), vec![object_version]); - garage.object_table.insert(&object).await?; - - // Insert an empty version so that block_ref entries refer to something - // (they are inserted concurrently with blocks in the version table, so - // there is the possibility that they are inserted before the version table - // entry is created, in which case it is allowed to delete them, e.g. in repair_*) - let version = Version::new(version_uuid, bucket_id, key.into(), false); - garage.version_table.insert(&version).await?; - - // Send success response - let result = s3_xml::InitiateMultipartUploadResult { - xmlns: (), - bucket: s3_xml::Value(bucket_name.to_string()), - key: s3_xml::Value(key.to_string()), - upload_id: s3_xml::Value(hex::encode(version_uuid)), - }; - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::new(Body::from(xml.into_bytes()))) -} - -pub async fn handle_put_part( - garage: Arc<Garage>, - req: Request<Body>, - bucket_id: Uuid, - key: &str, - part_number: u64, - upload_id: &str, - content_sha256: Option<FixedBytes32>, -) -> Result<Response<Body>, Error> { - let version_uuid = decode_upload_id(upload_id)?; - - let content_md5 = match req.headers().get("content-md5") { - Some(x) => Some(x.to_str()?.to_string()), - None => None, - }; - - // Read the first chunk and, at the same time, try to get the object to see if it exists - let key = key.to_string(); - - let body = req.into_body().map_err(Error::from); - let mut chunker = StreamChunker::new(body, garage.config.block_size); - - let (object, version, first_block) = futures::try_join!( - garage - .object_table - .get(&bucket_id, &key) - .map_err(Error::from), - garage - .version_table - .get(&version_uuid, &EmptyKey) - .map_err(Error::from), - chunker.next(), - )?; - - // Check that the object is valid and that the multipart block can be accepted - let first_block = first_block.ok_or_bad_request("Empty body")?; - let object = object.ok_or_bad_request("Object not found")?; - - if !object - .versions() - .iter() - .any(|v| v.uuid == version_uuid && v.is_uploading()) - { - return Err(Error::NoSuchUpload); - } - - // Check that the part hasn't already been uploaded - if let Some(v) = version { - if v.has_part_number(part_number) { - return Err(Error::BadRequest(format!( - "Part number {} has already been uploaded", - part_number - ))); - } - } - - // Copy block to store - let version = Version::new(version_uuid, bucket_id, key, false); - let first_block_hash = blake2sum(&first_block[..]); - let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( - &garage, - &version, - part_number, - first_block, - first_block_hash, - &mut chunker, - ) - .await?; - - // Verify that the checksums match - ensure_checksum_matches( - data_md5sum.as_slice(), - data_sha256sum, - content_md5.as_deref(), - content_sha256, - )?; - - // Store part etag in version - let data_md5sum_hex = hex::encode(data_md5sum); - let mut version = version; - version - .parts_etags - .put(part_number, data_md5sum_hex.clone()); - garage.version_table.insert(&version).await?; - - let response = Response::builder() - .header("ETag", format!("\"{}\"", data_md5sum_hex)) - .body(Body::empty()) - .unwrap(); - Ok(response) -} - -pub async fn handle_complete_multipart_upload( - garage: Arc<Garage>, - req: Request<Body>, - bucket_name: &str, - bucket_id: Uuid, - key: &str, - upload_id: &str, - content_sha256: Option<FixedBytes32>, -) 
-> Result, Error> { - let body = hyper::body::to_bytes(req.into_body()).await?; - - if let Some(content_sha256) = content_sha256 { - verify_signed_content(content_sha256, &body[..])?; - } - - let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; - let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) - .ok_or_bad_request("Invalid CompleteMultipartUpload XML")?; - debug!( - "CompleteMultipartUpload list of parts: {:?}", - body_list_of_parts - ); - - let version_uuid = decode_upload_id(upload_id)?; - - // Get object and version - let key = key.to_string(); - let (object, version) = futures::try_join!( - garage.object_table.get(&bucket_id, &key), - garage.version_table.get(&version_uuid, &EmptyKey), - )?; - - let object = object.ok_or(Error::NoSuchKey)?; - let mut object_version = object - .versions() - .iter() - .find(|v| v.uuid == version_uuid && v.is_uploading()) - .cloned() - .ok_or(Error::NoSuchUpload)?; - - let version = version.ok_or(Error::NoSuchKey)?; - if version.blocks.is_empty() { - return Err(Error::BadRequest("No data was uploaded".to_string())); - } - - let headers = match object_version.state { - ObjectVersionState::Uploading(headers) => headers, - _ => unreachable!(), - }; - - // Check that part numbers are an increasing sequence. - // (it doesn't need to start at 1 nor to be a continuous sequence, - // see discussion in #192) - if body_list_of_parts.is_empty() { - return Err(Error::EntityTooSmall); - } - if !body_list_of_parts - .iter() - .zip(body_list_of_parts.iter().skip(1)) - .all(|(p1, p2)| p1.part_number < p2.part_number) - { - return Err(Error::InvalidPartOrder); - } - - // Garage-specific restriction, see #204: part numbers must be - // consecutive starting at 1 - if body_list_of_parts[0].part_number != 1 - || !body_list_of_parts - .iter() - .zip(body_list_of_parts.iter().skip(1)) - .all(|(p1, p2)| p1.part_number + 1 == p2.part_number) - { - return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. See issue #204 for more information on this topic.".into())); - } - - // Check that the list of parts they gave us corresponds to the parts we have here - debug!("Expected parts from request: {:?}", body_list_of_parts); - debug!("Parts stored in version: {:?}", version.parts_etags.items()); - let parts = version - .parts_etags - .items() - .iter() - .map(|pair| (&pair.0, &pair.1)); - let same_parts = body_list_of_parts - .iter() - .map(|x| (&x.part_number, &x.etag)) - .eq(parts); - if !same_parts { - return Err(Error::InvalidPart); - } - - // Check that all blocks belong to one of the parts - let block_parts = version - .blocks - .items() - .iter() - .map(|(bk, _)| bk.part_number) - .collect::>(); - let same_parts = body_list_of_parts - .iter() - .map(|x| x.part_number) - .eq(block_parts.into_iter()); - if !same_parts { - return Err(Error::BadRequest( - "Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. 
Please abort the multipart upload and try again.".into(), - )); - } - - // Calculate etag of final object - // To understand how etags are calculated, read more here: - // https://teppen.io/2018/06/23/aws_s3_etags/ - let num_parts = body_list_of_parts.len(); - let mut etag_md5_hasher = Md5::new(); - for (_, etag) in version.parts_etags.items().iter() { - etag_md5_hasher.update(etag.as_bytes()); - } - let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts); - - // Calculate total size of final object - let total_size = version.blocks.items().iter().map(|x| x.1.size).sum(); - - // Write final object version - object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( - ObjectVersionMeta { - headers, - size: total_size, - etag: etag.clone(), - }, - version.blocks.items()[0].1.hash, - )); - - let final_object = Object::new(bucket_id, key.clone(), vec![object_version]); - garage.object_table.insert(&final_object).await?; - - // Send response saying ok we're done - let result = s3_xml::CompleteMultipartUploadResult { - xmlns: (), - location: None, - bucket: s3_xml::Value(bucket_name.to_string()), - key: s3_xml::Value(key), - etag: s3_xml::Value(format!("\"{}\"", etag)), - }; - let xml = s3_xml::to_xml_with_header(&result)?; - - Ok(Response::new(Body::from(xml.into_bytes()))) -} - -pub async fn handle_abort_multipart_upload( - garage: Arc, - bucket_id: Uuid, - key: &str, - upload_id: &str, -) -> Result, Error> { - let version_uuid = decode_upload_id(upload_id)?; - - let object = garage - .object_table - .get(&bucket_id, &key.to_string()) - .await?; - let object = object.ok_or(Error::NoSuchKey)?; - - let object_version = object - .versions() - .iter() - .find(|v| v.uuid == version_uuid && v.is_uploading()); - let mut object_version = match object_version { - None => return Err(Error::NoSuchUpload), - Some(x) => x.clone(), - }; - - object_version.state = ObjectVersionState::Aborted; - let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); - garage.object_table.insert(&final_object).await?; - - Ok(Response::new(Body::from(vec![]))) -} - -fn get_mime_type(headers: &HeaderMap) -> Result { - Ok(headers - .get(hyper::header::CONTENT_TYPE) - .map(|x| x.to_str()) - .unwrap_or(Ok("blob"))? 
- .to_string()) -} - -pub(crate) fn get_headers(headers: &HeaderMap) -> Result { - let content_type = get_mime_type(headers)?; - let mut other = BTreeMap::new(); - - // Preserve standard headers - let standard_header = vec![ - hyper::header::CACHE_CONTROL, - hyper::header::CONTENT_DISPOSITION, - hyper::header::CONTENT_ENCODING, - hyper::header::CONTENT_LANGUAGE, - hyper::header::EXPIRES, - ]; - for h in standard_header.iter() { - if let Some(v) = headers.get(h) { - match v.to_str() { - Ok(v_str) => { - other.insert(h.to_string(), v_str.to_string()); - } - Err(e) => { - warn!("Discarding header {}, error in .to_str(): {}", h, e); - } - } - } - } - - // Preserve x-amz-meta- headers - for (k, v) in headers.iter() { - if k.as_str().starts_with("x-amz-meta-") { - match v.to_str() { - Ok(v_str) => { - other.insert(k.to_string(), v_str.to_string()); - } - Err(e) => { - warn!("Discarding header {}, error in .to_str(): {}", k, e); - } - } - } - } - - Ok(ObjectVersionHeaders { - content_type, - other, - }) -} - -pub fn decode_upload_id(id: &str) -> Result { - let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?; - if id_bin.len() != 32 { - return Err(Error::NoSuchUpload); - } - let mut uuid = [0u8; 32]; - uuid.copy_from_slice(&id_bin[..]); - Ok(Uuid::from(uuid)) -} - -#[derive(Debug)] -struct CompleteMultipartUploadPart { - etag: String, - part_number: u64, -} - -fn parse_complete_multipart_upload_body( - xml: &roxmltree::Document, -) -> Option> { - let mut parts = vec![]; - - let root = xml.root(); - let cmu = root.first_child()?; - if !cmu.has_tag_name("CompleteMultipartUpload") { - return None; - } - - for item in cmu.children() { - // Only parse nodes - if !item.is_element() { - continue; - } - - if item.has_tag_name("Part") { - let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?; - let part_number = item - .children() - .find(|e| e.has_tag_name("PartNumber"))? - .text()?; - parts.push(CompleteMultipartUploadPart { - etag: etag.trim_matches('"').to_string(), - part_number: part_number.parse().ok()?, - }); - } else { - return None; - } - } - - Some(parts) -} diff --git a/src/api/s3_router.rs b/src/api/s3_router.rs deleted file mode 100644 index 95a7eceb..00000000 --- a/src/api/s3_router.rs +++ /dev/null @@ -1,1278 +0,0 @@ -use crate::error::{Error, OkOrBadRequest}; - -use std::borrow::Cow; - -use hyper::header::HeaderValue; -use hyper::{HeaderMap, Method, Request}; - -/// This macro is used to generate very repetitive match {} blocks in this module -/// It is _not_ made to be used anywhere else -macro_rules! s3_match { - (@match $enum:expr , [ $($endpoint:ident,)* ]) => {{ - // usage: s3_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] } - // returns true if the variant was one of the listed variants, false otherwise. - use Endpoint::*; - match $enum { - $( - $endpoint { .. } => true, - )* - _ => false - } - }}; - (@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{ - // usage: s3_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] } - // returns Some(field_value), or None if the variant was not one of the listed variants. - use Endpoint::*; - match $enum { - $( - $endpoint {$param, ..} => Some($param), - )* - _ => None - } - }}; - (@gen_parser ($keyword:expr, $key:expr, $query:expr, $header:expr), - key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*], - no_key: [$($kw_nk:ident $(if $required_nk:ident)? 
$(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{ - // usage: s3_match {@gen_parser (keyword, key, query, header), - // key: [ - // SOME_KEYWORD => VariantWithKey, - // ... - // ], - // no_key: [ - // SOME_KEYWORD => VariantWithoutKey, - // ... - // ] - // } - // See from_{method} for more detailed usage. - use Endpoint::*; - use keywords::*; - match ($keyword, !$key.is_empty()) { - $( - ($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k { - key: $key, - $($( - $param_k: s3_match!(@@parse_param $query, $conv_k, $param_k), - )*)? - }), - )* - $( - ($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains_key($header_nk))? => Ok($api_nk { - $($( - $param_nk: s3_match!(@@parse_param $query, $conv_nk, $param_nk), - )*)? - }), - )* - (kw, _) => Err(Error::BadRequest(format!("Invalid endpoint: {}", kw))) - } - }}; - - (@@parse_param $query:expr, query_opt, $param:ident) => {{ - // extract optional query parameter - $query.$param.take().map(|param| param.into_owned()) - }}; - (@@parse_param $query:expr, query, $param:ident) => {{ - // extract mandatory query parameter - $query.$param.take().ok_or_bad_request("Missing argument for endpoint")?.into_owned() - }}; - (@@parse_param $query:expr, opt_parse, $param:ident) => {{ - // extract and parse optional query parameter - // a missing parameter is fine, however a parse error is reported as an error - $query.$param - .take() - .map(|param| param.parse()) - .transpose() - .map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))? - }}; - (@@parse_param $query:expr, parse, $param:ident) => {{ - // extract and parse mandatory query parameter - // both missing and un-parseable parameters are reported as errors - $query.$param.take().ok_or_bad_request("Missing argument for endpoint")? - .parse() - .map_err(|_| Error::BadRequest("Failed to parse query parameter".to_owned()))? - }}; - (@func - $(#[$doc:meta])* - pub enum Endpoint { - $( - $(#[$outer:meta])* - $variant:ident $({ - $($name:ident: $ty:ty,)* - })?, - )* - }) => { - $(#[$doc])* - pub enum Endpoint { - $( - $(#[$outer])* - $variant $({ - $($name: $ty, )* - })?, - )* - } - impl Endpoint { - pub fn name(&self) -> &'static str { - match self { - $(Endpoint::$variant $({ $($name: _,)* .. })? => stringify!($variant),)* - } - } - } - }; - (@if ($($cond:tt)+) then ($($then:tt)*) else ($($else:tt)*)) => { - $($then)* - }; - (@if () then ($($then:tt)*) else ($($else:tt)*)) => { - $($else)* - }; -} - -s3_match! {@func - -/// List of all S3 API endpoints. -/// -/// For each endpoint, it contains the parameters this endpoint receives by URL (bucket, key and -/// query parameters). Parameters it may receive by header are left out, however headers are -/// considered when required to distinguish one endpoint from another (CopyObject vs. PutObject, -/// for instance).
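
A usage sketch for the router defined below, mirroring the unit tests at the end of this file (the request URI and assertions are illustrative):

```rust
use hyper::Request;

// Hedged sketch: resolving a request to an Endpoint variant plus bucket name.
fn demo() -> Result<(), Error> {
    let req = Request::builder()
        .method("GET")
        .uri("/my_bucket/my/key?tagging")
        .body(())
        .unwrap();
    // No bucket was extracted from the Host header, so it comes from the path.
    let (endpoint, bucket) = Endpoint::from_request(&req, None)?;
    assert_eq!(bucket.as_deref(), Some("my_bucket"));
    assert!(matches!(endpoint, Endpoint::GetObjectTagging { .. }));
    assert_eq!(endpoint.get_key(), Some("my/key"));
    Ok(())
}
```
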
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Endpoint { - AbortMultipartUpload { - key: String, - upload_id: String, - }, - CompleteMultipartUpload { - key: String, - upload_id: String, - }, - CopyObject { - key: String, - }, - CreateBucket { - }, - CreateMultipartUpload { - key: String, - }, - DeleteBucket { - }, - DeleteBucketAnalyticsConfiguration { - id: String, - }, - DeleteBucketCors { - }, - DeleteBucketEncryption { - }, - DeleteBucketIntelligentTieringConfiguration { - id: String, - }, - DeleteBucketInventoryConfiguration { - id: String, - }, - DeleteBucketLifecycle { - }, - DeleteBucketMetricsConfiguration { - id: String, - }, - DeleteBucketOwnershipControls { - }, - DeleteBucketPolicy { - }, - DeleteBucketReplication { - }, - DeleteBucketTagging { - }, - DeleteBucketWebsite { - }, - DeleteObject { - key: String, - version_id: Option, - }, - DeleteObjects { - }, - DeleteObjectTagging { - key: String, - version_id: Option, - }, - DeletePublicAccessBlock { - }, - GetBucketAccelerateConfiguration { - }, - GetBucketAcl { - }, - GetBucketAnalyticsConfiguration { - id: String, - }, - GetBucketCors { - }, - GetBucketEncryption { - }, - GetBucketIntelligentTieringConfiguration { - id: String, - }, - GetBucketInventoryConfiguration { - id: String, - }, - GetBucketLifecycleConfiguration { - }, - GetBucketLocation { - }, - GetBucketLogging { - }, - GetBucketMetricsConfiguration { - id: String, - }, - GetBucketNotificationConfiguration { - }, - GetBucketOwnershipControls { - }, - GetBucketPolicy { - }, - GetBucketPolicyStatus { - }, - GetBucketReplication { - }, - GetBucketRequestPayment { - }, - GetBucketTagging { - }, - GetBucketVersioning { - }, - GetBucketWebsite { - }, - /// There are actually many more query parameters, used to add headers to the answer. They were - /// not added here as they are best handled in a dedicated route. - GetObject { - key: String, - part_number: Option, - version_id: Option, - }, - GetObjectAcl { - key: String, - version_id: Option, - }, - GetObjectLegalHold { - key: String, - version_id: Option, - }, - GetObjectLockConfiguration { - }, - GetObjectRetention { - key: String, - version_id: Option, - }, - GetObjectTagging { - key: String, - version_id: Option, - }, - GetObjectTorrent { - key: String, - }, - GetPublicAccessBlock { - }, - HeadBucket { - }, - HeadObject { - key: String, - part_number: Option, - version_id: Option, - }, - ListBucketAnalyticsConfigurations { - continuation_token: Option, - }, - ListBucketIntelligentTieringConfigurations { - continuation_token: Option, - }, - ListBucketInventoryConfigurations { - continuation_token: Option, - }, - ListBucketMetricsConfigurations { - continuation_token: Option, - }, - ListBuckets, - ListMultipartUploads { - delimiter: Option, - encoding_type: Option, - key_marker: Option, - max_uploads: Option, - prefix: Option, - upload_id_marker: Option, - }, - ListObjects { - delimiter: Option, - encoding_type: Option, - marker: Option, - max_keys: Option, - prefix: Option, - }, - ListObjectsV2 { - // This value should always be 2. 
It is not checked when constructing the struct - list_type: String, - continuation_token: Option, - delimiter: Option, - encoding_type: Option, - fetch_owner: Option, - max_keys: Option, - prefix: Option, - start_after: Option, - }, - ListObjectVersions { - delimiter: Option, - encoding_type: Option, - key_marker: Option, - max_keys: Option, - prefix: Option, - version_id_marker: Option, - }, - ListParts { - key: String, - max_parts: Option, - part_number_marker: Option, - upload_id: String, - }, - Options, - PutBucketAccelerateConfiguration { - }, - PutBucketAcl { - }, - PutBucketAnalyticsConfiguration { - id: String, - }, - PutBucketCors { - }, - PutBucketEncryption { - }, - PutBucketIntelligentTieringConfiguration { - id: String, - }, - PutBucketInventoryConfiguration { - id: String, - }, - PutBucketLifecycleConfiguration { - }, - PutBucketLogging { - }, - PutBucketMetricsConfiguration { - id: String, - }, - PutBucketNotificationConfiguration { - }, - PutBucketOwnershipControls { - }, - PutBucketPolicy { - }, - PutBucketReplication { - }, - PutBucketRequestPayment { - }, - PutBucketTagging { - }, - PutBucketVersioning { - }, - PutBucketWebsite { - }, - PutObject { - key: String, - }, - PutObjectAcl { - key: String, - version_id: Option, - }, - PutObjectLegalHold { - key: String, - version_id: Option, - }, - PutObjectLockConfiguration { - }, - PutObjectRetention { - key: String, - version_id: Option, - }, - PutObjectTagging { - key: String, - version_id: Option, - }, - PutPublicAccessBlock { - }, - RestoreObject { - key: String, - version_id: Option, - }, - SelectObjectContent { - key: String, - // This value should always be 2. It is not checked when constructing the struct - select_type: String, - }, - UploadPart { - key: String, - part_number: u64, - upload_id: String, - }, - UploadPartCopy { - key: String, - part_number: u64, - upload_id: String, - }, - // This endpoint is not documented with others because it has special use case : - // It's intended to be used with HTML forms, using a multipart/form-data body. - // It works a lot like presigned requests, but everything is in the form instead - // of being query parameters of the URL, so authenticating it is a bit different. - PostObject, -}} - -impl Endpoint { - /// Determine which S3 endpoint a request is for using the request, and a bucket which was - /// possibly extracted from the Host header. - /// Returns Self plus bucket name, if endpoint is not Endpoint::ListBuckets - pub fn from_request( - req: &Request, - bucket: Option, - ) -> Result<(Self, Option), Error> { - let uri = req.uri(); - let path = uri.path().trim_start_matches('/'); - let query = uri.query(); - if bucket.is_none() && path.is_empty() { - if *req.method() == Method::OPTIONS { - return Ok((Self::Options, None)); - } else { - return Ok((Self::ListBuckets, None)); - } - } - - let (bucket, key) = if let Some(bucket) = bucket { - (bucket, path) - } else { - path.split_once('/') - .map(|(b, p)| (b.to_owned(), p.trim_start_matches('/'))) - .unwrap_or((path.to_owned(), "")) - }; - - if *req.method() == Method::OPTIONS { - return Ok((Self::Options, Some(bucket))); - } - - let key = percent_encoding::percent_decode_str(key) - .decode_utf8()? 
- .into_owned(); - - let mut query = QueryParameters::from_query(query.unwrap_or_default())?; - - let res = match *req.method() { - Method::GET => Self::from_get(key, &mut query)?, - Method::HEAD => Self::from_head(key, &mut query)?, - Method::POST => Self::from_post(key, &mut query)?, - Method::PUT => Self::from_put(key, &mut query, req.headers())?, - Method::DELETE => Self::from_delete(key, &mut query)?, - _ => return Err(Error::BadRequest("Unknown method".to_owned())), - }; - - if let Some(message) = query.nonempty_message() { - debug!("Unused query parameter: {}", message) - } - Ok((res, Some(bucket))) - } - - /// Determine which endpoint a request is for, knowing it is a GET. - fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result { - s3_match! { - @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), - key: [ - EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker), - EMPTY => GetObject (query_opt::version_id, opt_parse::part_number), - ACL => GetObjectAcl (query_opt::version_id), - LEGAL_HOLD => GetObjectLegalHold (query_opt::version_id), - RETENTION => GetObjectRetention (query_opt::version_id), - TAGGING => GetObjectTagging (query_opt::version_id), - TORRENT => GetObjectTorrent, - ], - no_key: [ - EMPTY if list_type => ListObjectsV2 (query::list_type, query_opt::continuation_token, - opt_parse::delimiter, query_opt::encoding_type, - opt_parse::fetch_owner, opt_parse::max_keys, - query_opt::prefix, query_opt::start_after), - EMPTY => ListObjects (opt_parse::delimiter, query_opt::encoding_type, query_opt::marker, - opt_parse::max_keys, opt_parse::prefix), - ACCELERATE => GetBucketAccelerateConfiguration, - ACL => GetBucketAcl, - ANALYTICS if id => GetBucketAnalyticsConfiguration (query::id), - ANALYTICS => ListBucketAnalyticsConfigurations (query_opt::continuation_token), - CORS => GetBucketCors, - ENCRYPTION => GetBucketEncryption, - INTELLIGENT_TIERING if id => GetBucketIntelligentTieringConfiguration (query::id), - INTELLIGENT_TIERING => ListBucketIntelligentTieringConfigurations (query_opt::continuation_token), - INVENTORY if id => GetBucketInventoryConfiguration (query::id), - INVENTORY => ListBucketInventoryConfigurations (query_opt::continuation_token), - LIFECYCLE => GetBucketLifecycleConfiguration, - LOCATION => GetBucketLocation, - LOGGING => GetBucketLogging, - METRICS if id => GetBucketMetricsConfiguration (query::id), - METRICS => ListBucketMetricsConfigurations (query_opt::continuation_token), - NOTIFICATION => GetBucketNotificationConfiguration, - OBJECT_LOCK => GetObjectLockConfiguration, - OWNERSHIP_CONTROLS => GetBucketOwnershipControls, - POLICY => GetBucketPolicy, - POLICY_STATUS => GetBucketPolicyStatus, - PUBLIC_ACCESS_BLOCK => GetPublicAccessBlock, - REPLICATION => GetBucketReplication, - REQUEST_PAYMENT => GetBucketRequestPayment, - TAGGING => GetBucketTagging, - UPLOADS => ListMultipartUploads (opt_parse::delimiter, query_opt::encoding_type, - query_opt::key_marker, opt_parse::max_uploads, - query_opt::prefix, query_opt::upload_id_marker), - VERSIONING => GetBucketVersioning, - VERSIONS => ListObjectVersions (opt_parse::delimiter, query_opt::encoding_type, - query_opt::key_marker, opt_parse::max_keys, - query_opt::prefix, query_opt::version_id_marker), - WEBSITE => GetBucketWebsite, - ] - } - } - - /// Determine which endpoint a request is for, knowing it is a HEAD. - fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result { - s3_match! 
{ - @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), - key: [ - EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id), - ], - no_key: [ - EMPTY => HeadBucket, - ] - } - } - - /// Determine which endpoint a request is for, knowing it is a POST. - fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result { - s3_match! { - @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), - key: [ - EMPTY if upload_id => CompleteMultipartUpload (query::upload_id), - RESTORE => RestoreObject (query_opt::version_id), - SELECT => SelectObjectContent (query::select_type), - UPLOADS => CreateMultipartUpload, - ], - no_key: [ - EMPTY => PostObject, - DELETE => DeleteObjects, - ] - } - } - - /// Determine which endpoint a request is for, knowing it is a PUT. - fn from_put( - key: String, - query: &mut QueryParameters<'_>, - headers: &HeaderMap, - ) -> Result { - s3_match! { - @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, headers), - key: [ - EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id), - EMPTY header "x-amz-copy-source" => CopyObject, - EMPTY if part_number => UploadPart (parse::part_number, query::upload_id), - EMPTY => PutObject, - ACL => PutObjectAcl (query_opt::version_id), - LEGAL_HOLD => PutObjectLegalHold (query_opt::version_id), - RETENTION => PutObjectRetention (query_opt::version_id), - TAGGING => PutObjectTagging (query_opt::version_id), - - ], - no_key: [ - EMPTY => CreateBucket, - ACCELERATE => PutBucketAccelerateConfiguration, - ACL => PutBucketAcl, - ANALYTICS => PutBucketAnalyticsConfiguration (query::id), - CORS => PutBucketCors, - ENCRYPTION => PutBucketEncryption, - INTELLIGENT_TIERING => PutBucketIntelligentTieringConfiguration(query::id), - INVENTORY => PutBucketInventoryConfiguration(query::id), - LIFECYCLE => PutBucketLifecycleConfiguration, - LOGGING => PutBucketLogging, - METRICS => PutBucketMetricsConfiguration(query::id), - NOTIFICATION => PutBucketNotificationConfiguration, - OBJECT_LOCK => PutObjectLockConfiguration, - OWNERSHIP_CONTROLS => PutBucketOwnershipControls, - POLICY => PutBucketPolicy, - PUBLIC_ACCESS_BLOCK => PutPublicAccessBlock, - REPLICATION => PutBucketReplication, - REQUEST_PAYMENT => PutBucketRequestPayment, - TAGGING => PutBucketTagging, - VERSIONING => PutBucketVersioning, - WEBSITE => PutBucketWebsite, - ] - } - } - - /// Determine which endpoint a request is for, knowing it is a DELETE. - fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result { - s3_match! 
{ - @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), - key: [ - EMPTY if upload_id => AbortMultipartUpload (query::upload_id), - EMPTY => DeleteObject (query_opt::version_id), - TAGGING => DeleteObjectTagging (query_opt::version_id), - ], - no_key: [ - EMPTY => DeleteBucket, - ANALYTICS => DeleteBucketAnalyticsConfiguration (query::id), - CORS => DeleteBucketCors, - ENCRYPTION => DeleteBucketEncryption, - INTELLIGENT_TIERING => DeleteBucketIntelligentTieringConfiguration (query::id), - INVENTORY => DeleteBucketInventoryConfiguration (query::id), - LIFECYCLE => DeleteBucketLifecycle, - METRICS => DeleteBucketMetricsConfiguration (query::id), - OWNERSHIP_CONTROLS => DeleteBucketOwnershipControls, - POLICY => DeleteBucketPolicy, - PUBLIC_ACCESS_BLOCK => DeletePublicAccessBlock, - REPLICATION => DeleteBucketReplication, - TAGGING => DeleteBucketTagging, - WEBSITE => DeleteBucketWebsite, - ] - } - } - - /// Get the key the request target. Returns None for requests which don't use a key. - #[allow(dead_code)] - pub fn get_key(&self) -> Option<&str> { - s3_match! { - @extract - self, - key, - [ - AbortMultipartUpload, - CompleteMultipartUpload, - CopyObject, - CreateMultipartUpload, - DeleteObject, - DeleteObjectTagging, - GetObject, - GetObjectAcl, - GetObjectLegalHold, - GetObjectRetention, - GetObjectTagging, - GetObjectTorrent, - HeadObject, - ListParts, - PutObject, - PutObjectAcl, - PutObjectLegalHold, - PutObjectRetention, - PutObjectTagging, - RestoreObject, - SelectObjectContent, - UploadPart, - UploadPartCopy, - ] - } - } - - /// Get the kind of authorization which is required to perform the operation. - pub fn authorization_type(&self) -> Authorization { - if let Endpoint::ListBuckets = self { - return Authorization::None; - }; - let readonly = s3_match! { - @match - self, - [ - GetBucketAccelerateConfiguration, - GetBucketAcl, - GetBucketAnalyticsConfiguration, - GetBucketEncryption, - GetBucketIntelligentTieringConfiguration, - GetBucketInventoryConfiguration, - GetBucketLifecycleConfiguration, - GetBucketLocation, - GetBucketLogging, - GetBucketMetricsConfiguration, - GetBucketNotificationConfiguration, - GetBucketOwnershipControls, - GetBucketPolicy, - GetBucketPolicyStatus, - GetBucketReplication, - GetBucketRequestPayment, - GetBucketTagging, - GetBucketVersioning, - GetObject, - GetObjectAcl, - GetObjectLegalHold, - GetObjectLockConfiguration, - GetObjectRetention, - GetObjectTagging, - GetObjectTorrent, - GetPublicAccessBlock, - HeadBucket, - HeadObject, - ListBucketAnalyticsConfigurations, - ListBucketIntelligentTieringConfigurations, - ListBucketInventoryConfigurations, - ListBucketMetricsConfigurations, - ListMultipartUploads, - ListObjects, - ListObjectsV2, - ListObjectVersions, - ListParts, - SelectObjectContent, - ] - }; - let owner = s3_match! { - @match - self, - [ - DeleteBucket, - GetBucketWebsite, - PutBucketWebsite, - DeleteBucketWebsite, - GetBucketCors, - PutBucketCors, - DeleteBucketCors, - ] - }; - if readonly { - Authorization::Read - } else if owner { - Authorization::Owner - } else { - Authorization::Write - } - } -} - -/// What kind of authorization is required to perform a given action -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Authorization { - /// No authorization is required - None, - /// Having Read permission on bucket - Read, - /// Having Write permission on bucket - Write, - /// Having Owner permission on bucket - Owner, -} - -/// This macro is used to generate part of the code in this module. 
It must be called only once, and -/// is useless outside of this module. -macro_rules! generateQueryParameters { - ( $($rest:expr => $name:ident),* ) => { - /// Struct containing all query parameters used in endpoints. Think of it as a HashMap, - /// but with keys statically known. - #[derive(Debug, Default)] - struct QueryParameters<'a> { - keyword: Option<Cow<'a, str>>, - $( - $name: Option<Cow<'a, str>>, - )* - } - - impl<'a> QueryParameters<'a> { - /// Build this struct from the query part of a URI. - fn from_query(query: &'a str) -> Result<Self, Error> { - let mut res: Self = Default::default(); - for (k, v) in url::form_urlencoded::parse(query.as_bytes()) { - let repeated = match k.as_ref() { - $( - $rest => if !v.is_empty() { - res.$name.replace(v).is_some() - } else { - false - }, - )* - _ => { - if k.starts_with("response-") || k.starts_with("X-Amz-") { - false - } else if v.as_ref().is_empty() { - if res.keyword.replace(k).is_some() { - return Err(Error::BadRequest("Multiple keywords".to_owned())); - } - continue; - } else { - debug!("Received an unknown query parameter: '{}'", k); - false - } - } - }; - if repeated { - return Err(Error::BadRequest(format!( - "Query parameter repeated: '{}'", - k - ))); - } - } - Ok(res) - } - - /// Get an error message in case not all parameters were used when extracting them to - /// build an Endpoint variant - fn nonempty_message(&self) -> Option<&str> { - if self.keyword.is_some() { - Some("Keyword not used") - } $( - else if self.$name.is_some() { - Some(concat!("'", $rest, "'")) - } - )* else { - None - } - } - } - } -} - -// parameter name => struct field -generateQueryParameters! { - "continuation-token" => continuation_token, - "delimiter" => delimiter, - "encoding-type" => encoding_type, - "fetch-owner" => fetch_owner, - "id" => id, - "key-marker" => key_marker, - "list-type" => list_type, - "marker" => marker, - "max-keys" => max_keys, - "max-parts" => max_parts, - "max-uploads" => max_uploads, - "partNumber" => part_number, - "part-number-marker" => part_number_marker, - "prefix" => prefix, - "select-type" => select_type, - "start-after" => start_after, - "uploadId" => upload_id, - "upload-id-marker" => upload_id_marker, - "versionId" => version_id, - "version-id-marker" => version_id_marker -} - -mod keywords { - //! This module contains all the value-less query parameters that S3 uses to differentiate - //! endpoints.
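
A behavioral sketch of the `QueryParameters` struct generated just above (illustrative; it exercises the module-private API): value-less parameters become the single allowed keyword, known names land in their field, and repeating a known parameter is rejected.

```rust
// Hedged sketch of QueryParameters::from_query, assuming the generated struct.
fn demo() -> Result<(), Error> {
    let mut q = QueryParameters::from_query("tagging&versionId=VersionId")?;
    assert_eq!(q.keyword.take().as_deref(), Some("tagging")); // value-less => keyword
    assert_eq!(q.version_id.take().as_deref(), Some("VersionId"));
    // Everything was consumed, so nothing is left to warn about.
    assert!(q.nonempty_message().is_none());

    // Two value-less parameters means two keywords: a BadRequest.
    assert!(QueryParameters::from_query("acl&tagging").is_err());
    Ok(())
}
```
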
- pub const EMPTY: &str = ""; - - pub const ACCELERATE: &str = "accelerate"; - pub const ACL: &str = "acl"; - pub const ANALYTICS: &str = "analytics"; - pub const CORS: &str = "cors"; - pub const DELETE: &str = "delete"; - pub const ENCRYPTION: &str = "encryption"; - pub const INTELLIGENT_TIERING: &str = "intelligent-tiering"; - pub const INVENTORY: &str = "inventory"; - pub const LEGAL_HOLD: &str = "legal-hold"; - pub const LIFECYCLE: &str = "lifecycle"; - pub const LOCATION: &str = "location"; - pub const LOGGING: &str = "logging"; - pub const METRICS: &str = "metrics"; - pub const NOTIFICATION: &str = "notification"; - pub const OBJECT_LOCK: &str = "object-lock"; - pub const OWNERSHIP_CONTROLS: &str = "ownershipControls"; - pub const POLICY: &str = "policy"; - pub const POLICY_STATUS: &str = "policyStatus"; - pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock"; - pub const REPLICATION: &str = "replication"; - pub const REQUEST_PAYMENT: &str = "requestPayment"; - pub const RESTORE: &str = "restore"; - pub const RETENTION: &str = "retention"; - pub const SELECT: &str = "select"; - pub const TAGGING: &str = "tagging"; - pub const TORRENT: &str = "torrent"; - pub const UPLOADS: &str = "uploads"; - pub const VERSIONING: &str = "versioning"; - pub const VERSIONS: &str = "versions"; - pub const WEBSITE: &str = "website"; -} - -#[cfg(test)] -mod tests { - use super::*; - - fn parse( - method: &str, - uri: &str, - bucket: Option, - header: Option<(&str, &str)>, - ) -> (Endpoint, Option) { - let mut req = Request::builder().method(method).uri(uri); - if let Some((k, v)) = header { - req = req.header(k, v) - } - let req = req.body(()).unwrap(); - - Endpoint::from_request(&req, bucket).unwrap() - } - - macro_rules! test_cases { - ($($method:ident $uri:expr => $variant:ident )*) => {{ - $( - assert!( - matches!( - parse(test_cases!{@actual_method $method}, $uri, Some("my_bucket".to_owned()), None).0, - Endpoint::$variant { .. } - ) - ); - assert!( - matches!( - parse(test_cases!{@actual_method $method}, concat!("/my_bucket", $uri), None, None).0, - Endpoint::$variant { .. 
} - ) - ); - - test_cases!{@auth $method $uri} - )* - }}; - - (@actual_method HEAD) => {{ "HEAD" }}; - (@actual_method GET) => {{ "GET" }}; - (@actual_method OWNER_GET) => {{ "GET" }}; - (@actual_method PUT) => {{ "PUT" }}; - (@actual_method OWNER_PUT) => {{ "PUT" }}; - (@actual_method POST) => {{ "POST" }}; - (@actual_method DELETE) => {{ "DELETE" }}; - (@actual_method OWNER_DELETE) => {{ "DELETE" }}; - - (@auth HEAD $uri:expr) => {{ - assert_eq!(parse("HEAD", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Read) - }}; - (@auth GET $uri:expr) => {{ - assert_eq!(parse("GET", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Read) - }}; - (@auth OWNER_GET $uri:expr) => {{ - assert_eq!(parse("GET", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Owner) - }}; - (@auth PUT $uri:expr) => {{ - assert_eq!(parse("PUT", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Write) - }}; - (@auth OWNER_PUT $uri:expr) => {{ - assert_eq!(parse("PUT", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Owner) - }}; - (@auth POST $uri:expr) => {{ - assert_eq!(parse("POST", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Write) - }}; - (@auth DELETE $uri:expr) => {{ - assert_eq!(parse("DELETE", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Write) - }}; - (@auth OWNER_DELETE $uri:expr) => {{ - assert_eq!(parse("DELETE", concat!("/my_bucket", $uri), None, None).0.authorization_type(), - Authorization::Owner) - }}; - } - - #[test] - fn test_bucket_extraction() { - assert_eq!( - parse("GET", "/my/key", Some("my_bucket".to_owned()), None).1, - parse("GET", "/my_bucket/my/key", None, None).1 - ); - assert_eq!( - parse("GET", "/my_bucket/my/key", None, None).1.unwrap(), - "my_bucket" - ); - assert!(parse("GET", "/", None, None).1.is_none()); - } - - #[test] - fn test_key() { - assert_eq!( - parse("GET", "/my/key", Some("my_bucket".to_owned()), None) - .0 - .get_key(), - parse("GET", "/my_bucket/my/key", None, None).0.get_key() - ); - assert_eq!( - parse("GET", "/my_bucket/my/key", None, None) - .0 - .get_key() - .unwrap(), - "my/key" - ); - assert_eq!( - parse("GET", "/my_bucket/my/key?acl", None, None) - .0 - .get_key() - .unwrap(), - "my/key" - ); - assert!(parse("GET", "/my_bucket/?list-type=2", None, None) - .0 - .get_key() - .is_none()); - - assert_eq!( - parse("GET", "/my_bucket/%26%2B%3F%25%C3%A9/something", None, None) - .0 - .get_key() - .unwrap(), - "&+?%é/something" - ); - - /* - * this case is failing. 
We should verify how clients encode space in url - assert_eq!( - parse("GET", "/my_bucket/+", None, None).get_key().unwrap(), - " "); - */ - } - - #[test] - fn invalid_endpoint() { - let req = Request::builder() - .method("GET") - .uri("/bucket/key?website") - .body(()) - .unwrap(); - - assert!(Endpoint::from_request(&req, None).is_err()) - } - - #[test] - fn test_aws_doc_examples() { - test_cases!( - DELETE "/example-object?uploadId=VXBsb2FkIElEIGZvciBlbHZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZ" => AbortMultipartUpload - DELETE "/Key+?uploadId=UploadId" => AbortMultipartUpload - POST "/example-object?uploadId=AAAsb2FkIElEIGZvciBlbHZpbmcncyWeeS1tb3ZpZS5tMnRzIRRwbG9hZA" => CompleteMultipartUpload - POST "/Key+?uploadId=UploadId" => CompleteMultipartUpload - PUT "/" => CreateBucket - POST "/example-object?uploads" => CreateMultipartUpload - POST "/{Key+}?uploads" => CreateMultipartUpload - OWNER_DELETE "/" => DeleteBucket - DELETE "/?analytics&id=list1" => DeleteBucketAnalyticsConfiguration - DELETE "/?analytics&id=Id" => DeleteBucketAnalyticsConfiguration - OWNER_DELETE "/?cors" => DeleteBucketCors - DELETE "/?encryption" => DeleteBucketEncryption - DELETE "/?intelligent-tiering&id=Id" => DeleteBucketIntelligentTieringConfiguration - DELETE "/?inventory&id=list1" => DeleteBucketInventoryConfiguration - DELETE "/?inventory&id=Id" => DeleteBucketInventoryConfiguration - DELETE "/?lifecycle" => DeleteBucketLifecycle - DELETE "/?metrics&id=ExampleMetrics" => DeleteBucketMetricsConfiguration - DELETE "/?metrics&id=Id" => DeleteBucketMetricsConfiguration - DELETE "/?ownershipControls" => DeleteBucketOwnershipControls - DELETE "/?policy" => DeleteBucketPolicy - DELETE "/?replication" => DeleteBucketReplication - DELETE "/?tagging" => DeleteBucketTagging - OWNER_DELETE "/?website" => DeleteBucketWebsite - DELETE "/my-second-image.jpg" => DeleteObject - DELETE "/my-third-image.jpg?versionId=UIORUnfndfiufdisojhr398493jfdkjFJjkndnqUifhnw89493jJFJ" => DeleteObject - DELETE "/Key+?versionId=VersionId" => DeleteObject - POST "/?delete" => DeleteObjects - DELETE "/exampleobject?tagging" => DeleteObjectTagging - DELETE "/{Key+}?tagging&versionId=VersionId" => DeleteObjectTagging - DELETE "/?publicAccessBlock" => DeletePublicAccessBlock - GET "/?accelerate" => GetBucketAccelerateConfiguration - GET "/?acl" => GetBucketAcl - GET "/?analytics&id=Id" => GetBucketAnalyticsConfiguration - OWNER_GET "/?cors" => GetBucketCors - GET "/?encryption" => GetBucketEncryption - GET "/?intelligent-tiering&id=Id" => GetBucketIntelligentTieringConfiguration - GET "/?inventory&id=list1" => GetBucketInventoryConfiguration - GET "/?inventory&id=Id" => GetBucketInventoryConfiguration - GET "/?lifecycle" => GetBucketLifecycleConfiguration - GET "/?location" => GetBucketLocation - GET "/?logging" => GetBucketLogging - GET "/?metrics&id=Documents" => GetBucketMetricsConfiguration - GET "/?metrics&id=Id" => GetBucketMetricsConfiguration - GET "/?notification" => GetBucketNotificationConfiguration - GET "/?ownershipControls" => GetBucketOwnershipControls - GET "/?policy" => GetBucketPolicy - GET "/?policyStatus" => GetBucketPolicyStatus - GET "/?replication" => GetBucketReplication - GET "/?requestPayment" => GetBucketRequestPayment - GET "/?tagging" => GetBucketTagging - GET "/?versioning" => GetBucketVersioning - OWNER_GET "/?website" => GetBucketWebsite - GET "/my-image.jpg" => GetObject - GET "/myObject?versionId=3/L4kqtJlcpXroDTDmpUMLUo" => GetObject - GET 
"/Junk3.txt?response-cache-control=No-cache&response-content-disposition=attachment%3B%20filename%3Dtesting.txt&response-content-encoding=x-gzip&response-content-language=mi%2C%20en&response-expires=Thu%2C%2001%20Dec%201994%2016:00:00%20GMT" => GetObject - GET "/Key+?partNumber=1&response-cache-control=ResponseCacheControl&response-content-disposition=ResponseContentDisposition&response-content-encoding=ResponseContentEncoding&response-content-language=ResponseContentLanguage&response-content-type=ResponseContentType&response-expires=ResponseExpires&versionId=VersionId" => GetObject - GET "/my-image.jpg?acl" => GetObjectAcl - GET "/my-image.jpg?versionId=3/L4kqtJlcpXroDVBH40Nr8X8gdRQBpUMLUo&acl" => GetObjectAcl - GET "/{Key+}?acl&versionId=VersionId" => GetObjectAcl - GET "/{Key+}?legal-hold&versionId=VersionId" => GetObjectLegalHold - GET "/?object-lock" => GetObjectLockConfiguration - GET "/{Key+}?retention&versionId=VersionId" => GetObjectRetention - GET "/example-object?tagging" => GetObjectTagging - GET "/{Key+}?tagging&versionId=VersionId" => GetObjectTagging - GET "/quotes/Nelson?torrent" => GetObjectTorrent - GET "/{Key+}?torrent" => GetObjectTorrent - GET "/?publicAccessBlock" => GetPublicAccessBlock - HEAD "/" => HeadBucket - HEAD "/my-image.jpg" => HeadObject - HEAD "/my-image.jpg?versionId=3HL4kqCxf3vjVBH40Nrjfkd" => HeadObject - HEAD "/Key+?partNumber=3&versionId=VersionId" => HeadObject - GET "/?analytics" => ListBucketAnalyticsConfigurations - GET "/?analytics&continuation-token=ContinuationToken" => ListBucketAnalyticsConfigurations - GET "/?intelligent-tiering" => ListBucketIntelligentTieringConfigurations - GET "/?intelligent-tiering&continuation-token=ContinuationToken" => ListBucketIntelligentTieringConfigurations - GET "/?inventory" => ListBucketInventoryConfigurations - GET "/?inventory&continuation-token=ContinuationToken" => ListBucketInventoryConfigurations - GET "/?metrics" => ListBucketMetricsConfigurations - GET "/?metrics&continuation-token=ContinuationToken" => ListBucketMetricsConfigurations - GET "/?uploads&max-uploads=3" => ListMultipartUploads - GET "/?uploads&delimiter=/" => ListMultipartUploads - GET "/?uploads&delimiter=/&prefix=photos/2006/" => ListMultipartUploads - GET "/?uploads&delimiter=D&encoding-type=EncodingType&key-marker=KeyMarker&max-uploads=1&prefix=Prefix&upload-id-marker=UploadIdMarker" => ListMultipartUploads - GET "/" => ListObjects - GET "/?prefix=N&marker=Ned&max-keys=40" => ListObjects - GET "/?delimiter=/" => ListObjects - GET "/?prefix=photos/2006/&delimiter=/" => ListObjects - - GET "/?delimiter=D&encoding-type=EncodingType&marker=Marker&max-keys=1&prefix=Prefix" => ListObjects - GET "/?list-type=2" => ListObjectsV2 - GET "/?list-type=2&max-keys=3&prefix=E&start-after=ExampleGuide.pdf" => ListObjectsV2 - GET "/?list-type=2&delimiter=/" => ListObjectsV2 - GET "/?list-type=2&prefix=photos/2006/&delimiter=/" => ListObjectsV2 - GET "/?list-type=2" => ListObjectsV2 - GET "/?list-type=2&continuation-token=1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=" => ListObjectsV2 - GET "/?list-type=2&continuation-token=ContinuationToken&delimiter=D&encoding-type=EncodingType&fetch-owner=true&max-keys=1&prefix=Prefix&start-after=StartAfter" => ListObjectsV2 - GET "/?versions" => ListObjectVersions - GET "/?versions&key-marker=key2" => ListObjectVersions - GET "/?versions&key-marker=key3&version-id-marker=t46ZenlYTZBnj" => ListObjectVersions - GET "/?versions&key-marker=key3&version-id-marker=t46Z0menlYTZBnj&max-keys=3" => ListObjectVersions - GET 
"/?versions&delimiter=/" => ListObjectVersions - GET "/?versions&prefix=photos/2006/&delimiter=/" => ListObjectVersions - GET "/?versions&delimiter=D&encoding-type=EncodingType&key-marker=KeyMarker&max-keys=2&prefix=Prefix&version-id-marker=VersionIdMarker" => ListObjectVersions - GET "/example-object?uploadId=XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA&max-parts=2&part-number-marker=1" => ListParts - GET "/Key+?max-parts=2&part-number-marker=2&uploadId=UploadId" => ListParts - PUT "/?accelerate" => PutBucketAccelerateConfiguration - PUT "/?acl" => PutBucketAcl - PUT "/?analytics&id=report1" => PutBucketAnalyticsConfiguration - PUT "/?analytics&id=Id" => PutBucketAnalyticsConfiguration - OWNER_PUT "/?cors" => PutBucketCors - PUT "/?encryption" => PutBucketEncryption - PUT "/?intelligent-tiering&id=Id" => PutBucketIntelligentTieringConfiguration - PUT "/?inventory&id=report1" => PutBucketInventoryConfiguration - PUT "/?inventory&id=Id" => PutBucketInventoryConfiguration - PUT "/?lifecycle" => PutBucketLifecycleConfiguration - PUT "/?logging" => PutBucketLogging - PUT "/?metrics&id=EntireBucket" => PutBucketMetricsConfiguration - PUT "/?metrics&id=Id" => PutBucketMetricsConfiguration - PUT "/?notification" => PutBucketNotificationConfiguration - PUT "/?ownershipControls" => PutBucketOwnershipControls - PUT "/?policy" => PutBucketPolicy - PUT "/?replication" => PutBucketReplication - PUT "/?requestPayment" => PutBucketRequestPayment - PUT "/?tagging" => PutBucketTagging - PUT "/?versioning" => PutBucketVersioning - OWNER_PUT "/?website" => PutBucketWebsite - PUT "/my-image.jpg" => PutObject - PUT "/Key+" => PutObject - PUT "/my-image.jpg?acl" => PutObjectAcl - PUT "/my-image.jpg?acl&versionId=3HL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nrjfkd" => PutObjectAcl - PUT "/{Key+}?acl&versionId=VersionId" => PutObjectAcl - PUT "/{Key+}?legal-hold&versionId=VersionId" => PutObjectLegalHold - PUT "/?object-lock" => PutObjectLockConfiguration - PUT "/{Key+}?retention&versionId=VersionId" => PutObjectRetention - PUT "/object-key?tagging" => PutObjectTagging - PUT "/{Key+}?tagging&versionId=VersionId" => PutObjectTagging - PUT "/?publicAccessBlock" => PutPublicAccessBlock - POST "/object-one.csv?restore" => RestoreObject - POST "/{Key+}?restore&versionId=VersionId" => RestoreObject - PUT "/my-movie.m2ts?partNumber=1&uploadId=VCVsb2FkIElEIGZvciBlbZZpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZR" => UploadPart - PUT "/Key+?partNumber=2&uploadId=UploadId" => UploadPart - POST "/" => PostObject - ); - // no bucket, won't work with the rest of the test suite - assert!(matches!( - parse("GET", "/", None, None).0, - Endpoint::ListBuckets { .. } - )); - assert!(matches!( - parse("GET", "/", None, None).0.authorization_type(), - Authorization::None - )); - - // require a header - assert!(matches!( - parse( - "PUT", - "/Key+", - Some("my_bucket".to_owned()), - Some(("x-amz-copy-source", "some/key")) - ) - .0, - Endpoint::CopyObject { .. } - )); - assert!(matches!( - parse( - "PUT", - "/my_bucket/Key+", - None, - Some(("x-amz-copy-source", "some/key")) - ) - .0, - Endpoint::CopyObject { .. } - )); - assert!(matches!( - parse( - "PUT", - "/my_bucket/Key+", - None, - Some(("x-amz-copy-source", "some/key")) - ) - .0 - .authorization_type(), - Authorization::Write - )); - - // require a header - assert!(matches!( - parse( - "PUT", - "/Key+?partNumber=2&uploadId=UploadId", - Some("my_bucket".to_owned()), - Some(("x-amz-copy-source", "some/key")) - ) - .0, - Endpoint::UploadPartCopy { .. 
} - )); - assert!(matches!( - parse( - "PUT", - "/my_bucket/Key+?partNumber=2&uploadId=UploadId", - None, - Some(("x-amz-copy-source", "some/key")) - ) - .0, - Endpoint::UploadPartCopy { .. } - )); - assert!(matches!( - parse( - "PUT", - "/my_bucket/Key+?partNumber=2&uploadId=UploadId", - None, - Some(("x-amz-copy-source", "some/key")) - ) - .0 - .authorization_type(), - Authorization::Write - )); - - // POST request, but with GET semantic for permissions purpose - assert!(matches!( - parse( - "POST", - "/{Key+}?select&select-type=2", - Some("my_bucket".to_owned()), - None - ) - .0, - Endpoint::SelectObjectContent { .. } - )); - assert!(matches!( - parse("POST", "/my_bucket/{Key+}?select&select-type=2", None, None).0, - Endpoint::SelectObjectContent { .. } - )); - assert!(matches!( - parse("POST", "/my_bucket/{Key+}?select&select-type=2", None, None) - .0 - .authorization_type(), - Authorization::Read - )); - } -} diff --git a/src/api/s3_website.rs b/src/api/s3_website.rs deleted file mode 100644 index b464dd45..00000000 --- a/src/api/s3_website.rs +++ /dev/null @@ -1,369 +0,0 @@ -use quick_xml::de::from_reader; -use std::sync::Arc; - -use hyper::{Body, Request, Response, StatusCode}; -use serde::{Deserialize, Serialize}; - -use crate::error::*; -use crate::s3_xml::{to_xml_with_header, xmlns_tag, IntValue, Value}; -use crate::signature::verify_signed_content; - -use garage_model::bucket_table::*; -use garage_model::garage::Garage; -use garage_table::*; -use garage_util::data::*; - -pub async fn handle_get_website(bucket: &Bucket) -> Result, Error> { - let param = bucket - .params() - .ok_or_internal_error("Bucket should not be deleted at this point")?; - - if let Some(website) = param.website_config.get() { - let wc = WebsiteConfiguration { - xmlns: (), - error_document: website.error_document.as_ref().map(|v| Key { - key: Value(v.to_string()), - }), - index_document: Some(Suffix { - suffix: Value(website.index_document.to_string()), - }), - redirect_all_requests_to: None, - routing_rules: None, - }; - let xml = to_xml_with_header(&wc)?; - Ok(Response::builder() - .status(StatusCode::OK) - .header(http::header::CONTENT_TYPE, "application/xml") - .body(Body::from(xml))?) - } else { - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) - } -} - -pub async fn handle_delete_website( - garage: Arc, - bucket_id: Uuid, -) -> Result, Error> { - let mut bucket = garage - .bucket_table - .get(&EmptyKey, &bucket_id) - .await? - .ok_or(Error::NoSuchBucket)?; - - let param = bucket - .params_mut() - .ok_or_internal_error("Bucket should not be deleted at this point")?; - - param.website_config.update(None); - garage.bucket_table.insert(&bucket).await?; - - Ok(Response::builder() - .status(StatusCode::NO_CONTENT) - .body(Body::empty())?) -} - -pub async fn handle_put_website( - garage: Arc, - bucket_id: Uuid, - req: Request, - content_sha256: Option, -) -> Result, Error> { - let body = hyper::body::to_bytes(req.into_body()).await?; - - if let Some(content_sha256) = content_sha256 { - verify_signed_content(content_sha256, &body[..])?; - } - - let mut bucket = garage - .bucket_table - .get(&EmptyKey, &bucket_id) - .await? 
- .ok_or(Error::NoSuchBucket)?; - - let param = bucket - .params_mut() - .ok_or_internal_error("Bucket should not be deleted at this point")?; - - let conf: WebsiteConfiguration = from_reader(&body as &[u8])?; - conf.validate()?; - - param - .website_config - .update(Some(conf.into_garage_website_config()?)); - garage.bucket_table.insert(&bucket).await?; - - Ok(Response::builder() - .status(StatusCode::OK) - .body(Body::empty())?) -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct WebsiteConfiguration { - #[serde(serialize_with = "xmlns_tag", skip_deserializing)] - pub xmlns: (), - #[serde(rename = "ErrorDocument")] - pub error_document: Option, - #[serde(rename = "IndexDocument")] - pub index_document: Option, - #[serde(rename = "RedirectAllRequestsTo")] - pub redirect_all_requests_to: Option, - #[serde(rename = "RoutingRules")] - pub routing_rules: Option>, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct RoutingRule { - #[serde(rename = "RoutingRule")] - pub inner: RoutingRuleInner, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct RoutingRuleInner { - #[serde(rename = "Condition")] - pub condition: Option, - #[serde(rename = "Redirect")] - pub redirect: Redirect, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct Key { - #[serde(rename = "Key")] - pub key: Value, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct Suffix { - #[serde(rename = "Suffix")] - pub suffix: Value, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct Target { - #[serde(rename = "HostName")] - pub hostname: Value, - #[serde(rename = "Protocol")] - pub protocol: Option, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct Condition { - #[serde(rename = "HttpErrorCodeReturnedEquals")] - pub http_error_code: Option, - #[serde(rename = "KeyPrefixEquals")] - pub prefix: Option, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct Redirect { - #[serde(rename = "HostName")] - pub hostname: Option, - #[serde(rename = "Protocol")] - pub protocol: Option, - #[serde(rename = "HttpRedirectCode")] - pub http_redirect_code: Option, - #[serde(rename = "ReplaceKeyPrefixWith")] - pub replace_prefix: Option, - #[serde(rename = "ReplaceKeyWith")] - pub replace_full: Option, -} - -impl WebsiteConfiguration { - pub fn validate(&self) -> Result<(), Error> { - if self.redirect_all_requests_to.is_some() - && (self.error_document.is_some() - || self.index_document.is_some() - || self.routing_rules.is_some()) - { - return Err(Error::BadRequest( - "Bad XML: can't have RedirectAllRequestsTo and other fields".to_owned(), - )); - } - if let Some(ref ed) = self.error_document { - ed.validate()?; - } - if let Some(ref id) = self.index_document { - id.validate()?; - } - if let Some(ref rart) = self.redirect_all_requests_to { - rart.validate()?; - } - if let Some(ref rrs) = self.routing_rules { - for rr in rrs { - rr.inner.validate()?; - } - } - - Ok(()) - } - - pub fn into_garage_website_config(self) -> Result { - if self.redirect_all_requests_to.is_some() { - Err(Error::NotImplemented( - "S3 website redirects are not currently implemented in Garage.".into(), - )) - } else if self.routing_rules.map(|x| !x.is_empty()).unwrap_or(false) { - Err(Error::NotImplemented( - "S3 routing rules are not currently implemented in Garage.".into(), - 
)) - } else { - Ok(WebsiteConfig { - index_document: self - .index_document - .map(|x| x.suffix.0) - .unwrap_or_else(|| "index.html".to_string()), - error_document: self.error_document.map(|x| x.key.0), - }) - } - } -} - -impl Key { - pub fn validate(&self) -> Result<(), Error> { - if self.key.0.is_empty() { - Err(Error::BadRequest( - "Bad XML: error document specified but empty".to_owned(), - )) - } else { - Ok(()) - } - } -} - -impl Suffix { - pub fn validate(&self) -> Result<(), Error> { - if self.suffix.0.is_empty() || self.suffix.0.contains('/') { - Err(Error::BadRequest( - "Bad XML: index document is empty or contains /".to_owned(), - )) - } else { - Ok(()) - } - } -} - -impl Target { - pub fn validate(&self) -> Result<(), Error> { - if let Some(ref protocol) = self.protocol { - if protocol.0 != "http" && protocol.0 != "https" { - return Err(Error::BadRequest("Bad XML: invalid protocol".to_owned())); - } - } - Ok(()) - } -} - -impl RoutingRuleInner { - pub fn validate(&self) -> Result<(), Error> { - let has_prefix = self - .condition - .as_ref() - .and_then(|c| c.prefix.as_ref()) - .is_some(); - self.redirect.validate(has_prefix) - } -} - -impl Redirect { - pub fn validate(&self, has_prefix: bool) -> Result<(), Error> { - if self.replace_prefix.is_some() { - if self.replace_full.is_some() { - return Err(Error::BadRequest( - "Bad XML: both ReplaceKeyPrefixWith and ReplaceKeyWith are set".to_owned(), - )); - } - if !has_prefix { - return Err(Error::BadRequest( - "Bad XML: ReplaceKeyPrefixWith is set, but KeyPrefixEquals isn't".to_owned(), - )); - } - } - if let Some(ref protocol) = self.protocol { - if protocol.0 != "http" && protocol.0 != "https" { - return Err(Error::BadRequest("Bad XML: invalid protocol".to_owned())); - } - } - // TODO there are probably more invalid cases, but which ones? - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use quick_xml::de::from_str; - - #[test] - fn test_deserialize() -> Result<(), Error> { - let message = r#" -<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> - <ErrorDocument> - <Key>my-error-doc</Key> - </ErrorDocument> - <IndexDocument> - <Suffix>my-index</Suffix> - </IndexDocument> - <RedirectAllRequestsTo> - <HostName>garage.tld</HostName> - <Protocol>https</Protocol> - </RedirectAllRequestsTo> - <RoutingRules> - <RoutingRule> - <Condition> - <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals> - <KeyPrefixEquals>prefix1</KeyPrefixEquals> - </Condition> - <Redirect> - <HostName>gara.ge</HostName> - <Protocol>http</Protocol> - <HttpRedirectCode>303</HttpRedirectCode> - <ReplaceKeyPrefixWith>prefix2</ReplaceKeyPrefixWith> - <ReplaceKeyWith>fullkey</ReplaceKeyWith> - </Redirect> - </RoutingRule> - </RoutingRules> -</WebsiteConfiguration> -"#; - let conf: WebsiteConfiguration = from_str(message).unwrap(); - let ref_value = WebsiteConfiguration { - xmlns: (), - error_document: Some(Key { - key: Value("my-error-doc".to_owned()), - }), - index_document: Some(Suffix { - suffix: Value("my-index".to_owned()), - }), - redirect_all_requests_to: Some(Target { - hostname: Value("garage.tld".to_owned()), - protocol: Some(Value("https".to_owned())), - }), - routing_rules: Some(vec![RoutingRule { - inner: RoutingRuleInner { - condition: Some(Condition { - http_error_code: Some(IntValue(404)), - prefix: Some(Value("prefix1".to_owned())), - }), - redirect: Redirect { - hostname: Some(Value("gara.ge".to_owned())), - protocol: Some(Value("http".to_owned())), - http_redirect_code: Some(IntValue(303)), - replace_prefix: Some(Value("prefix2".to_owned())), - replace_full: Some(Value("fullkey".to_owned())), - }, - }, - }]), - }; - assert_eq!
{ - ref_value, - conf - } - - let message2 = to_xml_with_header(&ref_value)?; - - let cleanup = |c: &str| c.replace(char::is_whitespace, ""); - assert_eq!(cleanup(message), cleanup(&message2)); - - Ok(()) - } -} diff --git a/src/api/s3_xml.rs b/src/api/s3_xml.rs deleted file mode 100644 index 75ec4559..00000000 --- a/src/api/s3_xml.rs +++ /dev/null @@ -1,844 +0,0 @@ -use quick_xml::se::to_string; -use serde::{Deserialize, Serialize, Serializer}; - -use crate::Error as ApiError; - -pub fn to_xml_with_header(x: &T) -> Result { - let mut xml = r#""#.to_string(); - xml.push_str(&to_string(x)?); - Ok(xml) -} - -pub fn xmlns_tag(_v: &(), s: S) -> Result { - s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/") -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct Value(#[serde(rename = "$value")] pub String); - -impl From<&str> for Value { - fn from(s: &str) -> Value { - Value(s.to_string()) - } -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -pub struct IntValue(#[serde(rename = "$value")] pub i64); - -#[derive(Debug, Serialize, PartialEq)] -pub struct Bucket { - #[serde(rename = "CreationDate")] - pub creation_date: Value, - #[serde(rename = "Name")] - pub name: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct Owner { - #[serde(rename = "DisplayName")] - pub display_name: Value, - #[serde(rename = "ID")] - pub id: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct BucketList { - #[serde(rename = "Bucket")] - pub entries: Vec, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct ListAllMyBucketsResult { - #[serde(rename = "Buckets")] - pub buckets: BucketList, - #[serde(rename = "Owner")] - pub owner: Owner, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct LocationConstraint { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "$value")] - pub region: String, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct Deleted { - #[serde(rename = "Key")] - pub key: Value, - #[serde(rename = "VersionId")] - pub version_id: Value, - #[serde(rename = "DeleteMarkerVersionId")] - pub delete_marker_version_id: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct Error { - #[serde(rename = "Code")] - pub code: Value, - #[serde(rename = "Message")] - pub message: Value, - #[serde(rename = "Resource")] - pub resource: Option, - #[serde(rename = "Region")] - pub region: Option, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct DeleteError { - #[serde(rename = "Code")] - pub code: Value, - #[serde(rename = "Key")] - pub key: Option, - #[serde(rename = "Message")] - pub message: Value, - #[serde(rename = "VersionId")] - pub version_id: Option, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct DeleteResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Deleted")] - pub deleted: Vec, - #[serde(rename = "Error")] - pub errors: Vec, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct InitiateMultipartUploadResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Bucket")] - pub bucket: Value, - #[serde(rename = "Key")] - pub key: Value, - #[serde(rename = "UploadId")] - pub upload_id: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct CompleteMultipartUploadResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Location")] - pub location: Option, - #[serde(rename = "Bucket")] - pub bucket: Value, - #[serde(rename = "Key")] - pub key: Value, 
- #[serde(rename = "ETag")] - pub etag: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct Initiator { - #[serde(rename = "DisplayName")] - pub display_name: Value, - #[serde(rename = "ID")] - pub id: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct ListMultipartItem { - #[serde(rename = "Initiated")] - pub initiated: Value, - #[serde(rename = "Initiator")] - pub initiator: Initiator, - #[serde(rename = "Key")] - pub key: Value, - #[serde(rename = "UploadId")] - pub upload_id: Value, - #[serde(rename = "Owner")] - pub owner: Owner, - #[serde(rename = "StorageClass")] - pub storage_class: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct ListMultipartUploadsResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Bucket")] - pub bucket: Value, - #[serde(rename = "KeyMarker")] - pub key_marker: Option, - #[serde(rename = "UploadIdMarker")] - pub upload_id_marker: Option, - #[serde(rename = "NextKeyMarker")] - pub next_key_marker: Option, - #[serde(rename = "NextUploadIdMarker")] - pub next_upload_id_marker: Option, - #[serde(rename = "Prefix")] - pub prefix: Value, - #[serde(rename = "Delimiter")] - pub delimiter: Option, - #[serde(rename = "MaxUploads")] - pub max_uploads: IntValue, - #[serde(rename = "IsTruncated")] - pub is_truncated: Value, - #[serde(rename = "Upload")] - pub upload: Vec, - #[serde(rename = "CommonPrefixes")] - pub common_prefixes: Vec, - #[serde(rename = "EncodingType")] - pub encoding_type: Option, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct PartItem { - #[serde(rename = "ETag")] - pub etag: Value, - #[serde(rename = "LastModified")] - pub last_modified: Value, - #[serde(rename = "PartNumber")] - pub part_number: IntValue, - #[serde(rename = "Size")] - pub size: IntValue, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct ListPartsResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Bucket")] - pub bucket: Value, - #[serde(rename = "Key")] - pub key: Value, - #[serde(rename = "UploadId")] - pub upload_id: Value, - #[serde(rename = "PartNumberMarker")] - pub part_number_marker: Option, - #[serde(rename = "NextPartNumberMarker")] - pub next_part_number_marker: Option, - #[serde(rename = "MaxParts")] - pub max_parts: IntValue, - #[serde(rename = "IsTruncated")] - pub is_truncated: Value, - #[serde(rename = "Part", default)] - pub parts: Vec, - #[serde(rename = "Initiator")] - pub initiator: Initiator, - #[serde(rename = "Owner")] - pub owner: Owner, - #[serde(rename = "StorageClass")] - pub storage_class: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct ListBucketItem { - #[serde(rename = "Key")] - pub key: Value, - #[serde(rename = "LastModified")] - pub last_modified: Value, - #[serde(rename = "ETag")] - pub etag: Value, - #[serde(rename = "Size")] - pub size: IntValue, - #[serde(rename = "StorageClass")] - pub storage_class: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct CommonPrefix { - #[serde(rename = "Prefix")] - pub prefix: Value, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct ListBucketResult { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Name")] - pub name: Value, - #[serde(rename = "Prefix")] - pub prefix: Value, - #[serde(rename = "Marker")] - pub marker: Option, - #[serde(rename = "NextMarker")] - pub next_marker: Option, - #[serde(rename = "StartAfter")] - pub start_after: Option, - #[serde(rename = "ContinuationToken")] - pub continuation_token: Option, - 
#[serde(rename = "NextContinuationToken")] - pub next_continuation_token: Option, - #[serde(rename = "KeyCount")] - pub key_count: Option, - #[serde(rename = "MaxKeys")] - pub max_keys: IntValue, - #[serde(rename = "Delimiter")] - pub delimiter: Option, - #[serde(rename = "EncodingType")] - pub encoding_type: Option, - #[serde(rename = "IsTruncated")] - pub is_truncated: Value, - #[serde(rename = "Contents")] - pub contents: Vec, - #[serde(rename = "CommonPrefixes")] - pub common_prefixes: Vec, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct VersioningConfiguration { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Status")] - pub status: Option, -} - -#[derive(Debug, Serialize, PartialEq)] -pub struct PostObject { - #[serde(serialize_with = "xmlns_tag")] - pub xmlns: (), - #[serde(rename = "Location")] - pub location: Value, - #[serde(rename = "Bucket")] - pub bucket: Value, - #[serde(rename = "Key")] - pub key: Value, - #[serde(rename = "ETag")] - pub etag: Value, -} - -#[cfg(test)] -mod tests { - use super::*; - - use garage_util::time::*; - - #[test] - fn error_message() -> Result<(), ApiError> { - let error = Error { - code: Value("TestError".to_string()), - message: Value("A dummy error message".to_string()), - resource: Some(Value("/bucket/a/plop".to_string())), - region: Some(Value("garage".to_string())), - }; - assert_eq!( - to_xml_with_header(&error)?, - "\ -\ - TestError\ - A dummy error message\ - /bucket/a/plop\ - garage\ -" - ); - Ok(()) - } - - #[test] - fn list_all_my_buckets_result() -> Result<(), ApiError> { - let list_buckets = ListAllMyBucketsResult { - owner: Owner { - display_name: Value("owner_name".to_string()), - id: Value("qsdfjklm".to_string()), - }, - buckets: BucketList { - entries: vec![ - Bucket { - creation_date: Value(msec_to_rfc3339(0)), - name: Value("bucket_A".to_string()), - }, - Bucket { - creation_date: Value(msec_to_rfc3339(3600 * 24 * 1000)), - name: Value("bucket_B".to_string()), - }, - ], - }, - }; - assert_eq!( - to_xml_with_header(&list_buckets)?, - "\ -\ - \ - \ - 1970-01-01T00:00:00.000Z\ - bucket_A\ - \ - \ - 1970-01-02T00:00:00.000Z\ - bucket_B\ - \ - \ - \ - owner_name\ - qsdfjklm\ - \ -" - ); - Ok(()) - } - - #[test] - fn get_bucket_location_result() -> Result<(), ApiError> { - let get_bucket_location = LocationConstraint { - xmlns: (), - region: "garage".to_string(), - }; - assert_eq!( - to_xml_with_header(&get_bucket_location)?, - "\ -garage" - ); - Ok(()) - } - - #[test] - fn get_bucket_versioning_result() -> Result<(), ApiError> { - let get_bucket_versioning = VersioningConfiguration { - xmlns: (), - status: None, - }; - assert_eq!( - to_xml_with_header(&get_bucket_versioning)?, - "\ -" - ); - let get_bucket_versioning2 = VersioningConfiguration { - xmlns: (), - status: Some(Value("Suspended".to_string())), - }; - assert_eq!( - to_xml_with_header(&get_bucket_versioning2)?, - "\ -Suspended" - ); - - Ok(()) - } - - #[test] - fn delete_result() -> Result<(), ApiError> { - let delete_result = DeleteResult { - xmlns: (), - deleted: vec![ - Deleted { - key: Value("a/plop".to_string()), - version_id: Value("qsdfjklm".to_string()), - delete_marker_version_id: Value("wxcvbn".to_string()), - }, - Deleted { - key: Value("b/plip".to_string()), - version_id: Value("1234".to_string()), - delete_marker_version_id: Value("4321".to_string()), - }, - ], - errors: vec![ - DeleteError { - code: Value("NotFound".to_string()), - key: Some(Value("c/plap".to_string())), - message: Value("Object c/plap not 
found".to_string()), - version_id: None, - }, - DeleteError { - code: Value("Forbidden".to_string()), - key: Some(Value("d/plep".to_string())), - message: Value("Not authorized".to_string()), - version_id: Some(Value("789".to_string())), - }, - ], - }; - assert_eq!( - to_xml_with_header(&delete_result)?, - "\ -\ - \ - a/plop\ - qsdfjklm\ - wxcvbn\ - \ - \ - b/plip\ - 1234\ - 4321\ - \ - \ - NotFound\ - c/plap\ - Object c/plap not found\ - \ - \ - Forbidden\ - d/plep\ - Not authorized\ - 789\ - \ -" - ); - Ok(()) - } - - #[test] - fn initiate_multipart_upload_result() -> Result<(), ApiError> { - let result = InitiateMultipartUploadResult { - xmlns: (), - bucket: Value("mybucket".to_string()), - key: Value("a/plop".to_string()), - upload_id: Value("azerty".to_string()), - }; - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - mybucket\ - a/plop\ - azerty\ -" - ); - Ok(()) - } - - #[test] - fn complete_multipart_upload_result() -> Result<(), ApiError> { - let result = CompleteMultipartUploadResult { - xmlns: (), - location: Some(Value("https://garage.tld/mybucket/a/plop".to_string())), - bucket: Value("mybucket".to_string()), - key: Value("a/plop".to_string()), - etag: Value("\"3858f62230ac3c915f300c664312c11f-9\"".to_string()), - }; - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - https://garage.tld/mybucket/a/plop\ - mybucket\ - a/plop\ - "3858f62230ac3c915f300c664312c11f-9"\ -" - ); - Ok(()) - } - - #[test] - fn list_multipart_uploads_result() -> Result<(), ApiError> { - let result = ListMultipartUploadsResult { - xmlns: (), - bucket: Value("example-bucket".to_string()), - key_marker: None, - next_key_marker: None, - upload_id_marker: None, - encoding_type: None, - next_upload_id_marker: None, - upload: vec![], - delimiter: Some(Value("/".to_string())), - prefix: Value("photos/2006/".to_string()), - max_uploads: IntValue(1000), - is_truncated: Value("false".to_string()), - common_prefixes: vec![ - CommonPrefix { - prefix: Value("photos/2006/February/".to_string()), - }, - CommonPrefix { - prefix: Value("photos/2006/January/".to_string()), - }, - CommonPrefix { - prefix: Value("photos/2006/March/".to_string()), - }, - ], - }; - - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - example-bucket\ - photos/2006/\ - /\ - 1000\ - false\ - \ - photos/2006/February/\ - \ - \ - photos/2006/January/\ - \ - \ - photos/2006/March/\ - \ -" - ); - - Ok(()) - } - - #[test] - fn list_objects_v1_1() -> Result<(), ApiError> { - let result = ListBucketResult { - xmlns: (), - name: Value("example-bucket".to_string()), - prefix: Value("".to_string()), - marker: Some(Value("".to_string())), - next_marker: None, - start_after: None, - continuation_token: None, - next_continuation_token: None, - key_count: None, - max_keys: IntValue(1000), - encoding_type: None, - delimiter: Some(Value("/".to_string())), - is_truncated: Value("false".to_string()), - contents: vec![ListBucketItem { - key: Value("sample.jpg".to_string()), - last_modified: Value(msec_to_rfc3339(0)), - etag: Value("\"bf1d737a4d46a19f3bced6905cc8b902\"".to_string()), - size: IntValue(142863), - storage_class: Value("STANDARD".to_string()), - }], - common_prefixes: vec![CommonPrefix { - prefix: Value("photos/".to_string()), - }], - }; - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - example-bucket\ - \ - \ - 1000\ - /\ - false\ - \ - sample.jpg\ - 1970-01-01T00:00:00.000Z\ - "bf1d737a4d46a19f3bced6905cc8b902"\ - 142863\ - STANDARD\ - \ - \ - photos/\ - \ -" - ); - Ok(()) - } - - #[test] - fn list_objects_v1_2() -> Result<(), 
ApiError> { - let result = ListBucketResult { - xmlns: (), - name: Value("example-bucket".to_string()), - prefix: Value("photos/2006/".to_string()), - marker: Some(Value("".to_string())), - next_marker: None, - start_after: None, - continuation_token: None, - next_continuation_token: None, - key_count: None, - max_keys: IntValue(1000), - delimiter: Some(Value("/".to_string())), - encoding_type: None, - is_truncated: Value("false".to_string()), - contents: vec![], - common_prefixes: vec![ - CommonPrefix { - prefix: Value("photos/2006/February/".to_string()), - }, - CommonPrefix { - prefix: Value("photos/2006/January/".to_string()), - }, - ], - }; - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - example-bucket\ - photos/2006/\ - \ - 1000\ - /\ - false\ - \ - photos/2006/February/\ - \ - \ - photos/2006/January/\ - \ -" - ); - Ok(()) - } - - #[test] - fn list_objects_v2_1() -> Result<(), ApiError> { - let result = ListBucketResult { - xmlns: (), - name: Value("quotes".to_string()), - prefix: Value("E".to_string()), - marker: None, - next_marker: None, - start_after: Some(Value("ExampleGuide.pdf".to_string())), - continuation_token: None, - next_continuation_token: None, - key_count: None, - max_keys: IntValue(3), - delimiter: None, - encoding_type: None, - is_truncated: Value("false".to_string()), - contents: vec![ListBucketItem { - key: Value("ExampleObject.txt".to_string()), - last_modified: Value(msec_to_rfc3339(0)), - etag: Value("\"599bab3ed2c697f1d26842727561fd94\"".to_string()), - size: IntValue(857), - storage_class: Value("REDUCED_REDUNDANCY".to_string()), - }], - common_prefixes: vec![], - }; - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - quotes\ - E\ - ExampleGuide.pdf\ - 3\ - false\ - \ - ExampleObject.txt\ - 1970-01-01T00:00:00.000Z\ - "599bab3ed2c697f1d26842727561fd94"\ - 857\ - REDUCED_REDUNDANCY\ - \ -" - ); - Ok(()) - } - - #[test] - fn list_objects_v2_2() -> Result<(), ApiError> { - let result = ListBucketResult { - xmlns: (), - name: Value("bucket".to_string()), - prefix: Value("".to_string()), - marker: None, - next_marker: None, - start_after: None, - continuation_token: Some(Value( - "1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=".to_string(), - )), - next_continuation_token: Some(Value("qsdfjklm".to_string())), - key_count: Some(IntValue(112)), - max_keys: IntValue(1000), - delimiter: None, - encoding_type: None, - is_truncated: Value("false".to_string()), - contents: vec![ListBucketItem { - key: Value("happyfacex.jpg".to_string()), - last_modified: Value(msec_to_rfc3339(0)), - etag: Value("\"70ee1738b6b21e2c8a43f3a5ab0eee71\"".to_string()), - size: IntValue(1111), - storage_class: Value("STANDARD".to_string()), - }], - common_prefixes: vec![], - }; - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - bucket\ - \ - 1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=\ - qsdfjklm\ - 112\ - 1000\ - false\ - \ - happyfacex.jpg\ - 1970-01-01T00:00:00.000Z\ - "70ee1738b6b21e2c8a43f3a5ab0eee71"\ - 1111\ - STANDARD\ - \ -" - ); - Ok(()) - } - - #[test] - fn list_parts() -> Result<(), ApiError> { - let result = ListPartsResult { - xmlns: (), - bucket: Value("example-bucket".to_string()), - key: Value("example-object".to_string()), - upload_id: Value( - "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA".to_string(), - ), - part_number_marker: Some(IntValue(1)), - next_part_number_marker: Some(IntValue(3)), - max_parts: IntValue(2), - is_truncated: Value("true".to_string()), - parts: vec![ - PartItem { - etag: 
Value("\"7778aef83f66abc1fa1e8477f296d394\"".to_string()), - last_modified: Value("2010-11-10T20:48:34.000Z".to_string()), - part_number: IntValue(2), - size: IntValue(10485760), - }, - PartItem { - etag: Value("\"aaaa18db4cc2f85cedef654fccc4a4x8\"".to_string()), - last_modified: Value("2010-11-10T20:48:33.000Z".to_string()), - part_number: IntValue(3), - size: IntValue(10485760), - }, - ], - initiator: Initiator { - display_name: Value("umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx".to_string()), - id: Value( - "arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx" - .to_string(), - ), - }, - owner: Owner { - display_name: Value("someName".to_string()), - id: Value( - "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a".to_string(), - ), - }, - storage_class: Value("STANDARD".to_string()), - }; - - assert_eq!( - to_xml_with_header(&result)?, - "\ -\ - example-bucket\ - example-object\ - XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA\ - 1\ - 3\ - 2\ - true\ - \ - "7778aef83f66abc1fa1e8477f296d394"\ - 2010-11-10T20:48:34.000Z\ - 2\ - 10485760\ - \ - \ - "aaaa18db4cc2f85cedef654fccc4a4x8"\ - 2010-11-10T20:48:33.000Z\ - 3\ - 10485760\ - \ - \ - umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx\ - arn:aws:iam::111122223333:user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx\ - \ - \ - someName\ - 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a\ - \ - STANDARD\ -" - ); - - Ok(()) - } -} diff --git a/src/api/signature/mod.rs b/src/api/signature/mod.rs index ebdee6da..5646f4fa 100644 --- a/src/api/signature/mod.rs +++ b/src/api/signature/mod.rs @@ -42,6 +42,11 @@ pub fn signing_hmac( Ok(hmac) } -pub fn compute_scope(datetime: &DateTime, region: &str) -> String { - format!("{}/{}/s3/aws4_request", datetime.format(SHORT_DATE), region,) +pub fn compute_scope(datetime: &DateTime, region: &str, service: &str) -> String { + format!( + "{}/{}/{}/aws4_request", + datetime.format(SHORT_DATE), + region, + service + ) } diff --git a/src/api/signature/payload.rs b/src/api/signature/payload.rs index 2a41b307..9137dd2d 100644 --- a/src/api/signature/payload.rs +++ b/src/api/signature/payload.rs @@ -11,14 +11,15 @@ use garage_util::data::Hash; use garage_model::garage::Garage; use garage_model::key_table::*; -use super::signing_hmac; -use super::{LONG_DATETIME, SHORT_DATE}; +use super::LONG_DATETIME; +use super::{compute_scope, signing_hmac}; use crate::encoding::uri_encode; use crate::error::*; pub async fn check_payload_signature( garage: &Garage, + service: &str, request: &Request, ) -> Result<(Option, Option), Error> { let mut headers = HashMap::new(); @@ -64,6 +65,7 @@ pub async fn check_payload_signature( let key = verify_v4( garage, + service, &authorization.credential, &authorization.date, &authorization.signature, @@ -281,6 +283,7 @@ pub fn parse_date(date: &str) -> Result, Error> { pub async fn verify_v4( garage: &Garage, + service: &str, credential: &str, date: &DateTime, signature: &str, @@ -288,11 +291,7 @@ pub async fn verify_v4( ) -> Result { let (key_id, scope) = parse_credential(credential)?; - let scope_expected = format!( - "{}/{}/s3/aws4_request", - date.format(SHORT_DATE), - garage.config.s3_api.s3_region - ); + let scope_expected = compute_scope(date, &garage.config.s3_api.s3_region, service); if scope != scope_expected { return Err(Error::AuthorizationHeaderMalformed(scope.to_string())); } @@ -309,7 +308,7 @@ pub async fn verify_v4( date, &key_p.secret_key, &garage.config.s3_api.s3_region, - "s3", + service, ) 
diff --git a/src/api/signature/streaming.rs b/src/api/signature/streaming.rs
index 969a45d6..ded9d993 100644
--- a/src/api/signature/streaming.rs
+++ b/src/api/signature/streaming.rs
@@ -1,19 +1,68 @@
 use std::pin::Pin;
 
-use chrono::{DateTime, Utc};
+use chrono::{DateTime, NaiveDateTime, Utc};
 use futures::prelude::*;
 use futures::task;
+use garage_model::key_table::Key;
+use hmac::Mac;
 use hyper::body::Bytes;
+use hyper::{Body, Request};
 
 use garage_util::data::Hash;
-use hmac::Mac;
 
-use super::sha256sum;
-use super::HmacSha256;
-use super::LONG_DATETIME;
+use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};
 
 use crate::error::*;
 
+pub fn parse_streaming_body(
+	api_key: &Key,
+	req: Request<Body>,
+	content_sha256: &mut Option<Hash>,
+	region: &str,
+	service: &str,
+) -> Result<Request<Body>, Error> {
+	match req.headers().get("x-amz-content-sha256") {
+		Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
+			let signature = content_sha256
+				.take()
+				.ok_or_bad_request("No signature provided")?;
+
+			let secret_key = &api_key
+				.state
+				.as_option()
+				.ok_or_internal_error("Deleted key state")?
+				.secret_key;
+
+			let date = req
+				.headers()
+				.get("x-amz-date")
+				.ok_or_bad_request("Missing X-Amz-Date field")?
+				.to_str()?;
+			let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
+				.ok_or_bad_request("Invalid date")?;
+			let date: DateTime<Utc> = DateTime::from_utc(date, Utc);
+
+			let scope = compute_scope(&date, region, service);
+			let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
+				.ok_or_internal_error("Unable to build signing HMAC")?;
+
+			Ok(req.map(move |body| {
+				Body::wrap_stream(
+					SignedPayloadStream::new(
+						body.map_err(Error::from),
+						signing_hmac,
+						date,
+						&scope,
+						signature,
+					)
+					.map_err(Error::from),
+				)
+			}))
+		}
+		_ => Ok(req),
+	}
+}
+
 /// Result of `sha256("")`
 const EMPTY_STRING_HEX_DIGEST: &str =
 	"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
@@ -295,7 +344,7 @@ mod tests {
 		.with_timezone(&Utc);
 		let secret_key = "test";
 		let region = "test";
-		let scope = crate::signature::compute_scope(&datetime, region);
+		let scope = crate::signature::compute_scope(&datetime, region, "s3");
 
 		let signing_hmac =
 			crate::signature::signing_hmac(&datetime, secret_key, region, "s3").unwrap();
diff --git a/src/block/manager.rs b/src/block/manager.rs
index 1c04a335..9b2d9cad 100644
--- a/src/block/manager.rs
+++ b/src/block/manager.rs
@@ -132,7 +132,7 @@ impl BlockManager {
 
 		let endpoint = system
 			.netapp
-			.endpoint("garage_model/block.rs/Rpc".to_string());
+			.endpoint("garage_block/manager.rs/Rpc".to_string());
 
 		let manager_locked = BlockManagerLocked();
 
diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml
index 59f402ff..3b69d7bc 100644
--- a/src/garage/Cargo.toml
+++ b/src/garage/Cargo.toml
@@ -63,3 +63,11 @@ hyper = { version = "0.14", features = ["client", "http1", "runtime"] }
 sha2 = "0.9"
 static_init = "1.0"
 
+assert-json-diff = "2.0"
+serde_json = "1.0"
+base64 = "0.13"
+
+
+[features]
+kubernetes-discovery = [ "garage_rpc/kubernetes-discovery" ]
+k2v = [ "garage_util/k2v", "garage_api/k2v" ]
diff --git a/src/garage/admin.rs b/src/garage/admin.rs
index 0b20bb20..af0c3f22 100644
--- a/src/garage/admin.rs
+++ b/src/garage/admin.rs
@@ -21,8 +21,8 @@
 use garage_model::garage::Garage;
 use garage_model::helper::error::{Error, OkOrBadRequest};
 use garage_model::key_table::*;
 use garage_model::migrate::Migrate;
-use
garage_model::object_table::ObjectFilter; use garage_model::permission::*; +use garage_model::s3::object_table::ObjectFilter; use crate::cli::*; use crate::repair::Repair; @@ -80,7 +80,13 @@ impl AdminRpcHandler { let buckets = self .garage .bucket_table - .get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000) + .get_range( + &EmptyKey, + None, + Some(DeletedFilter::NotDeleted), + 10000, + EnumerationOrder::Forward, + ) .await?; Ok(AdminRpc::BucketList(buckets)) } @@ -210,7 +216,13 @@ impl AdminRpcHandler { let objects = self .garage .object_table - .get_range(&bucket_id, None, Some(ObjectFilter::IsData), 10) + .get_range( + &bucket_id, + None, + Some(ObjectFilter::IsData), + 10, + EnumerationOrder::Forward, + ) .await?; if !objects.is_empty() { return Err(Error::BadRequest(format!( @@ -445,6 +457,7 @@ impl AdminRpcHandler { None, Some(KeyFilter::Deleted(DeletedFilter::NotDeleted)), 10000, + EnumerationOrder::Forward, ) .await? .iter() diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index a90277a0..2a799868 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -85,13 +85,14 @@ pub async fn cmd_status(rpc_cli: &Endpoint, rpc_host: NodeID) -> format_table(healthy_nodes); let status_keys = status.iter().map(|adv| adv.id).collect::>(); - let failure_case_1 = status.iter().any(|adv| !adv.is_up); + let failure_case_1 = status + .iter() + .any(|adv| !adv.is_up && matches!(layout.roles.get(&adv.id), Some(NodeRoleV(Some(_))))); let failure_case_2 = layout .roles .items() .iter() - .filter(|(_, _, v)| v.0.is_some()) - .any(|(id, _, _)| !status_keys.contains(id)); + .any(|(id, _, v)| !status_keys.contains(id) && v.0.is_some()); if failure_case_1 || failure_case_2 { println!("\n==== FAILED NODES ===="); let mut failed_nodes = diff --git a/src/garage/repair.rs b/src/garage/repair.rs index 3666ca8f..830eac71 100644 --- a/src/garage/repair.rs +++ b/src/garage/repair.rs @@ -2,10 +2,10 @@ use std::sync::Arc; use tokio::sync::watch; -use garage_model::block_ref_table::*; use garage_model::garage::Garage; -use garage_model::object_table::*; -use garage_model::version_table::*; +use garage_model::s3::block_ref_table::*; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; use garage_table::*; use garage_util::error::Error; diff --git a/src/garage/server.rs b/src/garage/server.rs index 58c9e782..24bb25b3 100644 --- a/src/garage/server.rs +++ b/src/garage/server.rs @@ -8,10 +8,13 @@ use garage_util::error::Error; use garage_admin::metrics::*; use garage_admin::tracing_setup::*; -use garage_api::run_api_server; +use garage_api::s3::api_server::S3ApiServer; use garage_model::garage::Garage; use garage_web::run_web_server; +#[cfg(feature = "k2v")] +use garage_api::k2v::api_server::K2VApiServer; + use crate::admin::*; async fn wait_from(mut chan: watch::Receiver) { @@ -56,12 +59,21 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> { info!("Create admin RPC handler..."); AdminRpcHandler::new(garage.clone()); - info!("Initializing API server..."); - let api_server = tokio::spawn(run_api_server( + info!("Initializing S3 API server..."); + let s3_api_server = tokio::spawn(S3ApiServer::run( garage.clone(), wait_from(watch_cancel.clone()), )); + #[cfg(feature = "k2v")] + let k2v_api_server = { + info!("Initializing K2V API server..."); + tokio::spawn(K2VApiServer::run( + garage.clone(), + wait_from(watch_cancel.clone()), + )) + }; + info!("Initializing web server..."); let web_server = tokio::spawn(run_web_server( garage.clone(), @@ -80,8 
+92,12 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> { // Stuff runs // When a cancel signal is sent, stuff stops - if let Err(e) = api_server.await? { - warn!("API server exited with error: {}", e); + if let Err(e) = s3_api_server.await? { + warn!("S3 API server exited with error: {}", e); + } + #[cfg(feature = "k2v")] + if let Err(e) = k2v_api_server.await? { + warn!("K2V API server exited with error: {}", e); } if let Err(e) = web_server.await? { warn!("Web server exited with error: {}", e); diff --git a/src/garage/tests/common/client.rs b/src/garage/tests/common/client.rs index c5ddc6e5..212588b5 100644 --- a/src/garage/tests/common/client.rs +++ b/src/garage/tests/common/client.rs @@ -10,7 +10,7 @@ pub fn build_client(instance: &Instance) -> Client { None, "garage-integ-test", ); - let endpoint = Endpoint::immutable(instance.uri()); + let endpoint = Endpoint::immutable(instance.s3_uri()); let config = Config::builder() .region(super::REGION) diff --git a/src/garage/tests/common/custom_requester.rs b/src/garage/tests/common/custom_requester.rs index 580691a1..1700cc90 100644 --- a/src/garage/tests/common/custom_requester.rs +++ b/src/garage/tests/common/custom_requester.rs @@ -17,14 +17,25 @@ use garage_api::signature; pub struct CustomRequester { key: Key, uri: Uri, + service: &'static str, client: Client, } impl CustomRequester { - pub fn new(instance: &Instance) -> Self { + pub fn new_s3(instance: &Instance) -> Self { CustomRequester { key: instance.key.clone(), - uri: instance.uri(), + uri: instance.s3_uri(), + service: "s3", + client: Client::new(), + } + } + + pub fn new_k2v(instance: &Instance) -> Self { + CustomRequester { + key: instance.key.clone(), + uri: instance.k2v_uri(), + service: "k2v", client: Client::new(), } } @@ -32,6 +43,7 @@ impl CustomRequester { pub fn builder(&self, bucket: String) -> RequestBuilder<'_> { RequestBuilder { requester: self, + service: self.service, bucket, method: Method::GET, path: String::new(), @@ -47,6 +59,7 @@ impl CustomRequester { pub struct RequestBuilder<'a> { requester: &'a CustomRequester, + service: &'static str, bucket: String, method: Method, path: String, @@ -59,13 +72,17 @@ pub struct RequestBuilder<'a> { } impl<'a> RequestBuilder<'a> { + pub fn service(&mut self, service: &'static str) -> &mut Self { + self.service = service; + self + } pub fn method(&mut self, method: Method) -> &mut Self { self.method = method; self } - pub fn path(&mut self, path: String) -> &mut Self { - self.path = path; + pub fn path(&mut self, path: impl ToString) -> &mut Self { + self.path = path.to_string(); self } @@ -74,16 +91,38 @@ impl<'a> RequestBuilder<'a> { self } + pub fn query_param(&mut self, param: T, value: Option) -> &mut Self + where + T: ToString, + U: ToString, + { + self.query_params + .insert(param.to_string(), value.as_ref().map(ToString::to_string)); + self + } + pub fn signed_headers(&mut self, signed_headers: HashMap) -> &mut Self { self.signed_headers = signed_headers; self } + pub fn signed_header(&mut self, name: impl ToString, value: impl ToString) -> &mut Self { + self.signed_headers + .insert(name.to_string(), value.to_string()); + self + } + pub fn unsigned_headers(&mut self, unsigned_headers: HashMap) -> &mut Self { self.unsigned_headers = unsigned_headers; self } + pub fn unsigned_header(&mut self, name: impl ToString, value: impl ToString) -> &mut Self { + self.unsigned_headers + .insert(name.to_string(), value.to_string()); + self + } + pub fn body(&mut self, body: Vec) -> &mut Self { self.body = 
body; self @@ -106,24 +145,24 @@ impl<'a> RequestBuilder<'a> { let query = query_param_to_string(&self.query_params); let (host, path) = if self.vhost_style { ( - format!("{}.s3.garage", self.bucket), + format!("{}.{}.garage", self.bucket, self.service), format!("{}{}", self.path, query), ) } else { ( - "s3.garage".to_owned(), + format!("{}.garage", self.service), format!("{}/{}{}", self.bucket, self.path, query), ) }; let uri = format!("{}{}", self.requester.uri, path); let now = Utc::now(); - let scope = signature::compute_scope(&now, super::REGION.as_ref()); + let scope = signature::compute_scope(&now, super::REGION.as_ref(), self.service); let mut signer = signature::signing_hmac( &now, &self.requester.key.secret, super::REGION.as_ref(), - "s3", + self.service, ) .unwrap(); let streaming_signer = signer.clone(); diff --git a/src/garage/tests/common/garage.rs b/src/garage/tests/common/garage.rs index 88c51501..44d727f9 100644 --- a/src/garage/tests/common/garage.rs +++ b/src/garage/tests/common/garage.rs @@ -22,7 +22,9 @@ pub struct Instance { process: process::Child, pub path: PathBuf, pub key: Key, - pub api_port: u16, + pub s3_port: u16, + pub k2v_port: u16, + pub web_port: u16, } impl Instance { @@ -58,9 +60,12 @@ rpc_secret = "{secret}" [s3_api] s3_region = "{region}" -api_bind_addr = "127.0.0.1:{api_port}" +api_bind_addr = "127.0.0.1:{s3_port}" root_domain = ".s3.garage" +[k2v_api] +api_bind_addr = "127.0.0.1:{k2v_port}" + [s3_web] bind_addr = "127.0.0.1:{web_port}" root_domain = ".web.garage" @@ -72,10 +77,11 @@ api_bind_addr = "127.0.0.1:{admin_port}" path = path.display(), secret = GARAGE_TEST_SECRET, region = super::REGION, - api_port = port, - rpc_port = port + 1, - web_port = port + 2, - admin_port = port + 3, + s3_port = port, + k2v_port = port + 1, + rpc_port = port + 2, + web_port = port + 3, + admin_port = port + 4, ); fs::write(path.join("config.toml"), config).expect("Could not write garage config file"); @@ -88,7 +94,7 @@ api_bind_addr = "127.0.0.1:{admin_port}" .arg("server") .stdout(stdout) .stderr(stderr) - .env("RUST_LOG", "garage=info,garage_api=debug") + .env("RUST_LOG", "garage=info,garage_api=trace") .spawn() .expect("Could not start garage"); @@ -96,7 +102,9 @@ api_bind_addr = "127.0.0.1:{admin_port}" process: child, path, key: Key::default(), - api_port: port, + s3_port: port, + k2v_port: port + 1, + web_port: port + 3, } } @@ -147,8 +155,14 @@ api_bind_addr = "127.0.0.1:{admin_port}" String::from_utf8(output.stdout).unwrap() } - pub fn uri(&self) -> http::Uri { - format!("http://127.0.0.1:{api_port}", api_port = self.api_port) + pub fn s3_uri(&self) -> http::Uri { + format!("http://127.0.0.1:{s3_port}", s3_port = self.s3_port) + .parse() + .expect("Could not build garage endpoint URI") + } + + pub fn k2v_uri(&self) -> http::Uri { + format!("http://127.0.0.1:{k2v_port}", k2v_port = self.k2v_port) .parse() .expect("Could not build garage endpoint URI") } diff --git a/src/garage/tests/common/mod.rs b/src/garage/tests/common/mod.rs index 8f88c731..28874b02 100644 --- a/src/garage/tests/common/mod.rs +++ b/src/garage/tests/common/mod.rs @@ -17,18 +17,27 @@ pub struct Context { pub garage: &'static garage::Instance, pub client: Client, pub custom_request: CustomRequester, + pub k2v: K2VContext, +} + +pub struct K2VContext { + pub request: CustomRequester, } impl Context { fn new() -> Self { let garage = garage::instance(); let client = client::build_client(garage); - let custom_request = CustomRequester::new(garage); + let custom_request = 
CustomRequester::new_s3(garage); + let k2v_request = CustomRequester::new_k2v(garage); Context { garage, client, custom_request, + k2v: K2VContext { + request: k2v_request, + }, } } diff --git a/src/garage/tests/k2v/batch.rs b/src/garage/tests/k2v/batch.rs new file mode 100644 index 00000000..1182a298 --- /dev/null +++ b/src/garage/tests/k2v/batch.rs @@ -0,0 +1,525 @@ +use std::collections::HashMap; + +use crate::common; + +use assert_json_diff::assert_json_eq; +use serde_json::json; + +use super::json_body; +use hyper::Method; + +#[tokio::test] +async fn test_batch() { + let ctx = common::context(); + let bucket = ctx.create_bucket("test-k2v-batch"); + + let mut values = HashMap::new(); + values.insert("a", "initial test 1"); + values.insert("b", "initial test 2"); + values.insert("c", "initial test 3"); + values.insert("d.1", "initial test 4"); + values.insert("d.2", "initial test 5"); + values.insert("e", "initial test 6"); + let mut ct = HashMap::new(); + + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .body( + format!( + r#"[ + {{"pk": "root", "sk": "a", "ct": null, "v": "{}"}}, + {{"pk": "root", "sk": "b", "ct": null, "v": "{}"}}, + {{"pk": "root", "sk": "c", "ct": null, "v": "{}"}}, + {{"pk": "root", "sk": "d.1", "ct": null, "v": "{}"}}, + {{"pk": "root", "sk": "d.2", "ct": null, "v": "{}"}}, + {{"pk": "root", "sk": "e", "ct": null, "v": "{}"}} + ]"#, + base64::encode(values.get(&"a").unwrap()), + base64::encode(values.get(&"b").unwrap()), + base64::encode(values.get(&"c").unwrap()), + base64::encode(values.get(&"d.1").unwrap()), + base64::encode(values.get(&"d.2").unwrap()), + base64::encode(values.get(&"e").unwrap()), + ) + .into_bytes(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + for sk in ["a", "b", "c", "d.1", "d.2", "e"] { + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/octet-stream" + ); + ct.insert( + sk, + res.headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(), + ); + let res_body = hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res_body, values.get(sk).unwrap().as_bytes()); + } + + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("search", Option::<&str>::None) + .body( + br#"[ + {"partitionKey": "root"}, + {"partitionKey": "root", "start": "c"}, + {"partitionKey": "root", "start": "c", "reverse": true, "end": "a"}, + {"partitionKey": "root", "limit": 1}, + {"partitionKey": "root", "prefix": "d"} + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + let json_res = json_body(res).await; + assert_json_eq!( + json_res, + json!([ + { + "partitionKey": "root", + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]}, + {"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]}, + {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]}, + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": 
[base64::encode(values.get("d.1").unwrap())]}, + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]}, + {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]} + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": null, + "start": "c", + "end": null, + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]}, + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]}, + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]}, + {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]} + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": null, + "start": "c", + "end": "a", + "limit": null, + "reverse": true, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]}, + {"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": null, + "start": null, + "end": null, + "limit": 1, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]} + ], + "more": true, + "nextStart": "b", + }, + { + "partitionKey": "root", + "prefix": "d", + "start": null, + "end": null, + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]}, + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]} + ], + "more": false, + "nextStart": null, + }, + ]) + ); + + // Insert some new values + values.insert("c'", "new test 3"); + values.insert("d.1'", "new test 4"); + values.insert("d.2'", "new test 5"); + + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .body( + format!( + r#"[ + {{"pk": "root", "sk": "b", "ct": "{}", "v": null}}, + {{"pk": "root", "sk": "c", "ct": null, "v": "{}"}}, + {{"pk": "root", "sk": "d.1", "ct": "{}", "v": "{}"}}, + {{"pk": "root", "sk": "d.2", "ct": null, "v": "{}"}} + ]"#, + ct.get(&"b").unwrap(), + base64::encode(values.get(&"c'").unwrap()), + ct.get(&"d.1").unwrap(), + base64::encode(values.get(&"d.1'").unwrap()), + base64::encode(values.get(&"d.2'").unwrap()), + ) + .into_bytes(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + for sk in ["b", "c", "d.1", "d.2"] { + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + if sk == "b" { + assert_eq!(res.status(), 204); + } else { + assert_eq!(res.status(), 200); + } + ct.insert( + sk, + res.headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(), + ); + } + + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("search", Option::<&str>::None) + .body( + br#"[ + {"partitionKey": "root"}, + {"partitionKey": "root", "prefix": "d"}, + 
{"partitionKey": "root", "prefix": "d.", "end": "d.2"}, + {"partitionKey": "root", "prefix": "d.", "limit": 1}, + {"partitionKey": "root", "prefix": "d.", "start": "d.2", "limit": 1}, + {"partitionKey": "root", "prefix": "d.", "reverse": true}, + {"partitionKey": "root", "prefix": "d.", "start": "d.2", "reverse": true}, + {"partitionKey": "root", "prefix": "d.", "limit": 2} + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + let json_res = json_body(res).await; + assert_json_eq!( + json_res, + json!([ + { + "partitionKey": "root", + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]}, + {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]}, + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]}, + {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]} + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": "d", + "start": null, + "end": null, + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": "d.", + "start": null, + "end": "d.2", + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": "d.", + "start": null, + "end": null, + "limit": 1, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + ], + "more": true, + "nextStart": "d.2", + }, + { + "partitionKey": "root", + "prefix": "d.", + "start": "d.2", + "end": null, + "limit": 1, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": "d.", + "start": null, + "end": null, + "limit": null, + "reverse": true, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]}, + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": "d.", + "start": "d.2", + "end": null, + "limit": null, + 
"reverse": true, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]}, + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": "d.", + "start": null, + "end": null, + "limit": 2, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]}, + {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + ]) + ); + + // Test DeleteBatch + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("delete", Option::<&str>::None) + .body( + br#"[ + {"partitionKey": "root", "start": "a", "end": "c"}, + {"partitionKey": "root", "prefix": "d"} + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + let json_res = json_body(res).await; + assert_json_eq!( + json_res, + json!([ + { + "partitionKey": "root", + "prefix": null, + "start": "a", + "end": "c", + "singleItem": false, + "deletedItems": 1, + }, + { + "partitionKey": "root", + "prefix": "d", + "start": null, + "end": null, + "singleItem": false, + "deletedItems": 2, + }, + ]) + ); + + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("search", Option::<&str>::None) + .body( + br#"[ + {"partitionKey": "root"}, + {"partitionKey": "root", "reverse": true} + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + let json_res = json_body(res).await; + assert_json_eq!( + json_res, + json!([ + { + "partitionKey": "root", + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]}, + {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]} + ], + "more": false, + "nextStart": null, + }, + { + "partitionKey": "root", + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": true, + "conflictsOnly": false, + "tombstones": false, + "singleItem": false, + "items": [ + {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}, + {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]}, + ], + "more": false, + "nextStart": null, + }, + ]) + ); +} diff --git a/src/garage/tests/k2v/errorcodes.rs b/src/garage/tests/k2v/errorcodes.rs new file mode 100644 index 00000000..2fcc45bc --- /dev/null +++ b/src/garage/tests/k2v/errorcodes.rs @@ -0,0 +1,141 @@ +use crate::common; + +use hyper::Method; + +#[tokio::test] +async fn test_error_codes() { + let ctx = common::context(); + let bucket = ctx.create_bucket("test-k2v-error-codes"); + + // Regular insert should work (code 200) + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .method(Method::PUT) + .path("root") + .query_param("sort_key", Some("test1")) + .body(b"Hello, world!".to_vec()) 
+ .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // Insert with trash causality token: invalid request + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .method(Method::PUT) + .path("root") + .query_param("sort_key", Some("test1")) + .signed_header("x-garage-causality-token", "tra$sh") + .body(b"Hello, world!".to_vec()) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); + + // Search without partition key: invalid request + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("search", Option::<&str>::None) + .body( + br#"[ + {}, + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); + + // Search with start that is not in prefix: invalid request + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("search", Option::<&str>::None) + .body( + br#"[ + {"partition_key": "root", "prefix": "a", "start": "bx"}, + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); + + // Search with invalid json: 400 + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .query_param("search", Option::<&str>::None) + .body( + br#"[ + {"partition_key": "root" + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); + + // Batch insert with invalid causality token: 400 + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .body( + br#"[ + {"pk": "root", "sk": "a", "ct": "tra$h", "v": "aGVsbG8sIHdvcmxkCg=="} + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); + + // Batch insert with invalid data: 400 + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .body( + br#"[ + {"pk": "root", "sk": "a", "ct": null, "v": "aGVsbG8sIHdvcmx$Cg=="} + ]"# + .to_vec(), + ) + .method(Method::POST) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); + + // Poll with invalid causality token: 400 + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("test1")) + .query_param("causality_token", Some("tra$h")) + .query_param("timeout", Some("10")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 400); +} diff --git a/src/garage/tests/k2v/item.rs b/src/garage/tests/k2v/item.rs new file mode 100644 index 00000000..bf2b01f8 --- /dev/null +++ b/src/garage/tests/k2v/item.rs @@ -0,0 +1,719 @@ +use crate::common; + +use assert_json_diff::assert_json_eq; +use serde_json::json; + +use super::json_body; +use hyper::Method; + +#[tokio::test] +async fn test_items_and_indices() { + let ctx = common::context(); + let bucket = ctx.create_bucket("test-k2v-item-and-index"); + + // ReadIndex -- there should be nothing + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .send() + .await + .unwrap(); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!({ + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "partitionKeys": [], + "more": false, + "nextStart": null + }) + ); + + let content2_len = "_: hello universe".len(); + let content3_len = "_: concurrent value".len(); + + for (i, sk) in ["a", "b", "c", "d"].iter().enumerate() { + let content = format!("{}: hello world", sk).into_bytes(); + let content2 = format!("{}: hello universe", sk).into_bytes(); + let content3 = format!("{}: concurrent value", 
sk).into_bytes(); + + // Put initially, no causality token + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .body(content.clone()) + .method(Method::PUT) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // Get value back + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/octet-stream" + ); + let ct = res + .headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(); + let res_body = hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res_body, content); + + // ReadIndex -- now there should be some stuff + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .send() + .await + .unwrap(); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!({ + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "partitionKeys": [ + { + "pk": "root", + "entries": i+1, + "conflicts": i, + "values": i+i+1, + "bytes": i*(content2.len() + content3.len()) + content.len(), + } + ], + "more": false, + "nextStart": null + }) + ); + + // Put again, this time with causality token + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("x-garage-causality-token", ct.clone()) + .body(content2.clone()) + .method(Method::PUT) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // Get value back + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/octet-stream" + ); + let res_body = hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res_body, content2); + + // ReadIndex -- now there should be some stuff + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .send() + .await + .unwrap(); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!({ + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "partitionKeys": [ + { + "pk": "root", + "entries": i+1, + "conflicts": i, + "values": i+i+1, + "bytes": i*content3.len() + (i+1)*content2.len(), + } + ], + "more": false, + "nextStart": null + }) + ); + + // Put again with same CT, now we have concurrent values + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("x-garage-causality-token", ct.clone()) + .body(content3.clone()) + .method(Method::PUT) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // Get value back + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_json = json_body(res).await; + assert_json_eq!( + res_json, + [base64::encode(&content2), base64::encode(&content3)] + ); + + // ReadIndex -- 
now there should be some stuff + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .send() + .await + .unwrap(); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!({ + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "partitionKeys": [ + { + "pk": "root", + "entries": i+1, + "conflicts": i+1, + "values": 2*(i+1), + "bytes": (i+1)*(content2.len() + content3.len()), + } + ], + "more": false, + "nextStart": null + }) + ); + } + + // Now delete things + for (i, sk) in ["a", "b", "c", "d"].iter().enumerate() { + // Get value back (we just need the CT) + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + let ct = res + .headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(); + + // Delete it + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .method(Method::DELETE) + .path("root") + .query_param("sort_key", Some(sk)) + .signed_header("x-garage-causality-token", ct) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 204); + + // ReadIndex -- now there should be some stuff + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .send() + .await + .unwrap(); + let res_body = json_body(res).await; + if i < 3 { + assert_json_eq!( + res_body, + json!({ + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "partitionKeys": [ + { + "pk": "root", + "entries": 3-i, + "conflicts": 3-i, + "values": 2*(3-i), + "bytes": (3-i)*(content2_len + content3_len), + } + ], + "more": false, + "nextStart": null + }) + ); + } else { + assert_json_eq!( + res_body, + json!({ + "prefix": null, + "start": null, + "end": null, + "limit": null, + "reverse": false, + "partitionKeys": [], + "more": false, + "nextStart": null + }) + ); + } + } +} + +#[tokio::test] +async fn test_item_return_format() { + let ctx = common::context(); + let bucket = ctx.create_bucket("test-k2v-item-return-format"); + + let single_value = b"A single value".to_vec(); + let concurrent_value = b"A concurrent value".to_vec(); + + // -- Test with a single value -- + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .body(single_value.clone()) + .method(Method::PUT) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // f0: either + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/octet-stream" + ); + let ct = res + .headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(); + let res_body = hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res_body, single_value); + + // f1: not specified + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([base64::encode(&single_value)])); + + // f2: binary + let res = ctx + .k2v 
+ .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/octet-stream" + ); + let res_body = hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res_body, single_value); + + // f3: json + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/json") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([base64::encode(&single_value)])); + + // -- Test with a second, concurrent value -- + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .body(concurrent_value.clone()) + .method(Method::PUT) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // f0: either + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!([ + base64::encode(&single_value), + base64::encode(&concurrent_value) + ]) + ); + + // f1: not specified + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!([ + base64::encode(&single_value), + base64::encode(&concurrent_value) + ]) + ); + + // f2: binary + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 409); // CONFLICT + + // f3: json + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/json") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!( + res_body, + json!([ + base64::encode(&single_value), + base64::encode(&concurrent_value) + ]) + ); + + // -- Delete first value, concurrently with second insert -- + // -- (we now have a concurrent value and a deletion) -- + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .method(Method::DELETE) + .signed_header("x-garage-causality-token", ct) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 204); + + // f0: either + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + 
assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null])); + + // f1: not specified + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let ct = res + .headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null])); + + // f2: binary + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 409); // CONFLICT + + // f3: json + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/json") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null])); + + // -- Delete everything -- + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .method(Method::DELETE) + .signed_header("x-garage-causality-token", ct) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 204); + + // f0: either + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "*/*") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 204); // NO CONTENT + + // f1: not specified + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([null])); + + // f2: binary + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 204); // NO CONTENT + + // f3: json + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("v1")) + .signed_header("accept", "application/json") + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + assert_eq!( + res.headers().get("content-type").unwrap().to_str().unwrap(), + "application/json" + ); + let res_body = json_body(res).await; + assert_json_eq!(res_body, json!([null])); +} diff --git a/src/garage/tests/k2v/mod.rs b/src/garage/tests/k2v/mod.rs new file mode 100644 index 00000000..a009460e --- /dev/null +++ b/src/garage/tests/k2v/mod.rs @@ -0,0 +1,18 @@ +pub mod batch; +pub mod errorcodes; +pub mod item; +pub mod poll; +pub mod simple; + +use hyper::{Body, Response}; + +pub async fn json_body(res: Response) -> serde_json::Value { + let res_body: serde_json::Value = serde_json::from_slice( + 
&hyper::body::to_bytes(res.into_body()) + .await + .unwrap() + .to_vec()[..], + ) + .unwrap(); + res_body +} diff --git a/src/garage/tests/k2v/poll.rs b/src/garage/tests/k2v/poll.rs new file mode 100644 index 00000000..70dc0410 --- /dev/null +++ b/src/garage/tests/k2v/poll.rs @@ -0,0 +1,98 @@ +use hyper::Method; +use std::time::Duration; + +use crate::common; + +#[tokio::test] +async fn test_poll() { + let ctx = common::context(); + let bucket = ctx.create_bucket("test-k2v-poll"); + + // Write initial value + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .method(Method::PUT) + .path("root") + .query_param("sort_key", Some("test1")) + .body(b"Initial value".to_vec()) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // Retrieve initial value to get its causality token + let res2 = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("test1")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res2.status(), 200); + let ct = res2 + .headers() + .get("x-garage-causality-token") + .unwrap() + .to_str() + .unwrap() + .to_string(); + + let res2_body = hyper::body::to_bytes(res2.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res2_body, b"Initial value"); + + // Start poll operation + let poll = { + let bucket = bucket.clone(); + let ct = ct.clone(); + tokio::spawn(async move { + let ctx = common::context(); + ctx.k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("test1")) + .query_param("causality_token", Some(ct)) + .query_param("timeout", Some("10")) + .signed_header("accept", "application/octet-stream") + .send() + .await + }) + }; + + // Write new value that supersedes initial one + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .method(Method::PUT) + .path("root") + .query_param("sort_key", Some("test1")) + .signed_header("x-garage-causality-token", ct) + .body(b"New value".to_vec()) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + // Check poll finishes with correct value + let poll_res = tokio::select! 
{ + _ = tokio::time::sleep(Duration::from_secs(10)) => panic!("poll did not terminate in time"), + res = poll => res.unwrap().unwrap(), + }; + + assert_eq!(poll_res.status(), 200); + + let poll_res_body = hyper::body::to_bytes(poll_res.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(poll_res_body, b"New value"); +} diff --git a/src/garage/tests/k2v/simple.rs b/src/garage/tests/k2v/simple.rs new file mode 100644 index 00000000..ae9a8674 --- /dev/null +++ b/src/garage/tests/k2v/simple.rs @@ -0,0 +1,40 @@ +use crate::common; + +use hyper::Method; + +#[tokio::test] +async fn test_simple() { + let ctx = common::context(); + let bucket = ctx.create_bucket("test-k2v-simple"); + + let res = ctx + .k2v + .request + .builder(bucket.clone()) + .method(Method::PUT) + .path("root") + .query_param("sort_key", Some("test1")) + .body(b"Hello, world!".to_vec()) + .send() + .await + .unwrap(); + assert_eq!(res.status(), 200); + + let res2 = ctx + .k2v + .request + .builder(bucket.clone()) + .path("root") + .query_param("sort_key", Some("test1")) + .signed_header("accept", "application/octet-stream") + .send() + .await + .unwrap(); + assert_eq!(res2.status(), 200); + + let res2_body = hyper::body::to_bytes(res2.into_body()) + .await + .unwrap() + .to_vec(); + assert_eq!(res2_body, b"Hello, world!"); +} diff --git a/src/garage/tests/lib.rs b/src/garage/tests/lib.rs index 8799c395..0106ad10 100644 --- a/src/garage/tests/lib.rs +++ b/src/garage/tests/lib.rs @@ -3,9 +3,5 @@ mod common; mod admin; mod bucket; -mod list; -mod multipart; -mod objects; -mod simple; -mod streaming_signature; -mod website; +mod k2v; +mod s3; diff --git a/src/garage/tests/list.rs b/src/garage/tests/list.rs deleted file mode 100644 index bb03f250..00000000 --- a/src/garage/tests/list.rs +++ /dev/null @@ -1,615 +0,0 @@ -use crate::common; - -const KEYS: [&str; 8] = ["a", "a/a", "a/b", "a/c", "a/d/a", "a/é", "b", "c"]; -const KEYS_MULTIPART: [&str; 5] = ["a", "a", "c", "c/a", "c/b"]; - -#[tokio::test] -async fn test_listobjectsv2() { - let ctx = common::context(); - let bucket = ctx.create_bucket("listobjectsv2"); - - for k in KEYS { - ctx.client - .put_object() - .bucket(&bucket) - .key(k) - .send() - .await - .unwrap(); - } - - { - // Scoping the variable to avoid reusing it - // in a following assert due to copy paste - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 8); - assert!(r.common_prefixes.is_none()); - } - - //@FIXME aws-sdk-s3 automatically checks max-key values. - // If we set it to zero, it drops it, and it is probably - // the same behavior on values bigger than 1000. - // Boto and awscli do not perform these tests, we should write - // our own minimal library to bypass AWS SDK's tests and be - // sure that we behave correctly. 
- - { - // With 2 elements - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .max_keys(2) - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 2); - assert!(r.common_prefixes.is_none()); - assert!(r.next_continuation_token.is_some()); - } - - { - // With pagination - let mut cnt = 0; - let mut next = None; - let last_idx = KEYS.len() - 1; - - for i in 0..KEYS.len() { - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .set_continuation_token(next) - .max_keys(1) - .send() - .await - .unwrap(); - - cnt += 1; - next = r.next_continuation_token; - - assert_eq!(r.contents.unwrap().len(), 1); - assert!(r.common_prefixes.is_none()); - if i != last_idx { - assert!(next.is_some()); - } - } - assert_eq!(cnt, KEYS.len()); - } - - { - // With a delimiter - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .delimiter("/") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 3); - assert_eq!(r.common_prefixes.unwrap().len(), 1); - } - - { - // With a delimiter and pagination - let mut cnt_pfx = 0; - let mut cnt_key = 0; - let mut next = None; - - for _i in 0..KEYS.len() { - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .set_continuation_token(next) - .delimiter("/") - .max_keys(1) - .send() - .await - .unwrap(); - - next = r.next_continuation_token; - match (r.contents, r.common_prefixes) { - (Some(k), None) if k.len() == 1 => cnt_key += 1, - (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1, - _ => unreachable!("logic error"), - }; - if next.is_none() { - break; - } - } - assert_eq!(cnt_key, 3); - assert_eq!(cnt_pfx, 1); - } - - { - // With a prefix - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .prefix("a/") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 5); - assert!(r.common_prefixes.is_none()); - } - - { - // With a prefix and a delimiter - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .prefix("a/") - .delimiter("/") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 4); - assert_eq!(r.common_prefixes.unwrap().len(), 1); - } - - { - // With a prefix, a delimiter and max_key - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .prefix("a/") - .delimiter("/") - .max_keys(1) - .send() - .await - .unwrap(); - - assert_eq!(r.contents.as_ref().unwrap().len(), 1); - assert_eq!( - r.contents - .unwrap() - .first() - .unwrap() - .key - .as_ref() - .unwrap() - .as_str(), - "a/a" - ); - assert!(r.common_prefixes.is_none()); - } - { - // With start_after before all keys - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .start_after("Z") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 8); - assert!(r.common_prefixes.is_none()); - } - { - // With start_after after all keys - let r = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .start_after("c") - .send() - .await - .unwrap(); - - assert!(r.contents.is_none()); - assert!(r.common_prefixes.is_none()); - } -} - -#[tokio::test] -async fn test_listobjectsv1() { - let ctx = common::context(); - let bucket = ctx.create_bucket("listobjects"); - - for k in KEYS { - ctx.client - .put_object() - .bucket(&bucket) - .key(k) - .send() - .await - .unwrap(); - } - - { - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 8); - assert!(r.common_prefixes.is_none()); - } - - { - // With 2 elements - let r = ctx - .client - .list_objects() - 
.bucket(&bucket) - .max_keys(2) - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 2); - assert!(r.common_prefixes.is_none()); - assert!(r.next_marker.is_some()); - } - - { - // With pagination - let mut cnt = 0; - let mut next = None; - let last_idx = KEYS.len() - 1; - - for i in 0..KEYS.len() { - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .set_marker(next) - .max_keys(1) - .send() - .await - .unwrap(); - - cnt += 1; - next = r.next_marker; - - assert_eq!(r.contents.unwrap().len(), 1); - assert!(r.common_prefixes.is_none()); - if i != last_idx { - assert!(next.is_some()); - } - } - assert_eq!(cnt, KEYS.len()); - } - - { - // With a delimiter - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .delimiter("/") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 3); - assert_eq!(r.common_prefixes.unwrap().len(), 1); - } - - { - // With a delimiter and pagination - let mut cnt_pfx = 0; - let mut cnt_key = 0; - let mut next = None; - - for _i in 0..KEYS.len() { - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .delimiter("/") - .set_marker(next) - .max_keys(1) - .send() - .await - .unwrap(); - - next = r.next_marker; - match (r.contents, r.common_prefixes) { - (Some(k), None) if k.len() == 1 => cnt_key += 1, - (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1, - _ => unreachable!("logic error"), - }; - if next.is_none() { - break; - } - } - assert_eq!(cnt_key, 3); - // We have no optimization to skip the whole prefix - // on listobjectsv1 so we return the same one 5 times, - // for each element. It is up to the client to merge its result. - // This is compliant with AWS spec. - assert_eq!(cnt_pfx, 5); - } - - { - // With a prefix - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .prefix("a/") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 5); - assert!(r.common_prefixes.is_none()); - } - - { - // With a prefix and a delimiter - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .prefix("a/") - .delimiter("/") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 4); - assert_eq!(r.common_prefixes.unwrap().len(), 1); - } - - { - // With a prefix, a delimiter and max_key - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .prefix("a/") - .delimiter("/") - .max_keys(1) - .send() - .await - .unwrap(); - - assert_eq!(r.contents.as_ref().unwrap().len(), 1); - assert_eq!( - r.contents - .unwrap() - .first() - .unwrap() - .key - .as_ref() - .unwrap() - .as_str(), - "a/a" - ); - assert!(r.common_prefixes.is_none()); - } - { - // With marker before all keys - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .marker("Z") - .send() - .await - .unwrap(); - - assert_eq!(r.contents.unwrap().len(), 8); - assert!(r.common_prefixes.is_none()); - } - { - // With start_after after all keys - let r = ctx - .client - .list_objects() - .bucket(&bucket) - .marker("c") - .send() - .await - .unwrap(); - - assert!(r.contents.is_none()); - assert!(r.common_prefixes.is_none()); - } -} - -#[tokio::test] -async fn test_listmultipart() { - let ctx = common::context(); - let bucket = ctx.create_bucket("listmultipartuploads"); - - for k in KEYS_MULTIPART { - ctx.client - .create_multipart_upload() - .bucket(&bucket) - .key(k) - .send() - .await - .unwrap(); - } - - { - // Default - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .send() - .await - .unwrap(); - - assert_eq!(r.uploads.unwrap().len(), 5); - 
assert!(r.common_prefixes.is_none()); - } - { - // With pagination - let mut next = None; - let mut upnext = None; - let last_idx = KEYS_MULTIPART.len() - 1; - - for i in 0..KEYS_MULTIPART.len() { - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .set_key_marker(next) - .set_upload_id_marker(upnext) - .max_uploads(1) - .send() - .await - .unwrap(); - - next = r.next_key_marker; - upnext = r.next_upload_id_marker; - - assert_eq!(r.uploads.unwrap().len(), 1); - assert!(r.common_prefixes.is_none()); - if i != last_idx { - assert!(next.is_some()); - } - } - } - { - // With delimiter - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .delimiter("/") - .send() - .await - .unwrap(); - - assert_eq!(r.uploads.unwrap().len(), 3); - assert_eq!(r.common_prefixes.unwrap().len(), 1); - } - { - // With delimiter and pagination - let mut next = None; - let mut upnext = None; - let mut upcnt = 0; - let mut pfxcnt = 0; - let mut loopcnt = 0; - - while loopcnt < KEYS_MULTIPART.len() { - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .delimiter("/") - .max_uploads(1) - .set_key_marker(next) - .set_upload_id_marker(upnext) - .send() - .await - .unwrap(); - - next = r.next_key_marker; - upnext = r.next_upload_id_marker; - - loopcnt += 1; - upcnt += r.uploads.unwrap_or_default().len(); - pfxcnt += r.common_prefixes.unwrap_or_default().len(); - - if next.is_none() { - break; - } - } - - assert_eq!(upcnt + pfxcnt, loopcnt); - assert_eq!(upcnt, 3); - assert_eq!(pfxcnt, 1); - } - { - // With prefix - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .prefix("c") - .send() - .await - .unwrap(); - - assert_eq!(r.uploads.unwrap().len(), 3); - assert!(r.common_prefixes.is_none()); - } - { - // With prefix and delimiter - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .prefix("c") - .delimiter("/") - .send() - .await - .unwrap(); - - assert_eq!(r.uploads.unwrap().len(), 1); - assert_eq!(r.common_prefixes.unwrap().len(), 1); - } - { - // With prefix, delimiter and max keys - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .prefix("c") - .delimiter("/") - .max_uploads(1) - .send() - .await - .unwrap(); - - assert_eq!(r.uploads.unwrap().len(), 1); - assert!(r.common_prefixes.is_none()); - } - { - // With starting token before the first element - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .key_marker("ZZZZZ") - .send() - .await - .unwrap(); - - assert_eq!(r.uploads.unwrap().len(), 5); - assert!(r.common_prefixes.is_none()); - } - { - // With starting token after the last element - let r = ctx - .client - .list_multipart_uploads() - .bucket(&bucket) - .key_marker("d") - .send() - .await - .unwrap(); - - assert!(r.uploads.is_none()); - assert!(r.common_prefixes.is_none()); - } -} diff --git a/src/garage/tests/multipart.rs b/src/garage/tests/multipart.rs deleted file mode 100644 index 895a2993..00000000 --- a/src/garage/tests/multipart.rs +++ /dev/null @@ -1,415 +0,0 @@ -use crate::common; -use aws_sdk_s3::model::{CompletedMultipartUpload, CompletedPart}; -use aws_sdk_s3::types::ByteStream; - -const SZ_5MB: usize = 5 * 1024 * 1024; -const SZ_10MB: usize = 10 * 1024 * 1024; - -#[tokio::test] -async fn test_uploadlistpart() { - let ctx = common::context(); - let bucket = ctx.create_bucket("uploadpart"); - - let u1 = vec![0xee; SZ_5MB]; - let u2 = vec![0x11; SZ_5MB]; - - let up = ctx - .client - .create_multipart_upload() - .bucket(&bucket) - .key("a") - .send() - 
.await - .unwrap(); - let uid = up.upload_id.as_ref().unwrap(); - - assert!(up.upload_id.is_some()); - - { - let r = ctx - .client - .list_parts() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .send() - .await - .unwrap(); - - assert!(r.parts.is_none()); - } - - let p1 = ctx - .client - .upload_part() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .part_number(2) - .body(ByteStream::from(u1)) - .send() - .await - .unwrap(); - - { - // ListPart on 1st element - let r = ctx - .client - .list_parts() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .send() - .await - .unwrap(); - - let ps = r.parts.unwrap(); - assert_eq!(ps.len(), 1); - let fp = ps.iter().find(|x| x.part_number == 2).unwrap(); - assert!(fp.last_modified.is_some()); - assert_eq!( - fp.e_tag.as_ref().unwrap(), - "\"3366bb9dcf710d6801b5926467d02e19\"" - ); - assert_eq!(fp.size, SZ_5MB as i64); - } - - let p2 = ctx - .client - .upload_part() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .part_number(1) - .body(ByteStream::from(u2)) - .send() - .await - .unwrap(); - - { - // ListPart on the 2 elements - let r = ctx - .client - .list_parts() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .send() - .await - .unwrap(); - - let ps = r.parts.unwrap(); - assert_eq!(ps.len(), 2); - let fp = ps.iter().find(|x| x.part_number == 1).unwrap(); - assert!(fp.last_modified.is_some()); - assert_eq!( - fp.e_tag.as_ref().unwrap(), - "\"3c484266f9315485694556e6c693bfa2\"" - ); - assert_eq!(fp.size, SZ_5MB as i64); - } - - { - // Call pagination - let r = ctx - .client - .list_parts() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .max_parts(1) - .send() - .await - .unwrap(); - - assert!(r.part_number_marker.is_none()); - assert!(r.next_part_number_marker.is_some()); - assert_eq!(r.max_parts, 1_i32); - assert!(r.is_truncated); - assert_eq!(r.key.unwrap(), "a"); - assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str()); - assert_eq!(r.parts.unwrap().len(), 1); - - let r2 = ctx - .client - .list_parts() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .max_parts(1) - .part_number_marker(r.next_part_number_marker.as_ref().unwrap()) - .send() - .await - .unwrap(); - - assert_eq!( - r2.part_number_marker.as_ref().unwrap(), - r.next_part_number_marker.as_ref().unwrap() - ); - assert_eq!(r2.max_parts, 1_i32); - assert!(r2.is_truncated); - assert_eq!(r2.key.unwrap(), "a"); - assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str()); - assert_eq!(r2.parts.unwrap().len(), 1); - } - - let cmp = CompletedMultipartUpload::builder() - .parts( - CompletedPart::builder() - .part_number(1) - .e_tag(p2.e_tag.unwrap()) - .build(), - ) - .parts( - CompletedPart::builder() - .part_number(2) - .e_tag(p1.e_tag.unwrap()) - .build(), - ) - .build(); - - ctx.client - .complete_multipart_upload() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .multipart_upload(cmp) - .send() - .await - .unwrap(); - - // The multipart upload must not appear anymore - assert!(ctx - .client - .list_parts() - .bucket(&bucket) - .key("a") - .upload_id(uid) - .send() - .await - .is_err()); - - { - // The object must appear as a regular object - let r = ctx - .client - .head_object() - .bucket(&bucket) - .key("a") - .send() - .await - .unwrap(); - - assert_eq!(r.content_length, (SZ_5MB * 2) as i64); - } -} - -#[tokio::test] -async fn test_uploadpartcopy() { - let ctx = common::context(); - let bucket = ctx.create_bucket("uploadpartcopy"); - - let u1 = vec![0x11; SZ_10MB]; - let u2 = vec![0x22; SZ_5MB]; - let u3 = vec![0x33; SZ_5MB]; - let u4 = vec![0x44; SZ_5MB]; - let u5 
= vec![0x55; SZ_5MB]; - - let overflow = 5500000 - SZ_5MB; - let mut exp_obj = u3.clone(); - exp_obj.extend(&u4[500..]); - exp_obj.extend(&u5[..overflow + 1]); - exp_obj.extend(&u2); - exp_obj.extend(&u1[500..5500000 + 1]); - - // (setup) Upload a single part object - ctx.client - .put_object() - .bucket(&bucket) - .key("source1") - .body(ByteStream::from(u1)) - .send() - .await - .unwrap(); - - // (setup) Upload a multipart object with 2 parts - { - let up = ctx - .client - .create_multipart_upload() - .bucket(&bucket) - .key("source2") - .send() - .await - .unwrap(); - let uid = up.upload_id.as_ref().unwrap(); - - let p1 = ctx - .client - .upload_part() - .bucket(&bucket) - .key("source2") - .upload_id(uid) - .part_number(1) - .body(ByteStream::from(u4)) - .send() - .await - .unwrap(); - - let p2 = ctx - .client - .upload_part() - .bucket(&bucket) - .key("source2") - .upload_id(uid) - .part_number(2) - .body(ByteStream::from(u5)) - .send() - .await - .unwrap(); - - let cmp = CompletedMultipartUpload::builder() - .parts( - CompletedPart::builder() - .part_number(1) - .e_tag(p1.e_tag.unwrap()) - .build(), - ) - .parts( - CompletedPart::builder() - .part_number(2) - .e_tag(p2.e_tag.unwrap()) - .build(), - ) - .build(); - - ctx.client - .complete_multipart_upload() - .bucket(&bucket) - .key("source2") - .upload_id(uid) - .multipart_upload(cmp) - .send() - .await - .unwrap(); - } - - // Our multipart object that does copy - let up = ctx - .client - .create_multipart_upload() - .bucket(&bucket) - .key("target") - .send() - .await - .unwrap(); - let uid = up.upload_id.as_ref().unwrap(); - - let p3 = ctx - .client - .upload_part() - .bucket(&bucket) - .key("target") - .upload_id(uid) - .part_number(3) - .body(ByteStream::from(u2)) - .send() - .await - .unwrap(); - - let p1 = ctx - .client - .upload_part() - .bucket(&bucket) - .key("target") - .upload_id(uid) - .part_number(1) - .body(ByteStream::from(u3)) - .send() - .await - .unwrap(); - - let p2 = ctx - .client - .upload_part_copy() - .bucket(&bucket) - .key("target") - .upload_id(uid) - .part_number(2) - .copy_source("uploadpartcopy/source2") - .copy_source_range("bytes=500-5500000") - .send() - .await - .unwrap(); - - let p4 = ctx - .client - .upload_part_copy() - .bucket(&bucket) - .key("target") - .upload_id(uid) - .part_number(4) - .copy_source("uploadpartcopy/source1") - .copy_source_range("bytes=500-5500000") - .send() - .await - .unwrap(); - - let cmp = CompletedMultipartUpload::builder() - .parts( - CompletedPart::builder() - .part_number(1) - .e_tag(p1.e_tag.unwrap()) - .build(), - ) - .parts( - CompletedPart::builder() - .part_number(2) - .e_tag(p2.copy_part_result.unwrap().e_tag.unwrap()) - .build(), - ) - .parts( - CompletedPart::builder() - .part_number(3) - .e_tag(p3.e_tag.unwrap()) - .build(), - ) - .parts( - CompletedPart::builder() - .part_number(4) - .e_tag(p4.copy_part_result.unwrap().e_tag.unwrap()) - .build(), - ) - .build(); - - ctx.client - .complete_multipart_upload() - .bucket(&bucket) - .key("target") - .upload_id(uid) - .multipart_upload(cmp) - .send() - .await - .unwrap(); - - // (check) Get object - - let obj = ctx - .client - .get_object() - .bucket(&bucket) - .key("target") - .send() - .await - .unwrap(); - - let real_obj = obj - .body - .collect() - .await - .expect("Error reading data") - .into_bytes(); - - assert_eq!(real_obj.len(), exp_obj.len()); - assert_eq!(real_obj, exp_obj); -} diff --git a/src/garage/tests/objects.rs b/src/garage/tests/objects.rs deleted file mode 100644 index e1175b81..00000000 --- 
a/src/garage/tests/objects.rs +++ /dev/null @@ -1,266 +0,0 @@ -use crate::common; -use aws_sdk_s3::model::{Delete, ObjectIdentifier}; -use aws_sdk_s3::types::ByteStream; - -const STD_KEY: &str = "hello world"; -const CTRL_KEY: &str = "\x00\x01\x02\x00"; -const UTF8_KEY: &str = "\u{211D}\u{1F923}\u{1F44B}"; -const BODY: &[u8; 62] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - -#[tokio::test] -async fn test_putobject() { - let ctx = common::context(); - let bucket = ctx.create_bucket("putobject"); - - { - // Send an empty object (can serve as a directory marker) - // with a content type - let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; - let content_type = "text/csv"; - let r = ctx - .client - .put_object() - .bucket(&bucket) - .key(STD_KEY) - .content_type(content_type) - .send() - .await - .unwrap(); - - assert_eq!(r.e_tag.unwrap().as_str(), etag); - // We return a version ID here - // We should check if Amazon is returning one when versioning is not enabled - assert!(r.version_id.is_some()); - - let _version = r.version_id.unwrap(); - - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(STD_KEY) - .send() - .await - .unwrap(); - - assert_bytes_eq!(o.body, b""); - assert_eq!(o.e_tag.unwrap(), etag); - // We do not return version ID - // We should check if Amazon is returning one when versioning is not enabled - // assert_eq!(o.version_id.unwrap(), _version); - assert_eq!(o.content_type.unwrap(), content_type); - assert!(o.last_modified.is_some()); - assert_eq!(o.content_length, 0); - assert_eq!(o.parts_count, 0); - assert_eq!(o.tag_count, 0); - } - - { - // Key with control characters, - // no content type and some data - let etag = "\"49f68a5c8493ec2c0bf489821c21fc3b\""; - let data = ByteStream::from_static(b"hi"); - - let r = ctx - .client - .put_object() - .bucket(&bucket) - .key(CTRL_KEY) - .body(data) - .send() - .await - .unwrap(); - - assert_eq!(r.e_tag.unwrap().as_str(), etag); - assert!(r.version_id.is_some()); - - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(CTRL_KEY) - .send() - .await - .unwrap(); - - assert_bytes_eq!(o.body, b"hi"); - assert_eq!(o.e_tag.unwrap(), etag); - assert!(o.last_modified.is_some()); - assert_eq!(o.content_length, 2); - assert_eq!(o.parts_count, 0); - assert_eq!(o.tag_count, 0); - } - - { - // Key with UTF8 codepoints including emoji - let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; - - let r = ctx - .client - .put_object() - .bucket(&bucket) - .key(UTF8_KEY) - .send() - .await - .unwrap(); - - assert_eq!(r.e_tag.unwrap().as_str(), etag); - assert!(r.version_id.is_some()); - - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(UTF8_KEY) - .send() - .await - .unwrap(); - - assert_bytes_eq!(o.body, b""); - assert_eq!(o.e_tag.unwrap(), etag); - assert!(o.last_modified.is_some()); - assert_eq!(o.content_length, 0); - assert_eq!(o.parts_count, 0); - assert_eq!(o.tag_count, 0); - } -} - -#[tokio::test] -async fn test_getobject() { - let ctx = common::context(); - let bucket = ctx.create_bucket("getobject"); - - let etag = "\"46cf18a9b447991b450cad3facf5937e\""; - let data = ByteStream::from_static(BODY); - - let r = ctx - .client - .put_object() - .bucket(&bucket) - .key(STD_KEY) - .body(data) - .send() - .await - .unwrap(); - - assert_eq!(r.e_tag.unwrap().as_str(), etag); - - { - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(STD_KEY) - .range("bytes=1-9") - .send() - .await - .unwrap(); - - assert_eq!(o.content_range.unwrap().as_str(), "bytes 1-9/62"); - 
assert_bytes_eq!(o.body, &BODY[1..10]); - } - { - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(STD_KEY) - .range("bytes=9-") - .send() - .await - .unwrap(); - assert_eq!(o.content_range.unwrap().as_str(), "bytes 9-61/62"); - assert_bytes_eq!(o.body, &BODY[9..]); - } - { - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(STD_KEY) - .range("bytes=-5") - .send() - .await - .unwrap(); - assert_eq!(o.content_range.unwrap().as_str(), "bytes 57-61/62"); - assert_bytes_eq!(o.body, &BODY[57..]); - } -} - -#[tokio::test] -async fn test_deleteobject() { - let ctx = common::context(); - let bucket = ctx.create_bucket("deleteobject"); - - let mut to_del = Delete::builder(); - - // add content without data - for i in 0..5 { - let k = format!("k-{}", i); - ctx.client - .put_object() - .bucket(&bucket) - .key(k.to_string()) - .send() - .await - .unwrap(); - if i > 0 { - to_del = to_del.objects(ObjectIdentifier::builder().key(k).build()); - } - } - - // add content with data - for i in 0..5 { - let k = format!("l-{}", i); - let data = ByteStream::from_static(BODY); - ctx.client - .put_object() - .bucket(&bucket) - .key(k.to_string()) - .body(data) - .send() - .await - .unwrap(); - - if i > 0 { - to_del = to_del.objects(ObjectIdentifier::builder().key(k).build()); - } - } - - ctx.client - .delete_object() - .bucket(&bucket) - .key("k-0") - .send() - .await - .unwrap(); - - ctx.client - .delete_object() - .bucket(&bucket) - .key("l-0") - .send() - .await - .unwrap(); - - let r = ctx - .client - .delete_objects() - .bucket(&bucket) - .delete(to_del.build()) - .send() - .await - .unwrap(); - - assert_eq!(r.deleted.unwrap().len(), 8); - - let l = ctx - .client - .list_objects_v2() - .bucket(&bucket) - .send() - .await - .unwrap(); - - assert!(l.contents.is_none()); -} diff --git a/src/garage/tests/s3/list.rs b/src/garage/tests/s3/list.rs new file mode 100644 index 00000000..bb03f250 --- /dev/null +++ b/src/garage/tests/s3/list.rs @@ -0,0 +1,615 @@ +use crate::common; + +const KEYS: [&str; 8] = ["a", "a/a", "a/b", "a/c", "a/d/a", "a/é", "b", "c"]; +const KEYS_MULTIPART: [&str; 5] = ["a", "a", "c", "c/a", "c/b"]; + +#[tokio::test] +async fn test_listobjectsv2() { + let ctx = common::context(); + let bucket = ctx.create_bucket("listobjectsv2"); + + for k in KEYS { + ctx.client + .put_object() + .bucket(&bucket) + .key(k) + .send() + .await + .unwrap(); + } + + { + // Scoping the variable to avoid reusing it + // in a following assert due to copy paste + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 8); + assert!(r.common_prefixes.is_none()); + } + + //@FIXME aws-sdk-s3 automatically checks max-key values. + // If we set it to zero, it drops it, and it is probably + // the same behavior on values bigger than 1000. + // Boto and awscli do not perform these tests, we should write + // our own minimal library to bypass AWS SDK's tests and be + // sure that we behave correctly. 
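+ //
+ // As an illustrative sketch (not exercised here), the max-keys=0 case could
+ // be probed with this suite's own custom_requester, which signs raw requests
+ // and therefore skips the SDK's client-side validation; the exact
+ // query-parameter handling below is an assumption:
+ //
+ //   let mut query = HashMap::new();
+ //   query.insert("list-type".to_owned(), Some("2".to_owned()));
+ //   query.insert("max-keys".to_owned(), Some("0".to_owned()));
+ //   let res = ctx
+ //       .custom_request
+ //       .builder(bucket.clone())
+ //       .method(Method::GET)
+ //       .query_params(query)
+ //       .send()
+ //       .await
+ //       .unwrap();
+ //   // (whatever status/body Garage returns here is exactly what needs checking)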
+ + { + // With 2 elements + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .max_keys(2) + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 2); + assert!(r.common_prefixes.is_none()); + assert!(r.next_continuation_token.is_some()); + } + + { + // With pagination + let mut cnt = 0; + let mut next = None; + let last_idx = KEYS.len() - 1; + + for i in 0..KEYS.len() { + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .set_continuation_token(next) + .max_keys(1) + .send() + .await + .unwrap(); + + cnt += 1; + next = r.next_continuation_token; + + assert_eq!(r.contents.unwrap().len(), 1); + assert!(r.common_prefixes.is_none()); + if i != last_idx { + assert!(next.is_some()); + } + } + assert_eq!(cnt, KEYS.len()); + } + + { + // With a delimiter + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .delimiter("/") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 3); + assert_eq!(r.common_prefixes.unwrap().len(), 1); + } + + { + // With a delimiter and pagination + let mut cnt_pfx = 0; + let mut cnt_key = 0; + let mut next = None; + + for _i in 0..KEYS.len() { + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .set_continuation_token(next) + .delimiter("/") + .max_keys(1) + .send() + .await + .unwrap(); + + next = r.next_continuation_token; + match (r.contents, r.common_prefixes) { + (Some(k), None) if k.len() == 1 => cnt_key += 1, + (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1, + _ => unreachable!("logic error"), + }; + if next.is_none() { + break; + } + } + assert_eq!(cnt_key, 3); + assert_eq!(cnt_pfx, 1); + } + + { + // With a prefix + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .prefix("a/") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 5); + assert!(r.common_prefixes.is_none()); + } + + { + // With a prefix and a delimiter + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .prefix("a/") + .delimiter("/") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 4); + assert_eq!(r.common_prefixes.unwrap().len(), 1); + } + + { + // With a prefix, a delimiter and max_key + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .prefix("a/") + .delimiter("/") + .max_keys(1) + .send() + .await + .unwrap(); + + assert_eq!(r.contents.as_ref().unwrap().len(), 1); + assert_eq!( + r.contents + .unwrap() + .first() + .unwrap() + .key + .as_ref() + .unwrap() + .as_str(), + "a/a" + ); + assert!(r.common_prefixes.is_none()); + } + { + // With start_after before all keys + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .start_after("Z") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 8); + assert!(r.common_prefixes.is_none()); + } + { + // With start_after after all keys + let r = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .start_after("c") + .send() + .await + .unwrap(); + + assert!(r.contents.is_none()); + assert!(r.common_prefixes.is_none()); + } +} + +#[tokio::test] +async fn test_listobjectsv1() { + let ctx = common::context(); + let bucket = ctx.create_bucket("listobjects"); + + for k in KEYS { + ctx.client + .put_object() + .bucket(&bucket) + .key(k) + .send() + .await + .unwrap(); + } + + { + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 8); + assert!(r.common_prefixes.is_none()); + } + + { + // With 2 elements + let r = ctx + .client + .list_objects() + 
.bucket(&bucket) + .max_keys(2) + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 2); + assert!(r.common_prefixes.is_none()); + assert!(r.next_marker.is_some()); + } + + { + // With pagination + let mut cnt = 0; + let mut next = None; + let last_idx = KEYS.len() - 1; + + for i in 0..KEYS.len() { + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .set_marker(next) + .max_keys(1) + .send() + .await + .unwrap(); + + cnt += 1; + next = r.next_marker; + + assert_eq!(r.contents.unwrap().len(), 1); + assert!(r.common_prefixes.is_none()); + if i != last_idx { + assert!(next.is_some()); + } + } + assert_eq!(cnt, KEYS.len()); + } + + { + // With a delimiter + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .delimiter("/") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 3); + assert_eq!(r.common_prefixes.unwrap().len(), 1); + } + + { + // With a delimiter and pagination + let mut cnt_pfx = 0; + let mut cnt_key = 0; + let mut next = None; + + for _i in 0..KEYS.len() { + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .delimiter("/") + .set_marker(next) + .max_keys(1) + .send() + .await + .unwrap(); + + next = r.next_marker; + match (r.contents, r.common_prefixes) { + (Some(k), None) if k.len() == 1 => cnt_key += 1, + (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1, + _ => unreachable!("logic error"), + }; + if next.is_none() { + break; + } + } + assert_eq!(cnt_key, 3); + // We have no optimization to skip the whole prefix + // on listobjectsv1 so we return the same one 5 times, + // for each element. It is up to the client to merge its result. + // This is compliant with AWS spec. + assert_eq!(cnt_pfx, 5); + } + + { + // With a prefix + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .prefix("a/") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 5); + assert!(r.common_prefixes.is_none()); + } + + { + // With a prefix and a delimiter + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .prefix("a/") + .delimiter("/") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 4); + assert_eq!(r.common_prefixes.unwrap().len(), 1); + } + + { + // With a prefix, a delimiter and max_key + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .prefix("a/") + .delimiter("/") + .max_keys(1) + .send() + .await + .unwrap(); + + assert_eq!(r.contents.as_ref().unwrap().len(), 1); + assert_eq!( + r.contents + .unwrap() + .first() + .unwrap() + .key + .as_ref() + .unwrap() + .as_str(), + "a/a" + ); + assert!(r.common_prefixes.is_none()); + } + { + // With marker before all keys + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .marker("Z") + .send() + .await + .unwrap(); + + assert_eq!(r.contents.unwrap().len(), 8); + assert!(r.common_prefixes.is_none()); + } + { + // With start_after after all keys + let r = ctx + .client + .list_objects() + .bucket(&bucket) + .marker("c") + .send() + .await + .unwrap(); + + assert!(r.contents.is_none()); + assert!(r.common_prefixes.is_none()); + } +} + +#[tokio::test] +async fn test_listmultipart() { + let ctx = common::context(); + let bucket = ctx.create_bucket("listmultipartuploads"); + + for k in KEYS_MULTIPART { + ctx.client + .create_multipart_upload() + .bucket(&bucket) + .key(k) + .send() + .await + .unwrap(); + } + + { + // Default + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .send() + .await + .unwrap(); + + assert_eq!(r.uploads.unwrap().len(), 5); + 
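// All 5 uploads are listed, including the two concurrent uploads on key "a". +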
assert!(r.common_prefixes.is_none()); + } + { + // With pagination + let mut next = None; + let mut upnext = None; + let last_idx = KEYS_MULTIPART.len() - 1; + + for i in 0..KEYS_MULTIPART.len() { + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .set_key_marker(next) + .set_upload_id_marker(upnext) + .max_uploads(1) + .send() + .await + .unwrap(); + + next = r.next_key_marker; + upnext = r.next_upload_id_marker; + + assert_eq!(r.uploads.unwrap().len(), 1); + assert!(r.common_prefixes.is_none()); + if i != last_idx { + assert!(next.is_some()); + } + } + } + { + // With delimiter + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .delimiter("/") + .send() + .await + .unwrap(); + + assert_eq!(r.uploads.unwrap().len(), 3); + assert_eq!(r.common_prefixes.unwrap().len(), 1); + } + { + // With delimiter and pagination + let mut next = None; + let mut upnext = None; + let mut upcnt = 0; + let mut pfxcnt = 0; + let mut loopcnt = 0; + + while loopcnt < KEYS_MULTIPART.len() { + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .delimiter("/") + .max_uploads(1) + .set_key_marker(next) + .set_upload_id_marker(upnext) + .send() + .await + .unwrap(); + + next = r.next_key_marker; + upnext = r.next_upload_id_marker; + + loopcnt += 1; + upcnt += r.uploads.unwrap_or_default().len(); + pfxcnt += r.common_prefixes.unwrap_or_default().len(); + + if next.is_none() { + break; + } + } + + assert_eq!(upcnt + pfxcnt, loopcnt); + assert_eq!(upcnt, 3); + assert_eq!(pfxcnt, 1); + } + { + // With prefix + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .prefix("c") + .send() + .await + .unwrap(); + + assert_eq!(r.uploads.unwrap().len(), 3); + assert!(r.common_prefixes.is_none()); + } + { + // With prefix and delimiter + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .prefix("c") + .delimiter("/") + .send() + .await + .unwrap(); + + assert_eq!(r.uploads.unwrap().len(), 1); + assert_eq!(r.common_prefixes.unwrap().len(), 1); + } + { + // With prefix, delimiter and max keys + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .prefix("c") + .delimiter("/") + .max_uploads(1) + .send() + .await + .unwrap(); + + assert_eq!(r.uploads.unwrap().len(), 1); + assert!(r.common_prefixes.is_none()); + } + { + // With starting token before the first element + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .key_marker("ZZZZZ") + .send() + .await + .unwrap(); + + assert_eq!(r.uploads.unwrap().len(), 5); + assert!(r.common_prefixes.is_none()); + } + { + // With starting token after the last element + let r = ctx + .client + .list_multipart_uploads() + .bucket(&bucket) + .key_marker("d") + .send() + .await + .unwrap(); + + assert!(r.uploads.is_none()); + assert!(r.common_prefixes.is_none()); + } +} diff --git a/src/garage/tests/s3/mod.rs b/src/garage/tests/s3/mod.rs new file mode 100644 index 00000000..623eb665 --- /dev/null +++ b/src/garage/tests/s3/mod.rs @@ -0,0 +1,6 @@ +mod list; +mod multipart; +mod objects; +mod simple; +mod streaming_signature; +mod website; diff --git a/src/garage/tests/s3/multipart.rs b/src/garage/tests/s3/multipart.rs new file mode 100644 index 00000000..895a2993 --- /dev/null +++ b/src/garage/tests/s3/multipart.rs @@ -0,0 +1,415 @@ +use crate::common; +use aws_sdk_s3::model::{CompletedMultipartUpload, CompletedPart}; +use aws_sdk_s3::types::ByteStream; + +const SZ_5MB: usize = 5 * 1024 * 1024; +const SZ_10MB: usize = 10 * 1024 * 1024; + +#[tokio::test] 
+async fn test_uploadlistpart() { + let ctx = common::context(); + let bucket = ctx.create_bucket("uploadpart"); + + let u1 = vec![0xee; SZ_5MB]; + let u2 = vec![0x11; SZ_5MB]; + + let up = ctx + .client + .create_multipart_upload() + .bucket(&bucket) + .key("a") + .send() + .await + .unwrap(); + let uid = up.upload_id.as_ref().unwrap(); + + assert!(up.upload_id.is_some()); + + { + let r = ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .send() + .await + .unwrap(); + + assert!(r.parts.is_none()); + } + + let p1 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(2) + .body(ByteStream::from(u1)) + .send() + .await + .unwrap(); + + { + // ListPart on 1st element + let r = ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .send() + .await + .unwrap(); + + let ps = r.parts.unwrap(); + assert_eq!(ps.len(), 1); + let fp = ps.iter().find(|x| x.part_number == 2).unwrap(); + assert!(fp.last_modified.is_some()); + assert_eq!( + fp.e_tag.as_ref().unwrap(), + "\"3366bb9dcf710d6801b5926467d02e19\"" + ); + assert_eq!(fp.size, SZ_5MB as i64); + } + + let p2 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .part_number(1) + .body(ByteStream::from(u2)) + .send() + .await + .unwrap(); + + { + // ListPart on the 2 elements + let r = ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .send() + .await + .unwrap(); + + let ps = r.parts.unwrap(); + assert_eq!(ps.len(), 2); + let fp = ps.iter().find(|x| x.part_number == 1).unwrap(); + assert!(fp.last_modified.is_some()); + assert_eq!( + fp.e_tag.as_ref().unwrap(), + "\"3c484266f9315485694556e6c693bfa2\"" + ); + assert_eq!(fp.size, SZ_5MB as i64); + } + + { + // Call pagination + let r = ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .max_parts(1) + .send() + .await + .unwrap(); + + assert!(r.part_number_marker.is_none()); + assert!(r.next_part_number_marker.is_some()); + assert_eq!(r.max_parts, 1_i32); + assert!(r.is_truncated); + assert_eq!(r.key.unwrap(), "a"); + assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str()); + assert_eq!(r.parts.unwrap().len(), 1); + + let r2 = ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .max_parts(1) + .part_number_marker(r.next_part_number_marker.as_ref().unwrap()) + .send() + .await + .unwrap(); + + assert_eq!( + r2.part_number_marker.as_ref().unwrap(), + r.next_part_number_marker.as_ref().unwrap() + ); + assert_eq!(r2.max_parts, 1_i32); + assert!(r2.is_truncated); + assert_eq!(r2.key.unwrap(), "a"); + assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str()); + assert_eq!(r2.parts.unwrap().len(), 1); + } + + let cmp = CompletedMultipartUpload::builder() + .parts( + CompletedPart::builder() + .part_number(1) + .e_tag(p2.e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(2) + .e_tag(p1.e_tag.unwrap()) + .build(), + ) + .build(); + + ctx.client + .complete_multipart_upload() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .multipart_upload(cmp) + .send() + .await + .unwrap(); + + // The multipart upload must not appear anymore + assert!(ctx + .client + .list_parts() + .bucket(&bucket) + .key("a") + .upload_id(uid) + .send() + .await + .is_err()); + + { + // The object must appear as a regular object + let r = ctx + .client + .head_object() + .bucket(&bucket) + .key("a") + .send() + .await + .unwrap(); + + assert_eq!(r.content_length, (SZ_5MB * 2) as i64); + 
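// i.e. the two 5 MB parts were concatenated into a single object +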
} +} + +#[tokio::test] +async fn test_uploadpartcopy() { + let ctx = common::context(); + let bucket = ctx.create_bucket("uploadpartcopy"); + + let u1 = vec![0x11; SZ_10MB]; + let u2 = vec![0x22; SZ_5MB]; + let u3 = vec![0x33; SZ_5MB]; + let u4 = vec![0x44; SZ_5MB]; + let u5 = vec![0x55; SZ_5MB]; + + let overflow = 5500000 - SZ_5MB; + let mut exp_obj = u3.clone(); + exp_obj.extend(&u4[500..]); + exp_obj.extend(&u5[..overflow + 1]); + exp_obj.extend(&u2); + exp_obj.extend(&u1[500..5500000 + 1]); + + // (setup) Upload a single part object + ctx.client + .put_object() + .bucket(&bucket) + .key("source1") + .body(ByteStream::from(u1)) + .send() + .await + .unwrap(); + + // (setup) Upload a multipart object with 2 parts + { + let up = ctx + .client + .create_multipart_upload() + .bucket(&bucket) + .key("source2") + .send() + .await + .unwrap(); + let uid = up.upload_id.as_ref().unwrap(); + + let p1 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("source2") + .upload_id(uid) + .part_number(1) + .body(ByteStream::from(u4)) + .send() + .await + .unwrap(); + + let p2 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("source2") + .upload_id(uid) + .part_number(2) + .body(ByteStream::from(u5)) + .send() + .await + .unwrap(); + + let cmp = CompletedMultipartUpload::builder() + .parts( + CompletedPart::builder() + .part_number(1) + .e_tag(p1.e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(2) + .e_tag(p2.e_tag.unwrap()) + .build(), + ) + .build(); + + ctx.client + .complete_multipart_upload() + .bucket(&bucket) + .key("source2") + .upload_id(uid) + .multipart_upload(cmp) + .send() + .await + .unwrap(); + } + + // Our multipart object that does copy + let up = ctx + .client + .create_multipart_upload() + .bucket(&bucket) + .key("target") + .send() + .await + .unwrap(); + let uid = up.upload_id.as_ref().unwrap(); + + let p3 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("target") + .upload_id(uid) + .part_number(3) + .body(ByteStream::from(u2)) + .send() + .await + .unwrap(); + + let p1 = ctx + .client + .upload_part() + .bucket(&bucket) + .key("target") + .upload_id(uid) + .part_number(1) + .body(ByteStream::from(u3)) + .send() + .await + .unwrap(); + + let p2 = ctx + .client + .upload_part_copy() + .bucket(&bucket) + .key("target") + .upload_id(uid) + .part_number(2) + .copy_source("uploadpartcopy/source2") + .copy_source_range("bytes=500-5500000") + .send() + .await + .unwrap(); + + let p4 = ctx + .client + .upload_part_copy() + .bucket(&bucket) + .key("target") + .upload_id(uid) + .part_number(4) + .copy_source("uploadpartcopy/source1") + .copy_source_range("bytes=500-5500000") + .send() + .await + .unwrap(); + + let cmp = CompletedMultipartUpload::builder() + .parts( + CompletedPart::builder() + .part_number(1) + .e_tag(p1.e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(2) + .e_tag(p2.copy_part_result.unwrap().e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(3) + .e_tag(p3.e_tag.unwrap()) + .build(), + ) + .parts( + CompletedPart::builder() + .part_number(4) + .e_tag(p4.copy_part_result.unwrap().e_tag.unwrap()) + .build(), + ) + .build(); + + ctx.client + .complete_multipart_upload() + .bucket(&bucket) + .key("target") + .upload_id(uid) + .multipart_upload(cmp) + .send() + .await + .unwrap(); + + // (check) Get object + + let obj = ctx + .client + .get_object() + .bucket(&bucket) + .key("target") + .send() + .await + .unwrap(); + + let real_obj = obj + .body + 
.collect() + .await + .expect("Error reading data") + .into_bytes(); + + assert_eq!(real_obj.len(), exp_obj.len()); + assert_eq!(real_obj, exp_obj); +} diff --git a/src/garage/tests/s3/objects.rs b/src/garage/tests/s3/objects.rs new file mode 100644 index 00000000..e1175b81 --- /dev/null +++ b/src/garage/tests/s3/objects.rs @@ -0,0 +1,266 @@ +use crate::common; +use aws_sdk_s3::model::{Delete, ObjectIdentifier}; +use aws_sdk_s3::types::ByteStream; + +const STD_KEY: &str = "hello world"; +const CTRL_KEY: &str = "\x00\x01\x02\x00"; +const UTF8_KEY: &str = "\u{211D}\u{1F923}\u{1F44B}"; +const BODY: &[u8; 62] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + +#[tokio::test] +async fn test_putobject() { + let ctx = common::context(); + let bucket = ctx.create_bucket("putobject"); + + { + // Send an empty object (can serve as a directory marker) + // with a content type + let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; + let content_type = "text/csv"; + let r = ctx + .client + .put_object() + .bucket(&bucket) + .key(STD_KEY) + .content_type(content_type) + .send() + .await + .unwrap(); + + assert_eq!(r.e_tag.unwrap().as_str(), etag); + // We return a version ID here + // We should check if Amazon is returning one when versioning is not enabled + assert!(r.version_id.is_some()); + + let _version = r.version_id.unwrap(); + + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(STD_KEY) + .send() + .await + .unwrap(); + + assert_bytes_eq!(o.body, b""); + assert_eq!(o.e_tag.unwrap(), etag); + // We do not return version ID + // We should check if Amazon is returning one when versioning is not enabled + // assert_eq!(o.version_id.unwrap(), _version); + assert_eq!(o.content_type.unwrap(), content_type); + assert!(o.last_modified.is_some()); + assert_eq!(o.content_length, 0); + assert_eq!(o.parts_count, 0); + assert_eq!(o.tag_count, 0); + } + + { + // Key with control characters, + // no content type and some data + let etag = "\"49f68a5c8493ec2c0bf489821c21fc3b\""; + let data = ByteStream::from_static(b"hi"); + + let r = ctx + .client + .put_object() + .bucket(&bucket) + .key(CTRL_KEY) + .body(data) + .send() + .await + .unwrap(); + + assert_eq!(r.e_tag.unwrap().as_str(), etag); + assert!(r.version_id.is_some()); + + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(CTRL_KEY) + .send() + .await + .unwrap(); + + assert_bytes_eq!(o.body, b"hi"); + assert_eq!(o.e_tag.unwrap(), etag); + assert!(o.last_modified.is_some()); + assert_eq!(o.content_length, 2); + assert_eq!(o.parts_count, 0); + assert_eq!(o.tag_count, 0); + } + + { + // Key with UTF8 codepoints including emoji + let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; + + let r = ctx + .client + .put_object() + .bucket(&bucket) + .key(UTF8_KEY) + .send() + .await + .unwrap(); + + assert_eq!(r.e_tag.unwrap().as_str(), etag); + assert!(r.version_id.is_some()); + + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(UTF8_KEY) + .send() + .await + .unwrap(); + + assert_bytes_eq!(o.body, b""); + assert_eq!(o.e_tag.unwrap(), etag); + assert!(o.last_modified.is_some()); + assert_eq!(o.content_length, 0); + assert_eq!(o.parts_count, 0); + assert_eq!(o.tag_count, 0); + } +} + +#[tokio::test] +async fn test_getobject() { + let ctx = common::context(); + let bucket = ctx.create_bucket("getobject"); + + let etag = "\"46cf18a9b447991b450cad3facf5937e\""; + let data = ByteStream::from_static(BODY); + + let r = ctx + .client + .put_object() + .bucket(&bucket) + .key(STD_KEY) + .body(data) + .send() + 
.await + .unwrap(); + + assert_eq!(r.e_tag.unwrap().as_str(), etag); + + { + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(STD_KEY) + .range("bytes=1-9") + .send() + .await + .unwrap(); + + assert_eq!(o.content_range.unwrap().as_str(), "bytes 1-9/62"); + assert_bytes_eq!(o.body, &BODY[1..10]); + } + { + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(STD_KEY) + .range("bytes=9-") + .send() + .await + .unwrap(); + assert_eq!(o.content_range.unwrap().as_str(), "bytes 9-61/62"); + assert_bytes_eq!(o.body, &BODY[9..]); + } + { + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(STD_KEY) + .range("bytes=-5") + .send() + .await + .unwrap(); + assert_eq!(o.content_range.unwrap().as_str(), "bytes 57-61/62"); + assert_bytes_eq!(o.body, &BODY[57..]); + } +} + +#[tokio::test] +async fn test_deleteobject() { + let ctx = common::context(); + let bucket = ctx.create_bucket("deleteobject"); + + let mut to_del = Delete::builder(); + + // add content without data + for i in 0..5 { + let k = format!("k-{}", i); + ctx.client + .put_object() + .bucket(&bucket) + .key(k.to_string()) + .send() + .await + .unwrap(); + if i > 0 { + to_del = to_del.objects(ObjectIdentifier::builder().key(k).build()); + } + } + + // add content with data + for i in 0..5 { + let k = format!("l-{}", i); + let data = ByteStream::from_static(BODY); + ctx.client + .put_object() + .bucket(&bucket) + .key(k.to_string()) + .body(data) + .send() + .await + .unwrap(); + + if i > 0 { + to_del = to_del.objects(ObjectIdentifier::builder().key(k).build()); + } + } + + ctx.client + .delete_object() + .bucket(&bucket) + .key("k-0") + .send() + .await + .unwrap(); + + ctx.client + .delete_object() + .bucket(&bucket) + .key("l-0") + .send() + .await + .unwrap(); + + let r = ctx + .client + .delete_objects() + .bucket(&bucket) + .delete(to_del.build()) + .send() + .await + .unwrap(); + + assert_eq!(r.deleted.unwrap().len(), 8); + + let l = ctx + .client + .list_objects_v2() + .bucket(&bucket) + .send() + .await + .unwrap(); + + assert!(l.contents.is_none()); +} diff --git a/src/garage/tests/s3/simple.rs b/src/garage/tests/s3/simple.rs new file mode 100644 index 00000000..f54ae9ac --- /dev/null +++ b/src/garage/tests/s3/simple.rs @@ -0,0 +1,31 @@ +use crate::common; + +#[tokio::test] +async fn test_simple() { + use aws_sdk_s3::types::ByteStream; + + let ctx = common::context(); + let bucket = ctx.create_bucket("test-simple"); + + let data = ByteStream::from_static(b"Hello world!"); + + ctx.client + .put_object() + .bucket(&bucket) + .key("test") + .body(data) + .send() + .await + .unwrap(); + + let res = ctx + .client + .get_object() + .bucket(&bucket) + .key("test") + .send() + .await + .unwrap(); + + assert_bytes_eq!(res.body, b"Hello world!"); +} diff --git a/src/garage/tests/s3/streaming_signature.rs b/src/garage/tests/s3/streaming_signature.rs new file mode 100644 index 00000000..c68f7dfc --- /dev/null +++ b/src/garage/tests/s3/streaming_signature.rs @@ -0,0 +1,185 @@ +use std::collections::HashMap; + +use crate::common; +use common::custom_requester::BodySignature; +use hyper::Method; + +const STD_KEY: &str = "hello-world"; +//const CTRL_KEY: &str = "\x00\x01\x02\x00"; +const BODY: &[u8; 62] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + +#[tokio::test] +async fn test_putobject_streaming() { + let ctx = common::context(); + let bucket = ctx.create_bucket("putobject-streaming"); + + { + // Send an empty object (can serve as a directory marker) + // with a content type + let 
etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; + let content_type = "text/csv"; + let mut headers = HashMap::new(); + headers.insert("content-type".to_owned(), content_type.to_owned()); + let _ = ctx + .custom_request + .builder(bucket.clone()) + .method(Method::PUT) + .path(STD_KEY.to_owned()) + .unsigned_headers(headers) + .vhost_style(true) + .body(vec![]) + .body_signature(BodySignature::Streaming(10)) + .send() + .await + .unwrap(); + + // assert_eq!(r.e_tag.unwrap().as_str(), etag); + // We return a version ID here + // We should check if Amazon is returning one when versioning is not enabled + // assert!(r.version_id.is_some()); + + //let _version = r.version_id.unwrap(); + + let o = ctx + .client + .get_object() + .bucket(&bucket) + .key(STD_KEY) + .send() + .await + .unwrap(); + + assert_bytes_eq!(o.body, b""); + assert_eq!(o.e_tag.unwrap(), etag); + // We do not return version ID + // We should check if Amazon is returning one when versioning is not enabled + // assert_eq!(o.version_id.unwrap(), _version); + assert_eq!(o.content_type.unwrap(), content_type); + assert!(o.last_modified.is_some()); + assert_eq!(o.content_length, 0); + assert_eq!(o.parts_count, 0); + assert_eq!(o.tag_count, 0); + } + + { + let etag = "\"46cf18a9b447991b450cad3facf5937e\""; + + let _ = ctx + .custom_request + .builder(bucket.clone()) + .method(Method::PUT) + //.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this + //fail + .path("abc".to_owned()) + .vhost_style(true) + .body(BODY.to_vec()) + .body_signature(BodySignature::Streaming(16)) + .send() + .await + .unwrap(); + + // assert_eq!(r.e_tag.unwrap().as_str(), etag); + // assert!(r.version_id.is_some()); + + let o = ctx + .client + .get_object() + .bucket(&bucket) + //.key(CTRL_KEY) + .key("abc") + .send() + .await + .unwrap(); + + assert_bytes_eq!(o.body, BODY); + assert_eq!(o.e_tag.unwrap(), etag); + assert!(o.last_modified.is_some()); + assert_eq!(o.content_length, 62); + assert_eq!(o.parts_count, 0); + assert_eq!(o.tag_count, 0); + } +} + +#[tokio::test] +async fn test_create_bucket_streaming() { + let ctx = common::context(); + let bucket = "createbucket-streaming"; + + { + // create bucket + let _ = ctx + .custom_request + .builder(bucket.to_owned()) + .method(Method::PUT) + .body_signature(BodySignature::Streaming(10)) + .send() + .await + .unwrap(); + + // test if the bucket exists and works properly + let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; + let content_type = "text/csv"; + let _ = ctx + .client + .put_object() + .bucket(bucket) + .key(STD_KEY) + .content_type(content_type) + .send() + .await + .unwrap(); + + let o = ctx + .client + .get_object() + .bucket(bucket) + .key(STD_KEY) + .send() + .await + .unwrap(); + + assert_eq!(o.e_tag.unwrap(), etag); + } +} + +#[tokio::test] +async fn test_put_website_streaming() { + let ctx = common::context(); + let bucket = ctx.create_bucket("putwebsite-streaming"); + + { + let website_config = r#" + + + err/error.html + + + home.html + +"#; + + let mut query = HashMap::new(); + query.insert("website".to_owned(), None); + let _ = ctx + .custom_request + .builder(bucket.clone()) + .method(Method::PUT) + .query_params(query) + .body(website_config.as_bytes().to_vec()) + .body_signature(BodySignature::Streaming(10)) + .send() + .await + .unwrap(); + + let o = ctx + .client + .get_bucket_website() + .bucket(&bucket) + .send() + .await + .unwrap(); + + assert_eq!(o.index_document.unwrap().suffix.unwrap(), "home.html"); + 
assert_eq!(o.error_document.unwrap().key.unwrap(), "err/error.html");
+ }
+}
diff --git a/src/garage/tests/s3/website.rs b/src/garage/tests/s3/website.rs
new file mode 100644
index 00000000..0570ac6a
--- /dev/null
+++ b/src/garage/tests/s3/website.rs
@@ -0,0 +1,324 @@
+use crate::common;
+use crate::common::ext::*;
+use aws_sdk_s3::{
+ model::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration},
+ types::ByteStream,
+};
+use http::Request;
+use hyper::{
+ body::{to_bytes, Body},
+ Client,
+};
+
+const BODY: &[u8; 16] = b"<h1>bonjour</h1>
"; +const BODY_ERR: &[u8; 6] = b"erreur"; + +#[tokio::test] +async fn test_website() { + const BCKT_NAME: &str = "my-website"; + let ctx = common::context(); + let bucket = ctx.create_bucket(BCKT_NAME); + + let data = ByteStream::from_static(BODY); + + ctx.client + .put_object() + .bucket(&bucket) + .key("index.html") + .body(data) + .send() + .await + .unwrap(); + + let client = Client::new(); + + let req = || { + Request::builder() + .method("GET") + .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port)) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .body(Body::empty()) + .unwrap() + }; + + let mut resp = client.request(req()).await.unwrap(); + + assert_eq!(resp.status(), 404); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); /* check that we do not leak body */ + + ctx.garage + .command() + .args(["bucket", "website", "--allow", BCKT_NAME]) + .quiet() + .expect_success_status("Could not allow website on bucket"); + + resp = client.request(req()).await.unwrap(); + assert_eq!(resp.status(), 200); + assert_eq!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); + + ctx.garage + .command() + .args(["bucket", "website", "--deny", BCKT_NAME]) + .quiet() + .expect_success_status("Could not deny website on bucket"); + + resp = client.request(req()).await.unwrap(); + assert_eq!(resp.status(), 404); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); /* check that we do not leak body */ +} + +#[tokio::test] +async fn test_website_s3_api() { + const BCKT_NAME: &str = "my-cors"; + let ctx = common::context(); + let bucket = ctx.create_bucket(BCKT_NAME); + + let data = ByteStream::from_static(BODY); + + ctx.client + .put_object() + .bucket(&bucket) + .key("site/home.html") + .body(data) + .send() + .await + .unwrap(); + + ctx.client + .put_object() + .bucket(&bucket) + .key("err/error.html") + .body(ByteStream::from_static(BODY_ERR)) + .send() + .await + .unwrap(); + + let conf = WebsiteConfiguration::builder() + .index_document(IndexDocument::builder().suffix("home.html").build()) + .error_document(ErrorDocument::builder().key("err/error.html").build()) + .build(); + + ctx.client + .put_bucket_website() + .bucket(&bucket) + .website_configuration(conf) + .send() + .await + .unwrap(); + + let cors = CorsConfiguration::builder() + .cors_rules( + CorsRule::builder() + .id("main-rule") + .allowed_headers("*") + .allowed_methods("GET") + .allowed_methods("PUT") + .allowed_origins("*") + .build(), + ) + .build(); + + ctx.client + .put_bucket_cors() + .bucket(&bucket) + .cors_configuration(cors) + .send() + .await + .unwrap(); + + { + let cors_res = ctx + .client + .get_bucket_cors() + .bucket(&bucket) + .send() + .await + .unwrap(); + + let main_rule = cors_res.cors_rules().unwrap().iter().next().unwrap(); + + assert_eq!(main_rule.id.as_ref().unwrap(), "main-rule"); + assert_eq!( + main_rule.allowed_headers.as_ref().unwrap(), + &vec!["*".to_string()] + ); + assert_eq!( + main_rule.allowed_origins.as_ref().unwrap(), + &vec!["*".to_string()] + ); + assert_eq!( + main_rule.allowed_methods.as_ref().unwrap(), + &vec!["GET".to_string(), "PUT".to_string()] + ); + } + + let client = Client::new(); + + // Test direct requests with CORS + { + let req = Request::builder() + .method("GET") + .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port)) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .header("Origin", "https://example.com") + .body(Body::empty()) + .unwrap(); + + let mut resp = 
client.request(req).await.unwrap(); + + assert_eq!(resp.status(), 200); + assert_eq!( + resp.headers().get("access-control-allow-origin").unwrap(), + "*" + ); + assert_eq!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); + } + + // Test ErrorDocument on 404 + { + let req = Request::builder() + .method("GET") + .uri(format!( + "http://127.0.0.1:{}/wrong.html", + ctx.garage.web_port + )) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .body(Body::empty()) + .unwrap(); + + let mut resp = client.request(req).await.unwrap(); + + assert_eq!(resp.status(), 404); + assert_eq!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY_ERR.as_ref() + ); + } + + // Test CORS with an allowed preflight request + { + let req = Request::builder() + .method("OPTIONS") + .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port)) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .header("Origin", "https://example.com") + .header("Access-Control-Request-Method", "PUT") + .body(Body::empty()) + .unwrap(); + + let mut resp = client.request(req).await.unwrap(); + + assert_eq!(resp.status(), 200); + assert_eq!( + resp.headers().get("access-control-allow-origin").unwrap(), + "*" + ); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); + } + + // Test CORS with a forbidden preflight request + { + let req = Request::builder() + .method("OPTIONS") + .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port)) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .header("Origin", "https://example.com") + .header("Access-Control-Request-Method", "DELETE") + .body(Body::empty()) + .unwrap(); + + let mut resp = client.request(req).await.unwrap(); + + assert_eq!(resp.status(), 403); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); + } + + //@TODO test CORS on the S3 endpoint. We need to handle auth manually to check it. + + // Delete cors + ctx.client + .delete_bucket_cors() + .bucket(&bucket) + .send() + .await + .unwrap(); + + // Check CORS are deleted from the API + // @FIXME check what is the expected behavior when GetBucketCors is called on a bucket without + // any CORS. 
+ assert!(ctx + .client + .get_bucket_cors() + .bucket(&bucket) + .send() + .await + .is_err()); + + // Test CORS are not sent anymore on a previously allowed request + { + let req = Request::builder() + .method("OPTIONS") + .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port)) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .header("Origin", "https://example.com") + .header("Access-Control-Request-Method", "PUT") + .body(Body::empty()) + .unwrap(); + + let mut resp = client.request(req).await.unwrap(); + + assert_eq!(resp.status(), 403); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); + } + + // Disallow website from the API + ctx.client + .delete_bucket_website() + .bucket(&bucket) + .send() + .await + .unwrap(); + + // Check that the website is not served anymore + { + let req = Request::builder() + .method("GET") + .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port)) + .header("Host", format!("{}.web.garage", BCKT_NAME)) + .body(Body::empty()) + .unwrap(); + + let mut resp = client.request(req).await.unwrap(); + + assert_eq!(resp.status(), 404); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY_ERR.as_ref() + ); + assert_ne!( + to_bytes(resp.body_mut()).await.unwrap().as_ref(), + BODY.as_ref() + ); + } +} diff --git a/src/garage/tests/simple.rs b/src/garage/tests/simple.rs deleted file mode 100644 index f54ae9ac..00000000 --- a/src/garage/tests/simple.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::common; - -#[tokio::test] -async fn test_simple() { - use aws_sdk_s3::types::ByteStream; - - let ctx = common::context(); - let bucket = ctx.create_bucket("test-simple"); - - let data = ByteStream::from_static(b"Hello world!"); - - ctx.client - .put_object() - .bucket(&bucket) - .key("test") - .body(data) - .send() - .await - .unwrap(); - - let res = ctx - .client - .get_object() - .bucket(&bucket) - .key("test") - .send() - .await - .unwrap(); - - assert_bytes_eq!(res.body, b"Hello world!"); -} diff --git a/src/garage/tests/streaming_signature.rs b/src/garage/tests/streaming_signature.rs deleted file mode 100644 index c68f7dfc..00000000 --- a/src/garage/tests/streaming_signature.rs +++ /dev/null @@ -1,185 +0,0 @@ -use std::collections::HashMap; - -use crate::common; -use common::custom_requester::BodySignature; -use hyper::Method; - -const STD_KEY: &str = "hello-world"; -//const CTRL_KEY: &str = "\x00\x01\x02\x00"; -const BODY: &[u8; 62] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - -#[tokio::test] -async fn test_putobject_streaming() { - let ctx = common::context(); - let bucket = ctx.create_bucket("putobject-streaming"); - - { - // Send an empty object (can serve as a directory marker) - // with a content type - let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; - let content_type = "text/csv"; - let mut headers = HashMap::new(); - headers.insert("content-type".to_owned(), content_type.to_owned()); - let _ = ctx - .custom_request - .builder(bucket.clone()) - .method(Method::PUT) - .path(STD_KEY.to_owned()) - .unsigned_headers(headers) - .vhost_style(true) - .body(vec![]) - .body_signature(BodySignature::Streaming(10)) - .send() - .await - .unwrap(); - - // assert_eq!(r.e_tag.unwrap().as_str(), etag); - // We return a version ID here - // We should check if Amazon is returning one when versioning is not enabled - // assert!(r.version_id.is_some()); - - //let _version = r.version_id.unwrap(); - - let o = ctx - .client - .get_object() - .bucket(&bucket) - .key(STD_KEY) - 
.send() - .await - .unwrap(); - - assert_bytes_eq!(o.body, b""); - assert_eq!(o.e_tag.unwrap(), etag); - // We do not return version ID - // We should check if Amazon is returning one when versioning is not enabled - // assert_eq!(o.version_id.unwrap(), _version); - assert_eq!(o.content_type.unwrap(), content_type); - assert!(o.last_modified.is_some()); - assert_eq!(o.content_length, 0); - assert_eq!(o.parts_count, 0); - assert_eq!(o.tag_count, 0); - } - - { - let etag = "\"46cf18a9b447991b450cad3facf5937e\""; - - let _ = ctx - .custom_request - .builder(bucket.clone()) - .method(Method::PUT) - //.path(CTRL_KEY.to_owned()) at the moment custom_request does not encode url so this - //fail - .path("abc".to_owned()) - .vhost_style(true) - .body(BODY.to_vec()) - .body_signature(BodySignature::Streaming(16)) - .send() - .await - .unwrap(); - - // assert_eq!(r.e_tag.unwrap().as_str(), etag); - // assert!(r.version_id.is_some()); - - let o = ctx - .client - .get_object() - .bucket(&bucket) - //.key(CTRL_KEY) - .key("abc") - .send() - .await - .unwrap(); - - assert_bytes_eq!(o.body, BODY); - assert_eq!(o.e_tag.unwrap(), etag); - assert!(o.last_modified.is_some()); - assert_eq!(o.content_length, 62); - assert_eq!(o.parts_count, 0); - assert_eq!(o.tag_count, 0); - } -} - -#[tokio::test] -async fn test_create_bucket_streaming() { - let ctx = common::context(); - let bucket = "createbucket-streaming"; - - { - // create bucket - let _ = ctx - .custom_request - .builder(bucket.to_owned()) - .method(Method::PUT) - .body_signature(BodySignature::Streaming(10)) - .send() - .await - .unwrap(); - - // test if the bucket exists and works properly - let etag = "\"d41d8cd98f00b204e9800998ecf8427e\""; - let content_type = "text/csv"; - let _ = ctx - .client - .put_object() - .bucket(bucket) - .key(STD_KEY) - .content_type(content_type) - .send() - .await - .unwrap(); - - let o = ctx - .client - .get_object() - .bucket(bucket) - .key(STD_KEY) - .send() - .await - .unwrap(); - - assert_eq!(o.e_tag.unwrap(), etag); - } -} - -#[tokio::test] -async fn test_put_website_streaming() { - let ctx = common::context(); - let bucket = ctx.create_bucket("putwebsite-streaming"); - - { - let website_config = r#" - - - err/error.html - - - home.html - -"#; - - let mut query = HashMap::new(); - query.insert("website".to_owned(), None); - let _ = ctx - .custom_request - .builder(bucket.clone()) - .method(Method::PUT) - .query_params(query) - .body(website_config.as_bytes().to_vec()) - .body_signature(BodySignature::Streaming(10)) - .send() - .await - .unwrap(); - - let o = ctx - .client - .get_bucket_website() - .bucket(&bucket) - .send() - .await - .unwrap(); - - assert_eq!(o.index_document.unwrap().suffix.unwrap(), "home.html"); - assert_eq!(o.error_document.unwrap().key.unwrap(), "err/error.html"); - } -} diff --git a/src/garage/tests/website.rs b/src/garage/tests/website.rs deleted file mode 100644 index 963d11ea..00000000 --- a/src/garage/tests/website.rs +++ /dev/null @@ -1,342 +0,0 @@ -use crate::common; -use crate::common::ext::*; -use aws_sdk_s3::{ - model::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration}, - types::ByteStream, -}; -use http::Request; -use hyper::{ - body::{to_bytes, Body}, - Client, -}; - -const BODY: &[u8; 16] = b"

<h1>bonjour</h1>

"; -const BODY_ERR: &[u8; 6] = b"erreur"; - -#[tokio::test] -async fn test_website() { - const BCKT_NAME: &str = "my-website"; - let ctx = common::context(); - let bucket = ctx.create_bucket(BCKT_NAME); - - let data = ByteStream::from_static(BODY); - - ctx.client - .put_object() - .bucket(&bucket) - .key("index.html") - .body(data) - .send() - .await - .unwrap(); - - let client = Client::new(); - - let req = || { - Request::builder() - .method("GET") - .uri(format!( - "http://127.0.0.1:{}/", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::empty()) - .unwrap() - }; - - let mut resp = client.request(req()).await.unwrap(); - - assert_eq!(resp.status(), 404); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); /* check that we do not leak body */ - - ctx.garage - .command() - .args(["bucket", "website", "--allow", BCKT_NAME]) - .quiet() - .expect_success_status("Could not allow website on bucket"); - - resp = client.request(req()).await.unwrap(); - assert_eq!(resp.status(), 200); - assert_eq!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); - - ctx.garage - .command() - .args(["bucket", "website", "--deny", BCKT_NAME]) - .quiet() - .expect_success_status("Could not deny website on bucket"); - - resp = client.request(req()).await.unwrap(); - assert_eq!(resp.status(), 404); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); /* check that we do not leak body */ -} - -#[tokio::test] -async fn test_website_s3_api() { - const BCKT_NAME: &str = "my-cors"; - let ctx = common::context(); - let bucket = ctx.create_bucket(BCKT_NAME); - - let data = ByteStream::from_static(BODY); - - ctx.client - .put_object() - .bucket(&bucket) - .key("site/home.html") - .body(data) - .send() - .await - .unwrap(); - - ctx.client - .put_object() - .bucket(&bucket) - .key("err/error.html") - .body(ByteStream::from_static(BODY_ERR)) - .send() - .await - .unwrap(); - - let conf = WebsiteConfiguration::builder() - .index_document(IndexDocument::builder().suffix("home.html").build()) - .error_document(ErrorDocument::builder().key("err/error.html").build()) - .build(); - - ctx.client - .put_bucket_website() - .bucket(&bucket) - .website_configuration(conf) - .send() - .await - .unwrap(); - - let cors = CorsConfiguration::builder() - .cors_rules( - CorsRule::builder() - .id("main-rule") - .allowed_headers("*") - .allowed_methods("GET") - .allowed_methods("PUT") - .allowed_origins("*") - .build(), - ) - .build(); - - ctx.client - .put_bucket_cors() - .bucket(&bucket) - .cors_configuration(cors) - .send() - .await - .unwrap(); - - { - let cors_res = ctx - .client - .get_bucket_cors() - .bucket(&bucket) - .send() - .await - .unwrap(); - - let main_rule = cors_res.cors_rules().unwrap().iter().next().unwrap(); - - assert_eq!(main_rule.id.as_ref().unwrap(), "main-rule"); - assert_eq!( - main_rule.allowed_headers.as_ref().unwrap(), - &vec!["*".to_string()] - ); - assert_eq!( - main_rule.allowed_origins.as_ref().unwrap(), - &vec!["*".to_string()] - ); - assert_eq!( - main_rule.allowed_methods.as_ref().unwrap(), - &vec!["GET".to_string(), "PUT".to_string()] - ); - } - - let client = Client::new(); - - // Test direct requests with CORS - { - let req = Request::builder() - .method("GET") - .uri(format!( - "http://127.0.0.1:{}/site/", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .header("Origin", "https://example.com") - 
.body(Body::empty()) - .unwrap(); - - let mut resp = client.request(req).await.unwrap(); - - assert_eq!(resp.status(), 200); - assert_eq!( - resp.headers().get("access-control-allow-origin").unwrap(), - "*" - ); - assert_eq!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); - } - - // Test ErrorDocument on 404 - { - let req = Request::builder() - .method("GET") - .uri(format!( - "http://127.0.0.1:{}/wrong.html", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::empty()) - .unwrap(); - - let mut resp = client.request(req).await.unwrap(); - - assert_eq!(resp.status(), 404); - assert_eq!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY_ERR.as_ref() - ); - } - - // Test CORS with an allowed preflight request - { - let req = Request::builder() - .method("OPTIONS") - .uri(format!( - "http://127.0.0.1:{}/site/", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .header("Origin", "https://example.com") - .header("Access-Control-Request-Method", "PUT") - .body(Body::empty()) - .unwrap(); - - let mut resp = client.request(req).await.unwrap(); - - assert_eq!(resp.status(), 200); - assert_eq!( - resp.headers().get("access-control-allow-origin").unwrap(), - "*" - ); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); - } - - // Test CORS with a forbidden preflight request - { - let req = Request::builder() - .method("OPTIONS") - .uri(format!( - "http://127.0.0.1:{}/site/", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .header("Origin", "https://example.com") - .header("Access-Control-Request-Method", "DELETE") - .body(Body::empty()) - .unwrap(); - - let mut resp = client.request(req).await.unwrap(); - - assert_eq!(resp.status(), 403); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); - } - - //@TODO test CORS on the S3 endpoint. We need to handle auth manually to check it. - - // Delete cors - ctx.client - .delete_bucket_cors() - .bucket(&bucket) - .send() - .await - .unwrap(); - - // Check CORS are deleted from the API - // @FIXME check what is the expected behavior when GetBucketCors is called on a bucket without - // any CORS. 
- assert!(ctx - .client - .get_bucket_cors() - .bucket(&bucket) - .send() - .await - .is_err()); - - // Test CORS are not sent anymore on a previously allowed request - { - let req = Request::builder() - .method("OPTIONS") - .uri(format!( - "http://127.0.0.1:{}/site/", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .header("Origin", "https://example.com") - .header("Access-Control-Request-Method", "PUT") - .body(Body::empty()) - .unwrap(); - - let mut resp = client.request(req).await.unwrap(); - - assert_eq!(resp.status(), 403); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); - } - - // Disallow website from the API - ctx.client - .delete_bucket_website() - .bucket(&bucket) - .send() - .await - .unwrap(); - - // Check that the website is not served anymore - { - let req = Request::builder() - .method("GET") - .uri(format!( - "http://127.0.0.1:{}/site/", - common::garage::DEFAULT_PORT + 2 - )) - .header("Host", format!("{}.web.garage", BCKT_NAME)) - .body(Body::empty()) - .unwrap(); - - let mut resp = client.request(req).await.unwrap(); - - assert_eq!(resp.status(), 404); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY_ERR.as_ref() - ); - assert_ne!( - to_bytes(resp.body_mut()).await.unwrap().as_ref(), - BODY.as_ref() - ); - } -} diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 007cec89..133fe44e 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -22,8 +22,10 @@ garage_model_050 = { package = "garage_model", version = "0.5.1" } async-trait = "0.1.7" arc-swap = "1.0" +blake2 = "0.9" err-derive = "0.3" hex = "0.4" +base64 = "0.13" tracing = "0.1.30" rand = "0.8" zstd = { version = "0.9", default-features = false } @@ -42,3 +44,6 @@ opentelemetry = "0.17" #netapp = { version = "0.3.0", git = "https://git.deuxfleurs.fr/lx/netapp" } #netapp = { version = "0.4", path = "../../../netapp" } netapp = "0.4" + +[features] +k2v = [ "garage_util/k2v" ] diff --git a/src/model/block_ref_table.rs b/src/model/block_ref_table.rs deleted file mode 100644 index b6945403..00000000 --- a/src/model/block_ref_table.rs +++ /dev/null @@ -1,74 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::sync::Arc; - -use garage_util::data::*; - -use garage_table::crdt::Crdt; -use garage_table::*; - -use garage_block::manager::*; - -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub struct BlockRef { - /// Hash (blake2 sum) of the block, used as partition key - pub block: Hash, - - /// Id of the Version for the object containing this block, used as sorting key - pub version: Uuid, - - // Keep track of deleted status - /// Is the Version that contains this block deleted - pub deleted: crdt::Bool, -} - -impl Entry for BlockRef { - fn partition_key(&self) -> &Hash { - &self.block - } - fn sort_key(&self) -> &Uuid { - &self.version - } - fn is_tombstone(&self) -> bool { - self.deleted.get() - } -} - -impl Crdt for BlockRef { - fn merge(&mut self, other: &Self) { - self.deleted.merge(&other.deleted); - } -} - -pub struct BlockRefTable { - pub block_manager: Arc, -} - -impl TableSchema for BlockRefTable { - const TABLE_NAME: &'static str = "block_ref"; - - type P = Hash; - type S = Uuid; - type E = BlockRef; - type Filter = DeletedFilter; - - fn updated(&self, old: Option, new: Option) { - #[allow(clippy::or_fun_call)] - let block = &old.as_ref().or(new.as_ref()).unwrap().block; - let was_before = old.as_ref().map(|x| !x.deleted.get()).unwrap_or(false); - let is_after = 
new.as_ref().map(|x| !x.deleted.get()).unwrap_or(false); - if is_after && !was_before { - if let Err(e) = self.block_manager.block_incref(block) { - warn!("block_incref failed for block {:?}: {}", block, e); - } - } - if was_before && !is_after { - if let Err(e) = self.block_manager.block_decref(block) { - warn!("block_decref failed for block {:?}: {}", block, e); - } - } - } - - fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { - filter.apply(entry.deleted.get()) - } -} diff --git a/src/model/garage.rs b/src/model/garage.rs index abdb920a..03e21f8a 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -13,13 +13,19 @@ use garage_table::replication::TableFullReplication; use garage_table::replication::TableShardedReplication; use garage_table::*; -use crate::block_ref_table::*; +use crate::s3::block_ref_table::*; +use crate::s3::object_table::*; +use crate::s3::version_table::*; + use crate::bucket_alias_table::*; use crate::bucket_table::*; use crate::helper; use crate::key_table::*; -use crate::object_table::*; -use crate::version_table::*; + +#[cfg(feature = "k2v")] +use crate::index_counter::*; +#[cfg(feature = "k2v")] +use crate::k2v::{counter_table::*, item_table::*, poll::*, rpc::*}; /// An entire Garage full of data pub struct Garage { @@ -35,16 +41,32 @@ pub struct Garage { /// The block manager pub block_manager: Arc, - /// Table containing informations about buckets + /// Table containing buckets pub bucket_table: Arc>, - /// Table containing informations about bucket aliases + /// Table containing bucket aliases pub bucket_alias_table: Arc>, - /// Table containing informations about api keys + /// Table containing api keys pub key_table: Arc>, + /// Table containing S3 objects pub object_table: Arc>, + /// Table containing S3 object versions pub version_table: Arc>, + /// Table containing S3 block references (not blocks themselves) pub block_ref_table: Arc>, + + #[cfg(feature = "k2v")] + pub k2v: GarageK2V, +} + +#[cfg(feature = "k2v")] +pub struct GarageK2V { + /// Table containing K2V items + pub item_table: Arc>, + /// Indexing table containing K2V item counters + pub counter_table: Arc>, + /// K2V RPC handler + pub rpc: Arc, } impl Garage { @@ -95,6 +117,21 @@ impl Garage { system.clone(), ); + // ---- admin tables ---- + info!("Initialize bucket_table..."); + let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db); + + info!("Initialize bucket_alias_table..."); + let bucket_alias_table = Table::new( + BucketAliasTable, + control_rep_param.clone(), + system.clone(), + &db, + ); + info!("Initialize key_table_table..."); + let key_table = Table::new(KeyTable, control_rep_param, system.clone(), &db); + + // ---- S3 tables ---- info!("Initialize block_ref_table..."); let block_ref_table = Table::new( BlockRefTable { @@ -117,29 +154,20 @@ impl Garage { ); info!("Initialize object_table..."); + #[allow(clippy::redundant_clone)] let object_table = Table::new( ObjectTable { background: background.clone(), version_table: version_table.clone(), }, - meta_rep_param, - system.clone(), - &db, - ); - - info!("Initialize bucket_table..."); - let bucket_table = Table::new(BucketTable, control_rep_param.clone(), system.clone(), &db); - - info!("Initialize bucket_alias_table..."); - let bucket_alias_table = Table::new( - BucketAliasTable, - control_rep_param.clone(), + meta_rep_param.clone(), system.clone(), &db, ); - info!("Initialize key_table_table..."); - let key_table = Table::new(KeyTable, control_rep_param, system.clone(), 
&db); + // ---- K2V ---- + #[cfg(feature = "k2v")] + let k2v = GarageK2V::new(system.clone(), &db, meta_rep_param); info!("Initialize Garage..."); @@ -155,6 +183,8 @@ impl Garage { object_table, version_table, block_ref_table, + #[cfg(feature = "k2v")] + k2v, }) } @@ -162,3 +192,30 @@ impl Garage { helper::bucket::BucketHelper(self) } } + +#[cfg(feature = "k2v")] +impl GarageK2V { + fn new(system: Arc, db: &sled::Db, meta_rep_param: TableShardedReplication) -> Self { + info!("Initialize K2V counter table..."); + let counter_table = IndexCounter::new(system.clone(), meta_rep_param.clone(), db); + info!("Initialize K2V subscription manager..."); + let subscriptions = Arc::new(SubscriptionManager::new()); + info!("Initialize K2V item table..."); + let item_table = Table::new( + K2VItemTable { + counter_table: counter_table.clone(), + subscriptions: subscriptions.clone(), + }, + meta_rep_param, + system.clone(), + db, + ); + let rpc = K2VRpcHandler::new(system, item_table.clone(), subscriptions); + + Self { + item_table, + counter_table, + rpc, + } + } +} diff --git a/src/model/helper/bucket.rs b/src/model/helper/bucket.rs index 706faf26..54d2f97b 100644 --- a/src/model/helper/bucket.rs +++ b/src/model/helper/bucket.rs @@ -1,4 +1,4 @@ -use garage_table::util::EmptyKey; +use garage_table::util::*; use garage_util::crdt::*; use garage_util::data::*; use garage_util::error::{Error as GarageError, OkOrMessage}; @@ -116,6 +116,7 @@ impl<'a> BucketHelper<'a> { None, Some(KeyFilter::MatchesAndNotDeleted(pattern.to_string())), 10, + EnumerationOrder::Forward, ) .await? .into_iter() diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs new file mode 100644 index 00000000..123154d4 --- /dev/null +++ b/src/model/index_counter.rs @@ -0,0 +1,305 @@ +use std::collections::{hash_map, BTreeMap, HashMap}; +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::Duration; + +use serde::{Deserialize, Serialize}; +use tokio::sync::{mpsc, watch}; + +use garage_rpc::ring::Ring; +use garage_rpc::system::System; +use garage_util::data::*; +use garage_util::error::*; + +use garage_table::crdt::*; +use garage_table::replication::TableShardedReplication; +use garage_table::*; + +pub trait CounterSchema: Clone + PartialEq + Send + Sync + 'static { + const NAME: &'static str; + type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync; + type S: SortKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync; +} + +/// A counter entry in the global table +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +pub struct CounterEntry { + pub pk: T::P, + pub sk: T::S, + pub values: BTreeMap, +} + +impl Entry for CounterEntry { + fn partition_key(&self) -> &T::P { + &self.pk + } + fn sort_key(&self) -> &T::S { + &self.sk + } + fn is_tombstone(&self) -> bool { + self.values + .iter() + .all(|(_, v)| v.node_values.iter().all(|(_, (_, v))| *v == 0)) + } +} + +impl CounterEntry { + pub fn filtered_values(&self, ring: &Ring) -> HashMap { + let nodes = &ring.layout.node_id_vec[..]; + self.filtered_values_with_nodes(nodes) + } + + pub fn filtered_values_with_nodes(&self, nodes: &[Uuid]) -> HashMap { + let mut ret = HashMap::new(); + for (name, vals) in self.values.iter() { + let new_vals = vals + .node_values + .iter() + .filter(|(n, _)| nodes.contains(n)) + .map(|(_, (_, v))| *v) + .collect::>(); + if !new_vals.is_empty() { + ret.insert( + name.clone(), + new_vals.iter().fold(i64::MIN, |a, b| std::cmp::max(a, *b)), + ); + } + } + + ret + } +} + 
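For illustration, here is a minimal sketch of a unit test that could sit in this module (the `TestSchema` type and all concrete values are hypothetical, not part of the patch). It exercises `filtered_values_with_nodes`, which keeps, for each counter name, the maximum value reported among the listed nodes:

```rust
#[cfg(test)]
mod tests {
	use super::*;
	use std::convert::TryFrom;

	// Hypothetical schema, for illustration only: partition key = a Uuid,
	// sort key = a string, as in the K2V counter table below.
	#[derive(Clone, PartialEq)]
	struct TestSchema;

	impl CounterSchema for TestSchema {
		const NAME: &'static str = "test_counter";
		type P = Uuid;
		type S = String;
	}

	#[test]
	fn test_filtered_values_with_nodes() {
		let node_a = Uuid::try_from(&[1u8; 32][..]).unwrap();
		let node_b = Uuid::try_from(&[2u8; 32][..]).unwrap();

		// One counter named "entries", with a (timestamp, value) pair per node
		let mut node_values = BTreeMap::new();
		node_values.insert(node_a, (1u64, 10i64));
		node_values.insert(node_b, (1u64, 4i64));

		let mut values = BTreeMap::new();
		values.insert("entries".to_string(), CounterValue { node_values });

		let entry = CounterEntry::<TestSchema> {
			pk: node_a,
			sk: "a_partition".to_string(),
			values,
		};

		// With both nodes listed, the aggregate is the max across node values
		let all = entry.filtered_values_with_nodes(&[node_a, node_b]);
		assert_eq!(all.get("entries"), Some(&10));

		// Nodes that are not listed are simply ignored
		let only_b = entry.filtered_values_with_nodes(&[node_b]);
		assert_eq!(only_b.get("entries"), Some(&4));
	}
}
```

Since `filtered_values` passes the current ring layout as the node list, counter contributions from nodes that have left the layout are ignored at read time, without having to rewrite stored entries.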
+/// A counter entry in the global table +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +pub struct CounterValue { + pub node_values: BTreeMap, +} + +impl Crdt for CounterEntry { + fn merge(&mut self, other: &Self) { + for (name, e2) in other.values.iter() { + if let Some(e) = self.values.get_mut(name) { + e.merge(e2); + } else { + self.values.insert(name.clone(), e2.clone()); + } + } + } +} + +impl Crdt for CounterValue { + fn merge(&mut self, other: &Self) { + for (node, (t2, e2)) in other.node_values.iter() { + if let Some((t, e)) = self.node_values.get_mut(node) { + if t2 > t { + *e = *e2; + } + } else { + self.node_values.insert(*node, (*t2, *e2)); + } + } + } +} + +pub struct CounterTable { + _phantom_t: PhantomData, +} + +impl TableSchema for CounterTable { + const TABLE_NAME: &'static str = T::NAME; + + type P = T::P; + type S = T::S; + type E = CounterEntry; + type Filter = (DeletedFilter, Vec); + + fn updated(&self, _old: Option<&Self::E>, _new: Option<&Self::E>) { + // nothing for now + } + + fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { + if filter.0 == DeletedFilter::Any { + return true; + } + + let is_tombstone = entry + .filtered_values_with_nodes(&filter.1[..]) + .iter() + .all(|(_, v)| *v == 0); + filter.0.apply(is_tombstone) + } +} + +// ---- + +pub struct IndexCounter { + this_node: Uuid, + local_counter: sled::Tree, + propagate_tx: mpsc::UnboundedSender<(T::P, T::S, LocalCounterEntry)>, + pub table: Arc, TableShardedReplication>>, +} + +impl IndexCounter { + pub fn new( + system: Arc, + replication: TableShardedReplication, + db: &sled::Db, + ) -> Arc { + let background = system.background.clone(); + + let (propagate_tx, propagate_rx) = mpsc::unbounded_channel(); + + let this = Arc::new(Self { + this_node: system.id, + local_counter: db + .open_tree(format!("local_counter:{}", T::NAME)) + .expect("Unable to open local counter tree"), + propagate_tx, + table: Table::new( + CounterTable { + _phantom_t: Default::default(), + }, + replication, + system, + db, + ), + }); + + let this2 = this.clone(); + background.spawn_worker( + format!("{} index counter propagator", T::NAME), + move |must_exit| this2.clone().propagate_loop(propagate_rx, must_exit), + ); + this + } + + pub fn count(&self, pk: &T::P, sk: &T::S, counts: &[(&str, i64)]) -> Result<(), Error> { + let tree_key = self.table.data.tree_key(pk, sk); + + let new_entry = self.local_counter.transaction(|tx| { + let mut entry = match tx.get(&tree_key[..])? { + Some(old_bytes) => { + rmp_serde::decode::from_read_ref::<_, LocalCounterEntry>(&old_bytes) + .map_err(Error::RmpDecode) + .map_err(sled::transaction::ConflictableTransactionError::Abort)? + } + None => LocalCounterEntry { + values: BTreeMap::new(), + }, + }; + + for (s, inc) in counts.iter() { + let mut ent = entry.values.entry(s.to_string()).or_insert((0, 0)); + ent.0 += 1; + ent.1 += *inc; + } + + let new_entry_bytes = rmp_to_vec_all_named(&entry) + .map_err(Error::RmpEncode) + .map_err(sled::transaction::ConflictableTransactionError::Abort)?; + tx.insert(&tree_key[..], new_entry_bytes)?; + + Ok(entry) + })?; + + if let Err(e) = self.propagate_tx.send((pk.clone(), sk.clone(), new_entry)) { + error!( + "Could not propagate updated counter values, failed to send to channel: {}", + e + ); + } + + Ok(()) + } + + async fn propagate_loop( + self: Arc, + mut propagate_rx: mpsc::UnboundedReceiver<(T::P, T::S, LocalCounterEntry)>, + must_exit: watch::Receiver, + ) { + // This loop batches updates to counters to be sent all at once. 
+ // They are sent once the propagate_rx channel has been emptied (or is closed). + let mut buf = HashMap::new(); + let mut errors = 0; + + loop { + let (ent, closed) = match propagate_rx.try_recv() { + Ok(ent) => (Some(ent), false), + Err(mpsc::error::TryRecvError::Empty) if buf.is_empty() => { + match propagate_rx.recv().await { + Some(ent) => (Some(ent), false), + None => (None, true), + } + } + Err(mpsc::error::TryRecvError::Empty) => (None, false), + Err(mpsc::error::TryRecvError::Disconnected) => (None, true), + }; + + if let Some((pk, sk, counters)) = ent { + let tree_key = self.table.data.tree_key(&pk, &sk); + let dist_entry = counters.into_counter_entry::(self.this_node, pk, sk); + match buf.entry(tree_key) { + hash_map::Entry::Vacant(e) => { + e.insert(dist_entry); + } + hash_map::Entry::Occupied(mut e) => { + e.get_mut().merge(&dist_entry); + } + } + // As long as we can add entries, loop back and add them to batch + // before sending batch to other nodes + continue; + } + + if !buf.is_empty() { + let entries = buf.iter().map(|(_k, v)| v); + if let Err(e) = self.table.insert_many(entries).await { + errors += 1; + if errors >= 2 && *must_exit.borrow() { + error!("({}) Could not propagate {} counter values: {}, these counters will not be updated correctly.", T::NAME, buf.len(), e); + break; + } + warn!("({}) Could not propagate {} counter values: {}, retrying in 5 seconds (retry #{})", T::NAME, buf.len(), e, errors); + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + + buf.clear(); + errors = 0; + } + + if closed || *must_exit.borrow() { + break; + } + } + } +} + +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +struct LocalCounterEntry { + values: BTreeMap, +} + +impl LocalCounterEntry { + fn into_counter_entry( + self, + this_node: Uuid, + pk: T::P, + sk: T::S, + ) -> CounterEntry { + CounterEntry { + pk, + sk, + values: self + .values + .into_iter() + .map(|(name, (ts, v))| { + let mut node_values = BTreeMap::new(); + node_values.insert(this_node, (ts, v)); + (name, CounterValue { node_values }) + }) + .collect(), + } + } +} diff --git a/src/model/k2v/causality.rs b/src/model/k2v/causality.rs new file mode 100644 index 00000000..8c76a32b --- /dev/null +++ b/src/model/k2v/causality.rs @@ -0,0 +1,96 @@ +use std::collections::BTreeMap; +use std::convert::TryInto; + +use serde::{Deserialize, Serialize}; + +use garage_util::data::*; + +/// Node IDs used in K2V are u64 integers that are the abbreviation +/// of full Garage node IDs which are 256-bit UUIDs. 
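+/// For example (illustrative value): a node whose full ID starts with the bytes
+/// `01 02 03 04 05 06 07 08` gets the K2V node ID `0x0102030405060708`, i.e. the
+/// big-endian interpretation of its first 8 bytes (see `make_node_id` below).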
+pub type K2VNodeId = u64; + +pub fn make_node_id(node_id: Uuid) -> K2VNodeId { + let mut tmp = [0u8; 8]; + tmp.copy_from_slice(&node_id.as_slice()[..8]); + u64::from_be_bytes(tmp) +} + +#[derive(PartialEq, Debug, Serialize, Deserialize)] +pub struct CausalContext { + pub vector_clock: BTreeMap, +} + +impl CausalContext { + /// Empty causality context + pub fn new_empty() -> Self { + Self { + vector_clock: BTreeMap::new(), + } + } + /// Make binary representation and encode in base64 + pub fn serialize(&self) -> String { + let mut ints = Vec::with_capacity(2 * self.vector_clock.len()); + for (node, time) in self.vector_clock.iter() { + ints.push(*node); + ints.push(*time); + } + let checksum = ints.iter().fold(0, |acc, v| acc ^ *v); + + let mut bytes = u64::to_be_bytes(checksum).to_vec(); + for i in ints { + bytes.extend(u64::to_be_bytes(i)); + } + + base64::encode_config(bytes, base64::URL_SAFE_NO_PAD) + } + /// Parse from base64-encoded binary representation + pub fn parse(s: &str) -> Result { + let bytes = base64::decode_config(s, base64::URL_SAFE_NO_PAD) + .map_err(|e| format!("bad causality token base64: {}", e))?; + if bytes.len() % 16 != 8 || bytes.len() < 8 { + return Err("bad causality token length".into()); + } + + let checksum = u64::from_be_bytes(bytes[..8].try_into().unwrap()); + let mut ret = CausalContext { + vector_clock: BTreeMap::new(), + }; + + for i in 0..(bytes.len() / 16) { + let node_id = u64::from_be_bytes(bytes[8 + i * 16..16 + i * 16].try_into().unwrap()); + let time = u64::from_be_bytes(bytes[16 + i * 16..24 + i * 16].try_into().unwrap()); + ret.vector_clock.insert(node_id, time); + } + + let check = ret.vector_clock.iter().fold(0, |acc, (n, t)| acc ^ *n ^ *t); + + if check != checksum { + return Err("bad causality token checksum".into()); + } + + Ok(ret) + } + /// Check if this causal context contains newer items than another one + pub fn is_newer_than(&self, other: &Self) -> bool { + self.vector_clock + .iter() + .any(|(k, v)| v > other.vector_clock.get(k).unwrap_or(&0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_causality_token_serialization() { + let ct = CausalContext { + vector_clock: [(4, 42), (1928131023, 76), (0xefc0c1c47f9de433, 2)] + .iter() + .cloned() + .collect(), + }; + + assert_eq!(CausalContext::parse(&ct.serialize()).unwrap(), ct); + } +} diff --git a/src/model/k2v/counter_table.rs b/src/model/k2v/counter_table.rs new file mode 100644 index 00000000..4856eb2b --- /dev/null +++ b/src/model/k2v/counter_table.rs @@ -0,0 +1,20 @@ +use garage_util::data::*; + +use crate::index_counter::*; + +pub const ENTRIES: &str = "entries"; +pub const CONFLICTS: &str = "conflicts"; +pub const VALUES: &str = "values"; +pub const BYTES: &str = "bytes"; + +#[derive(PartialEq, Clone)] +pub struct K2VCounterTable; + +impl CounterSchema for K2VCounterTable { + const NAME: &'static str = "k2v_index_counter"; + + // Partition key = bucket id + type P = Uuid; + // Sort key = K2V item's partition key + type S = String; +} diff --git a/src/model/k2v/item_table.rs b/src/model/k2v/item_table.rs new file mode 100644 index 00000000..8b7cc08a --- /dev/null +++ b/src/model/k2v/item_table.rs @@ -0,0 +1,291 @@ +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::sync::Arc; + +use garage_util::data::*; + +use garage_table::crdt::*; +use garage_table::*; + +use crate::index_counter::*; +use crate::k2v::causality::*; +use crate::k2v::counter_table::*; +use crate::k2v::poll::*; + +#[derive(PartialEq, Clone, Debug, Serialize, 
Deserialize)] +pub struct K2VItem { + pub partition: K2VItemPartition, + pub sort_key: String, + + items: BTreeMap, +} + +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize, Hash, Eq)] +pub struct K2VItemPartition { + pub bucket_id: Uuid, + pub partition_key: String, +} + +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +struct DvvsEntry { + t_discard: u64, + values: Vec<(u64, DvvsValue)>, +} + +#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +pub enum DvvsValue { + Value(#[serde(with = "serde_bytes")] Vec), + Deleted, +} + +impl K2VItem { + /// Creates a new K2VItem when no previous entry existed in the db + pub fn new(bucket_id: Uuid, partition_key: String, sort_key: String) -> Self { + Self { + partition: K2VItemPartition { + bucket_id, + partition_key, + }, + sort_key, + items: BTreeMap::new(), + } + } + /// Updates a K2VItem with a new value or a deletion event + pub fn update( + &mut self, + this_node: Uuid, + context: &Option, + new_value: DvvsValue, + ) { + if let Some(context) = context { + for (node, t_discard) in context.vector_clock.iter() { + if let Some(e) = self.items.get_mut(node) { + e.t_discard = std::cmp::max(e.t_discard, *t_discard); + } else { + self.items.insert( + *node, + DvvsEntry { + t_discard: *t_discard, + values: vec![], + }, + ); + } + } + } + + self.discard(); + + let node_id = make_node_id(this_node); + let e = self.items.entry(node_id).or_insert(DvvsEntry { + t_discard: 0, + values: vec![], + }); + let t_prev = e.max_time(); + e.values.push((t_prev + 1, new_value)); + } + + /// Extract the causality context of a K2V Item + pub fn causal_context(&self) -> CausalContext { + let mut cc = CausalContext::new_empty(); + for (node, ent) in self.items.iter() { + cc.vector_clock.insert(*node, ent.max_time()); + } + cc + } + + /// Extract the list of values + pub fn values(&'_ self) -> Vec<&'_ DvvsValue> { + let mut ret = vec![]; + for (_, ent) in self.items.iter() { + for (_, v) in ent.values.iter() { + if !ret.contains(&v) { + ret.push(v); + } + } + } + ret + } + + fn discard(&mut self) { + for (_, ent) in self.items.iter_mut() { + ent.discard(); + } + } + + // returns counters: (non-deleted entries, conflict entries, non-tombstone values, bytes used) + fn stats(&self) -> (i64, i64, i64, i64) { + let values = self.values(); + + let n_entries = if self.is_tombstone() { 0 } else { 1 }; + let n_conflicts = if values.len() > 1 { 1 } else { 0 }; + let n_values = values + .iter() + .filter(|v| matches!(v, DvvsValue::Value(_))) + .count() as i64; + let n_bytes = values + .iter() + .map(|v| match v { + DvvsValue::Deleted => 0, + DvvsValue::Value(v) => v.len() as i64, + }) + .sum(); + + (n_entries, n_conflicts, n_values, n_bytes) + } +} + +impl DvvsEntry { + fn max_time(&self) -> u64 { + self.values + .iter() + .fold(self.t_discard, |acc, (vts, _)| std::cmp::max(acc, *vts)) + } + + fn discard(&mut self) { + self.values = std::mem::take(&mut self.values) + .into_iter() + .filter(|(t, _)| *t > self.t_discard) + .collect::>(); + } +} + +impl Crdt for K2VItem { + fn merge(&mut self, other: &Self) { + for (node, e2) in other.items.iter() { + if let Some(e) = self.items.get_mut(node) { + e.merge(e2); + } else { + self.items.insert(*node, e2.clone()); + } + } + } +} + +impl Crdt for DvvsEntry { + fn merge(&mut self, other: &Self) { + self.t_discard = std::cmp::max(self.t_discard, other.t_discard); + self.discard(); + + let t_max = self.max_time(); + for (vt, vv) in other.values.iter() { + if *vt > t_max { + self.values.push((*vt, vv.clone())); + } + } + 
} +} + +impl PartitionKey for K2VItemPartition { + fn hash(&self) -> Hash { + use blake2::{Blake2b, Digest}; + + let mut hasher = Blake2b::new(); + hasher.update(self.bucket_id.as_slice()); + hasher.update(self.partition_key.as_bytes()); + let mut hash = [0u8; 32]; + hash.copy_from_slice(&hasher.finalize()[..32]); + hash.into() + } +} + +impl Entry for K2VItem { + fn partition_key(&self) -> &K2VItemPartition { + &self.partition + } + fn sort_key(&self) -> &String { + &self.sort_key + } + fn is_tombstone(&self) -> bool { + self.values() + .iter() + .all(|v| matches!(v, DvvsValue::Deleted)) + } +} + +pub struct K2VItemTable { + pub(crate) counter_table: Arc>, + pub(crate) subscriptions: Arc, +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct ItemFilter { + pub exclude_only_tombstones: bool, + pub conflicts_only: bool, +} + +impl TableSchema for K2VItemTable { + const TABLE_NAME: &'static str = "k2v_item"; + + type P = K2VItemPartition; + type S = String; + type E = K2VItem; + type Filter = ItemFilter; + + fn updated(&self, old: Option<&Self::E>, new: Option<&Self::E>) { + // 1. Count + let (old_entries, old_conflicts, old_values, old_bytes) = match old { + None => (0, 0, 0, 0), + Some(e) => e.stats(), + }; + let (new_entries, new_conflicts, new_values, new_bytes) = match new { + None => (0, 0, 0, 0), + Some(e) => e.stats(), + }; + + let count_pk = old + .map(|e| e.partition.bucket_id) + .unwrap_or_else(|| new.unwrap().partition.bucket_id); + let count_sk = old + .map(|e| &e.partition.partition_key) + .unwrap_or_else(|| &new.unwrap().partition.partition_key); + + if let Err(e) = self.counter_table.count( + &count_pk, + count_sk, + &[ + (ENTRIES, new_entries - old_entries), + (CONFLICTS, new_conflicts - old_conflicts), + (VALUES, new_values - old_values), + (BYTES, new_bytes - old_bytes), + ], + ) { + error!("Could not update K2V counter for bucket {:?} partition {}; counts will now be inconsistent. {}", count_pk, count_sk, e); + } + + // 2. 
Notify + if let Some(new_ent) = new { + self.subscriptions.notify(new_ent); + } + } + + #[allow(clippy::nonminimal_bool)] + fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { + let v = entry.values(); + !(filter.conflicts_only && v.len() < 2) + && !(filter.exclude_only_tombstones && entry.is_tombstone()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_dvvsentry_merge_simple() { + let e1 = DvvsEntry { + t_discard: 4, + values: vec![ + (5, DvvsValue::Value(vec![15])), + (6, DvvsValue::Value(vec![16])), + ], + }; + let e2 = DvvsEntry { + t_discard: 5, + values: vec![(6, DvvsValue::Value(vec![16])), (7, DvvsValue::Deleted)], + }; + + let mut e3 = e1.clone(); + e3.merge(&e2); + assert_eq!(e2, e3); + } +} diff --git a/src/model/k2v/mod.rs b/src/model/k2v/mod.rs new file mode 100644 index 00000000..664172a6 --- /dev/null +++ b/src/model/k2v/mod.rs @@ -0,0 +1,7 @@ +pub mod causality; + +pub mod counter_table; +pub mod item_table; + +pub mod poll; +pub mod rpc; diff --git a/src/model/k2v/poll.rs b/src/model/k2v/poll.rs new file mode 100644 index 00000000..93105207 --- /dev/null +++ b/src/model/k2v/poll.rs @@ -0,0 +1,50 @@ +use std::collections::HashMap; +use std::sync::Mutex; + +use serde::{Deserialize, Serialize}; +use tokio::sync::broadcast; + +use crate::k2v::item_table::*; + +#[derive(Debug, Hash, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PollKey { + pub partition: K2VItemPartition, + pub sort_key: String, +} + +#[derive(Default)] +pub struct SubscriptionManager { + subscriptions: Mutex>>, +} + +impl SubscriptionManager { + pub fn new() -> Self { + Self::default() + } + + pub fn subscribe(&self, key: &PollKey) -> broadcast::Receiver { + let mut subs = self.subscriptions.lock().unwrap(); + if let Some(s) = subs.get(key) { + s.subscribe() + } else { + let (tx, rx) = broadcast::channel(8); + subs.insert(key.clone(), tx); + rx + } + } + + pub fn notify(&self, item: &K2VItem) { + let key = PollKey { + partition: item.partition.clone(), + sort_key: item.sort_key.clone(), + }; + let mut subs = self.subscriptions.lock().unwrap(); + if let Some(s) = subs.get(&key) { + if s.send(item.clone()).is_err() { + // no more subscribers, remove channel from here + // (we will re-create it later if we need to subscribe again) + subs.remove(&key); + } + } + } +} diff --git a/src/model/k2v/rpc.rs b/src/model/k2v/rpc.rs new file mode 100644 index 00000000..90101d0f --- /dev/null +++ b/src/model/k2v/rpc.rs @@ -0,0 +1,343 @@ +//! Module that implements RPCs specific to K2V. +//! This is necessary for insertions into the K2V store, +//! as they have to be transmitted to one of the nodes responsible +//! for storing the entry to be processed (the API entry +//! node does not process the entry directly, as this would +//! mean the vector clock gets much larger than needed). 
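As a usage sketch (hypothetical caller, not part of the patch), assuming an initialized `Garage` built with the `k2v` feature: an insertion goes through this handler rather than being applied locally, so the vector clock only grows on a node that actually stores the item. The key and value names below are made up.

```rust
use std::sync::Arc;

use garage_model::garage::Garage;
use garage_model::k2v::item_table::DvvsValue;
use garage_util::data::Uuid;
use garage_util::error::Error;

// Hypothetical caller: `garage` is an initialized Garage instance and
// `bucket_id` refers to an existing bucket.
async fn insert_example(garage: Arc<Garage>, bucket_id: Uuid) -> Result<(), Error> {
	garage
		.k2v
		.rpc
		.insert(
			bucket_id,
			"mailbox:INBOX".to_string(), // K2V partition key
			"msg-0001".to_string(),      // K2V sort key
			None, // no causality token: nothing is discarded, the value is added to the set
			DvvsValue::Value(b"hello".to_vec()),
		)
		.await
}
```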
+
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::time::Duration;
+
+use async_trait::async_trait;
+use futures::stream::FuturesUnordered;
+use futures::StreamExt;
+use serde::{Deserialize, Serialize};
+use tokio::select;
+
+use garage_util::crdt::*;
+use garage_util::data::*;
+use garage_util::error::*;
+
+use garage_rpc::system::System;
+use garage_rpc::*;
+
+use garage_table::replication::{TableReplication, TableShardedReplication};
+use garage_table::table::TABLE_RPC_TIMEOUT;
+use garage_table::{PartitionKey, Table};
+
+use crate::k2v::causality::*;
+use crate::k2v::item_table::*;
+use crate::k2v::poll::*;
+
+/// RPC messages for K2V
+#[derive(Debug, Serialize, Deserialize)]
+enum K2VRpc {
+	Ok,
+	InsertItem(InsertedItem),
+	InsertManyItems(Vec<InsertedItem>),
+	PollItem {
+		key: PollKey,
+		causal_context: CausalContext,
+		timeout_msec: u64,
+	},
+	PollItemResponse(Option<K2VItem>),
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct InsertedItem {
+	partition: K2VItemPartition,
+	sort_key: String,
+	causal_context: Option<CausalContext>,
+	value: DvvsValue,
+}
+
+impl Rpc for K2VRpc {
+	type Response = Result<K2VRpc, Error>;
+}
+
+/// The RPC handler for K2V, dispatching insertions to the nodes that store
+/// the corresponding items and answering poll requests
+pub struct K2VRpcHandler {
+	system: Arc<System>,
+	item_table: Arc<Table<K2VItemTable, TableShardedReplication>>,
+	endpoint: Arc<Endpoint<K2VRpc, Self>>,
+	subscriptions: Arc<SubscriptionManager>,
+}
+
+impl K2VRpcHandler {
+	pub fn new(
+		system: Arc<System>,
+		item_table: Arc<Table<K2VItemTable, TableShardedReplication>>,
+		subscriptions: Arc<SubscriptionManager>,
+	) -> Arc<Self> {
+		let endpoint = system.netapp.endpoint("garage_model/k2v/Rpc".to_string());
+
+		let rpc_handler = Arc::new(Self {
+			system,
+			item_table,
+			endpoint,
+			subscriptions,
+		});
+		rpc_handler.endpoint.set_handler(rpc_handler.clone());
+
+		rpc_handler
+	}
+
+	// ---- public interface ----
+
+	pub async fn insert(
+		&self,
+		bucket_id: Uuid,
+		partition_key: String,
+		sort_key: String,
+		causal_context: Option<CausalContext>,
+		value: DvvsValue,
+	) -> Result<(), Error> {
+		let partition = K2VItemPartition {
+			bucket_id,
+			partition_key,
+		};
+		let mut who = self
+			.item_table
+			.data
+			.replication
+			.write_nodes(&partition.hash());
+		who.sort();
+
+		self.system
+			.rpc
+			.try_call_many(
+				&self.endpoint,
+				&who[..],
+				K2VRpc::InsertItem(InsertedItem {
+					partition,
+					sort_key,
+					causal_context,
+					value,
+				}),
+				RequestStrategy::with_priority(PRIO_NORMAL)
+					.with_quorum(1)
+					.with_timeout(TABLE_RPC_TIMEOUT)
+					.interrupt_after_quorum(true),
+			)
+			.await?;
+
+		Ok(())
+	}
+
+	pub async fn insert_batch(
+		&self,
+		bucket_id: Uuid,
+		items: Vec<(String, String, Option<CausalContext>, DvvsValue)>,
+	) -> Result<(), Error> {
+		let n_items = items.len();
+
+		let mut call_list: HashMap<_, Vec<_>> = HashMap::new();
+
+		for (partition_key, sort_key, causal_context, value) in items {
+			let partition = K2VItemPartition {
+				bucket_id,
+				partition_key,
+			};
+			let mut who = self
+				.item_table
+				.data
+				.replication
+				.write_nodes(&partition.hash());
+			who.sort();
+
+			call_list.entry(who).or_default().push(InsertedItem {
+				partition,
+				sort_key,
+				causal_context,
+				value,
+			});
+		}
+
+		debug!(
+			"K2V insert_batch: {} requests to insert {} items",
+			call_list.len(),
+			n_items
+		);
+		let call_futures = call_list.into_iter().map(|(nodes, items)| async move {
+			let resp = self
+				.system
+				.rpc
+				.try_call_many(
+					&self.endpoint,
+					&nodes[..],
+					K2VRpc::InsertManyItems(items),
+					RequestStrategy::with_priority(PRIO_NORMAL)
+						.with_quorum(1)
+						.with_timeout(TABLE_RPC_TIMEOUT)
+						.interrupt_after_quorum(true),
+				)
+				.await?;
+			Ok::<_, Error>((nodes, resp))
+		});
+
+		let mut resps = call_futures.collect::<FuturesUnordered<_>>();
+		while let Some(resp) = resps.next().await
{ + resp?; + } + + Ok(()) + } + + pub async fn poll( + &self, + bucket_id: Uuid, + partition_key: String, + sort_key: String, + causal_context: CausalContext, + timeout_msec: u64, + ) -> Result, Error> { + let poll_key = PollKey { + partition: K2VItemPartition { + bucket_id, + partition_key, + }, + sort_key, + }; + let nodes = self + .item_table + .data + .replication + .write_nodes(&poll_key.partition.hash()); + + let resps = self + .system + .rpc + .try_call_many( + &self.endpoint, + &nodes[..], + K2VRpc::PollItem { + key: poll_key, + causal_context, + timeout_msec, + }, + RequestStrategy::with_priority(PRIO_NORMAL) + .with_quorum(self.item_table.data.replication.read_quorum()) + .with_timeout(Duration::from_millis(timeout_msec) + TABLE_RPC_TIMEOUT), + ) + .await?; + + let mut resp: Option = None; + for v in resps { + match v { + K2VRpc::PollItemResponse(Some(x)) => { + if let Some(y) = &mut resp { + y.merge(&x); + } else { + resp = Some(x); + } + } + K2VRpc::PollItemResponse(None) => { + return Ok(None); + } + v => return Err(Error::unexpected_rpc_message(v)), + } + } + + Ok(resp) + } + + // ---- internal handlers ---- + + async fn handle_insert(&self, item: &InsertedItem) -> Result { + let new = self.local_insert(item)?; + + // Propagate to rest of network + if let Some(updated) = new { + self.item_table.insert(&updated).await?; + } + + Ok(K2VRpc::Ok) + } + + async fn handle_insert_many(&self, items: &[InsertedItem]) -> Result { + let mut updated_vec = vec![]; + + for item in items { + let new = self.local_insert(item)?; + + if let Some(updated) = new { + updated_vec.push(updated); + } + } + + // Propagate to rest of network + if !updated_vec.is_empty() { + self.item_table.insert_many(&updated_vec).await?; + } + + Ok(K2VRpc::Ok) + } + + fn local_insert(&self, item: &InsertedItem) -> Result, Error> { + let tree_key = self + .item_table + .data + .tree_key(&item.partition, &item.sort_key); + + self.item_table + .data + .update_entry_with(&tree_key[..], |ent| { + let mut ent = ent.unwrap_or_else(|| { + K2VItem::new( + item.partition.bucket_id, + item.partition.partition_key.clone(), + item.sort_key.clone(), + ) + }); + ent.update(self.system.id, &item.causal_context, item.value.clone()); + ent + }) + } + + async fn handle_poll(&self, key: &PollKey, ct: &CausalContext) -> Result { + let mut chan = self.subscriptions.subscribe(key); + + let mut value = self + .item_table + .data + .read_entry(&key.partition, &key.sort_key)? + .map(|bytes| self.item_table.data.decode_entry(&bytes[..])) + .transpose()? + .unwrap_or_else(|| { + K2VItem::new( + key.partition.bucket_id, + key.partition.partition_key.clone(), + key.sort_key.clone(), + ) + }); + + while !value.causal_context().is_newer_than(ct) { + value = chan.recv().await?; + } + + Ok(value) + } +} + +#[async_trait] +impl EndpointHandler for K2VRpcHandler { + async fn handle(self: &Arc, message: &K2VRpc, _from: NodeID) -> Result { + match message { + K2VRpc::InsertItem(item) => self.handle_insert(item).await, + K2VRpc::InsertManyItems(items) => self.handle_insert_many(&items[..]).await, + K2VRpc::PollItem { + key, + causal_context, + timeout_msec, + } => { + let delay = tokio::time::sleep(Duration::from_millis(*timeout_msec)); + select! 
{ + ret = self.handle_poll(key, causal_context) => ret.map(Some).map(K2VRpc::PollItemResponse), + _ = delay => Ok(K2VRpc::PollItemResponse(None)), + } + } + m => Err(Error::unexpected_rpc_message(m)), + } + } +} diff --git a/src/model/lib.rs b/src/model/lib.rs index 05a4cdc7..7c9d9270 100644 --- a/src/model/lib.rs +++ b/src/model/lib.rs @@ -3,12 +3,15 @@ extern crate tracing; pub mod permission; -pub mod block_ref_table; +pub mod index_counter; + pub mod bucket_alias_table; pub mod bucket_table; pub mod key_table; -pub mod object_table; -pub mod version_table; + +#[cfg(feature = "k2v")] +pub mod k2v; +pub mod s3; pub mod garage; pub mod helper; diff --git a/src/model/object_table.rs b/src/model/object_table.rs deleted file mode 100644 index da53878e..00000000 --- a/src/model/object_table.rs +++ /dev/null @@ -1,334 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -use std::sync::Arc; - -use garage_util::background::BackgroundRunner; -use garage_util::data::*; - -use garage_table::crdt::*; -use garage_table::replication::TableShardedReplication; -use garage_table::*; - -use crate::version_table::*; - -use garage_model_050::object_table as old; - -/// An object -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub struct Object { - /// The bucket in which the object is stored, used as partition key - pub bucket_id: Uuid, - - /// The key at which the object is stored in its bucket, used as sorting key - pub key: String, - - /// The list of currenty stored versions of the object - versions: Vec, -} - -impl Object { - /// Initialize an Object struct from parts - pub fn new(bucket_id: Uuid, key: String, versions: Vec) -> Self { - let mut ret = Self { - bucket_id, - key, - versions: vec![], - }; - for v in versions { - ret.add_version(v) - .expect("Twice the same ObjectVersion in Object constructor"); - } - ret - } - - /// Adds a version if it wasn't already present - #[allow(clippy::result_unit_err)] - pub fn add_version(&mut self, new: ObjectVersion) -> Result<(), ()> { - match self - .versions - .binary_search_by(|v| v.cmp_key().cmp(&new.cmp_key())) - { - Err(i) => { - self.versions.insert(i, new); - Ok(()) - } - Ok(_) => Err(()), - } - } - - /// Get a list of currently stored versions of `Object` - pub fn versions(&self) -> &[ObjectVersion] { - &self.versions[..] 
- } -} - -/// Informations about a version of an object -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersion { - /// Id of the version - pub uuid: Uuid, - /// Timestamp of when the object was created - pub timestamp: u64, - /// State of the version - pub state: ObjectVersionState, -} - -/// State of an object version -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub enum ObjectVersionState { - /// The version is being received - Uploading(ObjectVersionHeaders), - /// The version is fully received - Complete(ObjectVersionData), - /// The version uploaded containded errors or the upload was explicitly aborted - Aborted, -} - -impl Crdt for ObjectVersionState { - fn merge(&mut self, other: &Self) { - use ObjectVersionState::*; - match other { - Aborted => { - *self = Aborted; - } - Complete(b) => match self { - Aborted => {} - Complete(a) => { - a.merge(b); - } - Uploading(_) => { - *self = Complete(b.clone()); - } - }, - Uploading(_) => {} - } - } -} - -/// Data stored in object version -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub enum ObjectVersionData { - /// The object was deleted, this Version is a tombstone to mark it as such - DeleteMarker, - /// The object is short, it's stored inlined - Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec), - /// The object is not short, Hash of first block is stored here, next segments hashes are - /// stored in the version table - FirstBlock(ObjectVersionMeta, Hash), -} - -impl AutoCrdt for ObjectVersionData { - const WARN_IF_DIFFERENT: bool = true; -} - -/// Metadata about the object version -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersionMeta { - /// Headers to send to the client - pub headers: ObjectVersionHeaders, - /// Size of the object - pub size: u64, - /// etag of the object - pub etag: String, -} - -/// Additional headers for an object -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersionHeaders { - /// Content type of the object - pub content_type: String, - /// Any other http headers to send - pub other: BTreeMap, -} - -impl ObjectVersion { - fn cmp_key(&self) -> (u64, Uuid) { - (self.timestamp, self.uuid) - } - - /// Is the object version currently being uploaded - pub fn is_uploading(&self) -> bool { - matches!(self.state, ObjectVersionState::Uploading(_)) - } - - /// Is the object version completely received - pub fn is_complete(&self) -> bool { - matches!(self.state, ObjectVersionState::Complete(_)) - } - - /// Is the object version available (received and not a tombstone) - pub fn is_data(&self) -> bool { - match self.state { - ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false, - ObjectVersionState::Complete(_) => true, - _ => false, - } - } -} - -impl Entry for Object { - fn partition_key(&self) -> &Uuid { - &self.bucket_id - } - fn sort_key(&self) -> &String { - &self.key - } - fn is_tombstone(&self) -> bool { - self.versions.len() == 1 - && self.versions[0].state - == ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) - } -} - -impl Crdt for Object { - fn merge(&mut self, other: &Self) { - // Merge versions from other into here - for other_v in other.versions.iter() { - match self - .versions - .binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key())) - { - Ok(i) => { - self.versions[i].state.merge(&other_v.state); - } - Err(i) => { - self.versions.insert(i, other_v.clone()); - } - } - } - - 
// Remove versions which are obsolete, i.e. those that come - // before the last version which .is_complete(). - let last_complete = self - .versions - .iter() - .enumerate() - .rev() - .find(|(_, v)| v.is_complete()) - .map(|(vi, _)| vi); - - if let Some(last_vi) = last_complete { - self.versions = self.versions.drain(last_vi..).collect::>(); - } - } -} - -pub struct ObjectTable { - pub background: Arc, - pub version_table: Arc>, -} - -#[derive(Clone, Copy, Debug, Serialize, Deserialize)] -pub enum ObjectFilter { - IsData, - IsUploading, -} - -impl TableSchema for ObjectTable { - const TABLE_NAME: &'static str = "object"; - - type P = Uuid; - type S = String; - type E = Object; - type Filter = ObjectFilter; - - fn updated(&self, old: Option, new: Option) { - let version_table = self.version_table.clone(); - self.background.spawn(async move { - if let (Some(old_v), Some(new_v)) = (old, new) { - // Propagate deletion of old versions - for v in old_v.versions.iter() { - let newly_deleted = match new_v - .versions - .binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key())) - { - Err(_) => true, - Ok(i) => { - new_v.versions[i].state == ObjectVersionState::Aborted - && v.state != ObjectVersionState::Aborted - } - }; - if newly_deleted { - let deleted_version = - Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true); - version_table.insert(&deleted_version).await?; - } - } - } - Ok(()) - }) - } - - fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { - match filter { - ObjectFilter::IsData => entry.versions.iter().any(|v| v.is_data()), - ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()), - } - } - - fn try_migrate(bytes: &[u8]) -> Option { - let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?; - Some(migrate_object(old_obj)) - } -} - -// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv -// (we just want to change bucket into bucket_id by hashing it) - -fn migrate_object(o: old::Object) -> Object { - let versions = o - .versions() - .iter() - .cloned() - .map(migrate_object_version) - .collect(); - Object { - bucket_id: blake2sum(o.bucket.as_bytes()), - key: o.key, - versions, - } -} - -fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion { - ObjectVersion { - uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(), - timestamp: v.timestamp, - state: match v.state { - old::ObjectVersionState::Uploading(h) => { - ObjectVersionState::Uploading(migrate_object_version_headers(h)) - } - old::ObjectVersionState::Complete(d) => { - ObjectVersionState::Complete(migrate_object_version_data(d)) - } - old::ObjectVersionState::Aborted => ObjectVersionState::Aborted, - }, - } -} - -fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders { - ObjectVersionHeaders { - content_type: h.content_type, - other: h.other, - } -} - -fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData { - match d { - old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker, - old::ObjectVersionData::Inline(m, b) => { - ObjectVersionData::Inline(migrate_object_version_meta(m), b) - } - old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock( - migrate_object_version_meta(m), - Hash::try_from(h.as_slice()).unwrap(), - ), - } -} - -fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta { - ObjectVersionMeta { - headers: migrate_object_version_headers(m.headers), - size: m.size, - etag: m.etag, - } -} diff --git a/src/model/s3/block_ref_table.rs 
b/src/model/s3/block_ref_table.rs
new file mode 100644
index 00000000..9b3991bf
--- /dev/null
+++ b/src/model/s3/block_ref_table.rs
@@ -0,0 +1,74 @@
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+
+use garage_util::data::*;
+
+use garage_table::crdt::Crdt;
+use garage_table::*;
+
+use garage_block::manager::*;
+
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct BlockRef {
+	/// Hash (blake2 sum) of the block, used as partition key
+	pub block: Hash,
+
+	/// Id of the Version for the object containing this block, used as sorting key
+	pub version: Uuid,
+
+	// Keep track of deleted status
+	/// Is the Version that contains this block deleted
+	pub deleted: crdt::Bool,
+}
+
+impl Entry<Hash, Uuid> for BlockRef {
+	fn partition_key(&self) -> &Hash {
+		&self.block
+	}
+	fn sort_key(&self) -> &Uuid {
+		&self.version
+	}
+	fn is_tombstone(&self) -> bool {
+		self.deleted.get()
+	}
+}
+
+impl Crdt for BlockRef {
+	fn merge(&mut self, other: &Self) {
+		self.deleted.merge(&other.deleted);
+	}
+}
+
+pub struct BlockRefTable {
+	pub block_manager: Arc<BlockManager>,
+}
+
+impl TableSchema for BlockRefTable {
+	const TABLE_NAME: &'static str = "block_ref";
+
+	type P = Hash;
+	type S = Uuid;
+	type E = BlockRef;
+	type Filter = DeletedFilter;
+
+	fn updated(&self, old: Option<&Self::E>, new: Option<&Self::E>) {
+		#[allow(clippy::or_fun_call)]
+		let block = &old.or(new).unwrap().block;
+		let was_before = old.map(|x| !x.deleted.get()).unwrap_or(false);
+		let is_after = new.map(|x| !x.deleted.get()).unwrap_or(false);
+		if is_after && !was_before {
+			if let Err(e) = self.block_manager.block_incref(block) {
+				warn!("block_incref failed for block {:?}: {}", block, e);
+			}
+		}
+		if was_before && !is_after {
+			if let Err(e) = self.block_manager.block_decref(block) {
+				warn!("block_decref failed for block {:?}: {}", block, e);
+			}
+		}
+	}
+
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		filter.apply(entry.deleted.get())
+	}
+}
diff --git a/src/model/s3/mod.rs b/src/model/s3/mod.rs
new file mode 100644
index 00000000..4e94337d
--- /dev/null
+++ b/src/model/s3/mod.rs
@@ -0,0 +1,3 @@
+pub mod block_ref_table;
+pub mod object_table;
+pub mod version_table;
diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs
new file mode 100644
index 00000000..3d9a89f7
--- /dev/null
+++ b/src/model/s3/object_table.rs
@@ -0,0 +1,337 @@
+use serde::{Deserialize, Serialize};
+use std::collections::BTreeMap;
+use std::sync::Arc;
+
+use garage_util::background::BackgroundRunner;
+use garage_util::data::*;
+
+use garage_table::crdt::*;
+use garage_table::replication::TableShardedReplication;
+use garage_table::*;
+
+use crate::s3::version_table::*;
+
+use garage_model_050::object_table as old;
+
+/// An object
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct Object {
+	/// The bucket in which the object is stored, used as partition key
+	pub bucket_id: Uuid,
+
+	/// The key at which the object is stored in its bucket, used as sorting key
+	pub key: String,
+
+	/// The list of currently stored versions of the object
+	versions: Vec<ObjectVersion>,
+}
+
+impl Object {
+	/// Initialize an Object struct from parts
+	pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
+		let mut ret = Self {
+			bucket_id,
+			key,
+			versions: vec![],
+		};
+		for v in versions {
+			ret.add_version(v)
+				.expect("Twice the same ObjectVersion in Object constructor");
+		}
+		ret
+	}
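+
+	// `versions` is kept private so that all mutations go through
+	// add_version() below, which preserves the sorted-by-(timestamp, uuid)
+	// order that merge() and the binary searches in this file rely on.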
+
+	/// Adds a version if it wasn't already present
+	#[allow(clippy::result_unit_err)]
+	pub fn add_version(&mut self, new: ObjectVersion) -> Result<(), ()> {
+		match self
+			.versions
+			.binary_search_by(|v| v.cmp_key().cmp(&new.cmp_key()))
+		{
+			Err(i) => {
+				self.versions.insert(i, new);
+				Ok(())
+			}
+			Ok(_) => Err(()),
+		}
+	}
+
+	/// Get a list of currently stored versions of `Object`
+	pub fn versions(&self) -> &[ObjectVersion] {
+		&self.versions[..]
+	}
+}
+
+/// Information about a version of an object
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectVersion {
+	/// Id of the version
+	pub uuid: Uuid,
+	/// Timestamp of when the object was created
+	pub timestamp: u64,
+	/// State of the version
+	pub state: ObjectVersionState,
+}
+
+/// State of an object version
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub enum ObjectVersionState {
+	/// The version is being received
+	Uploading(ObjectVersionHeaders),
+	/// The version is fully received
+	Complete(ObjectVersionData),
+	/// The uploaded version contained errors or the upload was explicitly aborted
+	Aborted,
+}
+
+impl Crdt for ObjectVersionState {
+	fn merge(&mut self, other: &Self) {
+		use ObjectVersionState::*;
+		match other {
+			Aborted => {
+				*self = Aborted;
+			}
+			Complete(b) => match self {
+				Aborted => {}
+				Complete(a) => {
+					a.merge(b);
+				}
+				Uploading(_) => {
+					*self = Complete(b.clone());
+				}
+			},
+			Uploading(_) => {}
+		}
+	}
+}
+
+/// Data stored in object version
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub enum ObjectVersionData {
+	/// The object was deleted, this Version is a tombstone to mark it as such
+	DeleteMarker,
+	/// The object is short, it's stored inlined
+	Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
+	/// The object is not short: the hash of its first block is stored here, and
+	/// the hashes of the next segments are stored in the version table
+	FirstBlock(ObjectVersionMeta, Hash),
+}
+
+impl AutoCrdt for ObjectVersionData {
+	const WARN_IF_DIFFERENT: bool = true;
+}
+
+/// Metadata about the object version
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectVersionMeta {
+	/// Headers to send to the client
+	pub headers: ObjectVersionHeaders,
+	/// Size of the object
+	pub size: u64,
+	/// Etag of the object
+	pub etag: String,
+}
+
+/// Additional headers for an object
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectVersionHeaders {
+	/// Content type of the object
+	pub content_type: String,
+	/// Any other http headers to send
+	pub other: BTreeMap<String, String>,
+}
+
+impl ObjectVersion {
+	fn cmp_key(&self) -> (u64, Uuid) {
+		(self.timestamp, self.uuid)
+	}
+
+	/// Is the object version currently being uploaded
+	pub fn is_uploading(&self) -> bool {
+		matches!(self.state, ObjectVersionState::Uploading(_))
+	}
+
+	/// Is the object version completely received
+	pub fn is_complete(&self) -> bool {
+		matches!(self.state, ObjectVersionState::Complete(_))
+	}
+
+	/// Is the object version available (received and not a tombstone)
+	pub fn is_data(&self) -> bool {
+		match self.state {
+			ObjectVersionState::Complete(ObjectVersionData::DeleteMarker) => false,
+			ObjectVersionState::Complete(_) => true,
+			_ => false,
+		}
+	}
+}
+
+impl Entry<Uuid, String> for Object {
+	fn partition_key(&self) -> &Uuid {
+		&self.bucket_id
+	}
+	fn sort_key(&self) -> &String {
+		&self.key
+	}
+	fn is_tombstone(&self) -> bool {
+		self.versions.len() == 1
+			&& self.versions[0].state
+				== ObjectVersionState::Complete(ObjectVersionData::DeleteMarker)
+	}
+}
+
+impl Crdt for Object {
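+	// How this converges (hypothetical scenario, for illustration only):
+	// if node A knows a Complete version at t=10 and node B only knows an
+	// Uploading version at t=5, merging in either order first unions the
+	// two version sets (keyed by (timestamp, uuid)), then prunes every
+	// version older than the most recent Complete one, so both nodes end
+	// up with just the t=10 version.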
+	fn merge(&mut self, other: &Self) {
+		// Merge versions from other into here
+		for other_v in other.versions.iter() {
+			match self
+				.versions
+				.binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key()))
+			{
+				Ok(i) => {
+					self.versions[i].state.merge(&other_v.state);
+				}
+				Err(i) => {
+					self.versions.insert(i, other_v.clone());
+				}
+			}
+		}
+
+		// Remove versions which are obsolete, i.e. those that come
+		// before the last version which .is_complete().
+		let last_complete = self
+			.versions
+			.iter()
+			.enumerate()
+			.rev()
+			.find(|(_, v)| v.is_complete())
+			.map(|(vi, _)| vi);
+
+		if let Some(last_vi) = last_complete {
+			self.versions = self.versions.drain(last_vi..).collect::<Vec<_>>();
+		}
+	}
+}
+
+pub struct ObjectTable {
+	pub background: Arc<BackgroundRunner>,
+	pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
+}
+
+#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
+pub enum ObjectFilter {
+	IsData,
+	IsUploading,
+}
+
+impl TableSchema for ObjectTable {
+	const TABLE_NAME: &'static str = "object";
+
+	type P = Uuid;
+	type S = String;
+	type E = Object;
+	type Filter = ObjectFilter;
+
+	fn updated(&self, old: Option<&Self::E>, new: Option<&Self::E>) {
+		let version_table = self.version_table.clone();
+		let old = old.cloned();
+		let new = new.cloned();
+
+		self.background.spawn(async move {
+			if let (Some(old_v), Some(new_v)) = (old, new) {
+				// Propagate deletion of old versions
+				for v in old_v.versions.iter() {
+					let newly_deleted = match new_v
+						.versions
+						.binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key()))
+					{
+						Err(_) => true,
+						Ok(i) => {
+							new_v.versions[i].state == ObjectVersionState::Aborted
+								&& v.state != ObjectVersionState::Aborted
+						}
+					};
+					if newly_deleted {
+						let deleted_version =
+							Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
+						version_table.insert(&deleted_version).await?;
+					}
+				}
+			}
+			Ok(())
+		})
+	}
+
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		match filter {
+			ObjectFilter::IsData => entry.versions.iter().any(|v| v.is_data()),
+			ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
+		}
+	}
+
+	fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
+		let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?;
+		Some(migrate_object(old_obj))
+	}
+}
+
+// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv
+// (we just want to change bucket into bucket_id by hashing it)
+
+fn migrate_object(o: old::Object) -> Object {
+	let versions = o
+		.versions()
+		.iter()
+		.cloned()
+		.map(migrate_object_version)
+		.collect();
+	Object {
+		bucket_id: blake2sum(o.bucket.as_bytes()),
+		key: o.key,
+		versions,
+	}
+}
+
+fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion {
+	ObjectVersion {
+		uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(),
+		timestamp: v.timestamp,
+		state: match v.state {
+			old::ObjectVersionState::Uploading(h) => {
+				ObjectVersionState::Uploading(migrate_object_version_headers(h))
+			}
+			old::ObjectVersionState::Complete(d) => {
+				ObjectVersionState::Complete(migrate_object_version_data(d))
+			}
+			old::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
+		},
+	}
+}
+
+fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders {
+	ObjectVersionHeaders {
+		content_type: h.content_type,
+		other: h.other,
+	}
+}
+
+fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData {
+	match d {
+		old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker,
+		old::ObjectVersionData::Inline(m, b) => {
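+			// Inline bodies are carried over byte-for-byte: only the
+			// metadata wrapper type differs between the 0.5 and current
+			// schemas.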
+			ObjectVersionData::Inline(migrate_object_version_meta(m), b)
+		}
+		old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock(
+			migrate_object_version_meta(m),
+			Hash::try_from(h.as_slice()).unwrap(),
+		),
+	}
+}
+
+fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta {
+	ObjectVersionMeta {
+		headers: migrate_object_version_headers(m.headers),
+		size: m.size,
+		etag: m.etag,
+	}
+}
diff --git a/src/model/s3/version_table.rs b/src/model/s3/version_table.rs
new file mode 100644
index 00000000..ad096772
--- /dev/null
+++ b/src/model/s3/version_table.rs
@@ -0,0 +1,207 @@
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+
+use garage_util::background::BackgroundRunner;
+use garage_util::data::*;
+
+use garage_table::crdt::*;
+use garage_table::replication::TableShardedReplication;
+use garage_table::*;
+
+use crate::s3::block_ref_table::*;
+
+use garage_model_050::version_table as old;
+
+/// A version of an object
+#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
+pub struct Version {
+	/// UUID of the version, used as partition key
+	pub uuid: Uuid,
+
+	// Actual data: the blocks for this version
+	// In the case of a multipart upload, also store the etags
+	// of individual parts and check them when doing CompleteMultipartUpload
+	/// Is this version deleted
+	pub deleted: crdt::Bool,
+	/// List of blocks of data composing the version
+	pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
+	/// Etag of each part in case of a multipart upload, empty otherwise
+	pub parts_etags: crdt::Map<u64, String>,
+
+	// Back link to bucket+key so that we can figure out if
+	// this was deleted later on
+	/// Bucket in which the related object is stored
+	pub bucket_id: Uuid,
+	/// Key in which the related object is stored
+	pub key: String,
+}
+
+impl Version {
+	pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
+		Self {
+			uuid,
+			deleted: deleted.into(),
+			blocks: crdt::Map::new(),
+			parts_etags: crdt::Map::new(),
+			bucket_id,
+			key,
+		}
+	}
+
+	pub fn has_part_number(&self, part_number: u64) -> bool {
+		let case1 = self
+			.parts_etags
+			.items()
+			.binary_search_by(|(k, _)| k.cmp(&part_number))
+			.is_ok();
+		let case2 = self
+			.blocks
+			.items()
+			.binary_search_by(|(k, _)| k.part_number.cmp(&part_number))
+			.is_ok();
+		case1 || case2
+	}
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct VersionBlockKey {
+	/// Number of the part
+	pub part_number: u64,
+	/// Offset of this sub-segment in its part
+	pub offset: u64,
+}
+
+impl Ord for VersionBlockKey {
+	fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+		self.part_number
+			.cmp(&other.part_number)
+			.then(self.offset.cmp(&other.offset))
+	}
+}
+
+impl PartialOrd for VersionBlockKey {
+	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+		Some(self.cmp(other))
+	}
+}
+
+/// Information about a single block
+#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct VersionBlock {
+	/// Blake2 sum of the block
+	pub hash: Hash,
+	/// Size of the block
+	pub size: u64,
+}
+
+impl AutoCrdt for VersionBlock {
+	const WARN_IF_DIFFERENT: bool = true;
+}
+
+impl Entry<Uuid, EmptyKey> for Version {
+	fn partition_key(&self) -> &Uuid {
+		&self.uuid
+	}
+	fn sort_key(&self) -> &EmptyKey {
+		&EmptyKey
+	}
+	fn is_tombstone(&self) -> bool {
+		self.deleted.get()
+	}
+}
+
+impl Crdt for Version {
+	fn merge(&mut self, other: &Self) {
+		self.deleted.merge(&other.deleted);
+
+		if self.deleted.get() {
+			self.blocks.clear();
+			self.parts_etags.clear();
+		} else {
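+			// Deletion wins over content: crdt::Bool merges as a logical
+			// OR, so once any replica sets `deleted`, every later merge
+			// takes the branch above and the block list stays empty.
+			// Hypothetical example: if replica A holds blocks {b1, b2} and
+			// replica B holds deleted=true, merging in either order
+			// converges to a deleted version with no blocks, never to a
+			// resurrected {b1, b2}.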
self.blocks.merge(&other.blocks); + self.parts_etags.merge(&other.parts_etags); + } + } +} + +pub struct VersionTable { + pub background: Arc, + pub block_ref_table: Arc>, +} + +impl TableSchema for VersionTable { + const TABLE_NAME: &'static str = "version"; + + type P = Uuid; + type S = EmptyKey; + type E = Version; + type Filter = DeletedFilter; + + fn updated(&self, old: Option<&Self::E>, new: Option<&Self::E>) { + let block_ref_table = self.block_ref_table.clone(); + let old = old.cloned(); + let new = new.cloned(); + + self.background.spawn(async move { + if let (Some(old_v), Some(new_v)) = (old, new) { + // Propagate deletion of version blocks + if new_v.deleted.get() && !old_v.deleted.get() { + let deleted_block_refs = old_v + .blocks + .items() + .iter() + .map(|(_k, vb)| BlockRef { + block: vb.hash, + version: old_v.uuid, + deleted: true.into(), + }) + .collect::>(); + block_ref_table.insert_many(&deleted_block_refs[..]).await?; + } + } + Ok(()) + }) + } + + fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { + filter.apply(entry.deleted.get()) + } + + fn try_migrate(bytes: &[u8]) -> Option { + let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?; + + let blocks = old + .blocks + .items() + .iter() + .map(|(k, v)| { + ( + VersionBlockKey { + part_number: k.part_number, + offset: k.offset, + }, + VersionBlock { + hash: Hash::try_from(v.hash.as_slice()).unwrap(), + size: v.size, + }, + ) + }) + .collect::>(); + + let parts_etags = old + .parts_etags + .items() + .iter() + .map(|(k, v)| (*k, v.clone())) + .collect::>(); + + Some(Version { + uuid: Hash::try_from(old.uuid.as_slice()).unwrap(), + deleted: crdt::Bool::new(old.deleted.get()), + blocks, + parts_etags, + bucket_id: blake2sum(old.bucket.as_bytes()), + key: old.key, + }) + } +} diff --git a/src/model/version_table.rs b/src/model/version_table.rs deleted file mode 100644 index 839b1f4f..00000000 --- a/src/model/version_table.rs +++ /dev/null @@ -1,204 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::sync::Arc; - -use garage_util::background::BackgroundRunner; -use garage_util::data::*; - -use garage_table::crdt::*; -use garage_table::replication::TableShardedReplication; -use garage_table::*; - -use crate::block_ref_table::*; - -use garage_model_050::version_table as old; - -/// A version of an object -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub struct Version { - /// UUID of the version, used as partition key - pub uuid: Uuid, - - // Actual data: the blocks for this version - // In the case of a multipart upload, also store the etags - // of individual parts and check them when doing CompleteMultipartUpload - /// Is this version deleted - pub deleted: crdt::Bool, - /// list of blocks of data composing the version - pub blocks: crdt::Map, - /// Etag of each part in case of a multipart upload, empty otherwise - pub parts_etags: crdt::Map, - - // Back link to bucket+key so that we can figure if - // this was deleted later on - /// Bucket in which the related object is stored - pub bucket_id: Uuid, - /// Key in which the related object is stored - pub key: String, -} - -impl Version { - pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self { - Self { - uuid, - deleted: deleted.into(), - blocks: crdt::Map::new(), - parts_etags: crdt::Map::new(), - bucket_id, - key, - } - } - - pub fn has_part_number(&self, part_number: u64) -> bool { - let case1 = self - .parts_etags - .items() - .binary_search_by(|(k, _)| k.cmp(&part_number)) - .is_ok(); - let 
case2 = self - .blocks - .items() - .binary_search_by(|(k, _)| k.part_number.cmp(&part_number)) - .is_ok(); - case1 || case2 - } -} - -#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct VersionBlockKey { - /// Number of the part - pub part_number: u64, - /// Offset of this sub-segment in its part - pub offset: u64, -} - -impl Ord for VersionBlockKey { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.part_number - .cmp(&other.part_number) - .then(self.offset.cmp(&other.offset)) - } -} - -impl PartialOrd for VersionBlockKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Informations about a single block -#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct VersionBlock { - /// Blake2 sum of the block - pub hash: Hash, - /// Size of the block - pub size: u64, -} - -impl AutoCrdt for VersionBlock { - const WARN_IF_DIFFERENT: bool = true; -} - -impl Entry for Version { - fn partition_key(&self) -> &Uuid { - &self.uuid - } - fn sort_key(&self) -> &EmptyKey { - &EmptyKey - } - fn is_tombstone(&self) -> bool { - self.deleted.get() - } -} - -impl Crdt for Version { - fn merge(&mut self, other: &Self) { - self.deleted.merge(&other.deleted); - - if self.deleted.get() { - self.blocks.clear(); - self.parts_etags.clear(); - } else { - self.blocks.merge(&other.blocks); - self.parts_etags.merge(&other.parts_etags); - } - } -} - -pub struct VersionTable { - pub background: Arc, - pub block_ref_table: Arc>, -} - -impl TableSchema for VersionTable { - const TABLE_NAME: &'static str = "version"; - - type P = Uuid; - type S = EmptyKey; - type E = Version; - type Filter = DeletedFilter; - - fn updated(&self, old: Option, new: Option) { - let block_ref_table = self.block_ref_table.clone(); - self.background.spawn(async move { - if let (Some(old_v), Some(new_v)) = (old, new) { - // Propagate deletion of version blocks - if new_v.deleted.get() && !old_v.deleted.get() { - let deleted_block_refs = old_v - .blocks - .items() - .iter() - .map(|(_k, vb)| BlockRef { - block: vb.hash, - version: old_v.uuid, - deleted: true.into(), - }) - .collect::>(); - block_ref_table.insert_many(&deleted_block_refs[..]).await?; - } - } - Ok(()) - }) - } - - fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { - filter.apply(entry.deleted.get()) - } - - fn try_migrate(bytes: &[u8]) -> Option { - let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?; - - let blocks = old - .blocks - .items() - .iter() - .map(|(k, v)| { - ( - VersionBlockKey { - part_number: k.part_number, - offset: k.offset, - }, - VersionBlock { - hash: Hash::try_from(v.hash.as_slice()).unwrap(), - size: v.size, - }, - ) - }) - .collect::>(); - - let parts_etags = old - .parts_etags - .items() - .iter() - .map(|(k, v)| (*k, v.clone())) - .collect::>(); - - Some(Version { - uuid: Hash::try_from(old.uuid.as_slice()).unwrap(), - deleted: crdt::Bool::new(old.deleted.get()), - blocks, - parts_etags, - bucket_id: blake2sum(old.bucket.as_bytes()), - key: old.key, - }) - } -} diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml index 46d0dc1e..bed7f44a 100644 --- a/src/rpc/Cargo.toml +++ b/src/rpc/Cargo.toml @@ -52,5 +52,6 @@ netapp = { version = "0.4.4", features = ["telemetry"] } hyper = { version = "0.14", features = ["client", "http1", "runtime", "tcp"] } + [features] kubernetes-discovery = [ "kube", "k8s-openapi", "openssl", "schemars" ] diff --git a/src/table/data.rs b/src/table/data.rs index 
ff7965f5..5cb10066 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -1,8 +1,9 @@ use core::borrow::Borrow; +use std::convert::TryInto; use std::sync::Arc; use serde_bytes::ByteBuf; -use sled::Transactional; +use sled::{IVec, Transactional}; use tokio::sync::Notify; use garage_util::data::*; @@ -16,12 +17,13 @@ use crate::gc::GcTodoEntry; use crate::metrics::*; use crate::replication::*; use crate::schema::*; +use crate::util::*; pub struct TableData { system: Arc, - pub(crate) instance: F, - pub(crate) replication: R, + pub instance: F, + pub replication: R, pub store: sled::Tree, @@ -83,18 +85,48 @@ where pub fn read_range( &self, - p: &F::P, - s: &Option, + partition_key: &F::P, + start: &Option, + filter: &Option, + limit: usize, + enumeration_order: EnumerationOrder, + ) -> Result>, Error> { + let partition_hash = partition_key.hash(); + match enumeration_order { + EnumerationOrder::Forward => { + let first_key = match start { + None => partition_hash.to_vec(), + Some(sk) => self.tree_key(partition_key, sk), + }; + let range = self.store.range(first_key..); + self.read_range_aux(partition_hash, range, filter, limit) + } + EnumerationOrder::Reverse => match start { + Some(sk) => { + let last_key = self.tree_key(partition_key, sk); + let range = self.store.range(..=last_key).rev(); + self.read_range_aux(partition_hash, range, filter, limit) + } + None => { + let mut last_key = partition_hash.to_vec(); + let lower = u128::from_be_bytes(last_key[16..32].try_into().unwrap()); + last_key[16..32].copy_from_slice(&u128::to_be_bytes(lower + 1)); + let range = self.store.range(..last_key).rev(); + self.read_range_aux(partition_hash, range, filter, limit) + } + }, + } + } + + fn read_range_aux( + &self, + partition_hash: Hash, + range: impl Iterator>, filter: &Option, limit: usize, ) -> Result>, Error> { - let partition_hash = p.hash(); - let first_key = match s { - None => partition_hash.to_vec(), - Some(sk) => self.tree_key(p, sk), - }; let mut ret = vec![]; - for item in self.store.range(first_key..) { + for item in range { let (key, value) = item?; if &key[..32] != partition_hash.as_slice() { break; @@ -136,17 +168,31 @@ where let update = self.decode_entry(update_bytes)?; let tree_key = self.tree_key(update.partition_key(), update.sort_key()); + self.update_entry_with(&tree_key[..], |ent| match ent { + Some(mut ent) => { + ent.merge(&update); + ent + } + None => update.clone(), + })?; + Ok(()) + } + + pub fn update_entry_with( + &self, + tree_key: &[u8], + f: impl Fn(Option) -> F::E, + ) -> Result, Error> { let changed = (&self.store, &self.merkle_todo).transaction(|(store, mkl_todo)| { - let (old_entry, old_bytes, new_entry) = match store.get(&tree_key)? { + let (old_entry, old_bytes, new_entry) = match store.get(tree_key)? 
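+			// `f` is what makes this entry point generic: update_entry()
+			// above passes a closure that merges the incoming CRDT entry,
+			// while other callers (e.g. the new K2V code) can run their own
+			// read-modify-write logic inside this same sled transaction.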
{ Some(old_bytes) => { let old_entry = self .decode_entry(&old_bytes) .map_err(sled::transaction::ConflictableTransactionError::Abort)?; - let mut new_entry = old_entry.clone(); - new_entry.merge(&update); + let new_entry = f(Some(old_entry.clone())); (Some(old_entry), Some(old_bytes), new_entry) } - None => (None, None, update.clone()), + None => (None, None, f(None)), }; // Scenario 1: the value changed, so of course there is a change @@ -163,8 +209,8 @@ where if value_changed || encoding_changed { let new_bytes_hash = blake2sum(&new_bytes[..]); - mkl_todo.insert(tree_key.clone(), new_bytes_hash.as_slice())?; - store.insert(tree_key.clone(), new_bytes)?; + mkl_todo.insert(tree_key.to_vec(), new_bytes_hash.as_slice())?; + store.insert(tree_key.to_vec(), new_bytes)?; Ok(Some((old_entry, new_entry, new_bytes_hash))) } else { Ok(None) @@ -175,7 +221,7 @@ where self.metrics.internal_update_counter.add(1); let is_tombstone = new_entry.is_tombstone(); - self.instance.updated(old_entry, Some(new_entry)); + self.instance.updated(old_entry.as_ref(), Some(&new_entry)); self.merkle_todo_notify.notify_one(); if is_tombstone { // We are only responsible for GC'ing this item if we are the @@ -187,12 +233,14 @@ where let pk_hash = Hash::try_from(&tree_key[..32]).unwrap(); let nodes = self.replication.write_nodes(&pk_hash); if nodes.first() == Some(&self.system.id) { - GcTodoEntry::new(tree_key, new_bytes_hash).save(&self.gc_todo)?; + GcTodoEntry::new(tree_key.to_vec(), new_bytes_hash).save(&self.gc_todo)?; } } - } - Ok(()) + Ok(Some(new_entry)) + } else { + Ok(None) + } } pub(crate) fn delete_if_equal(self: &Arc, k: &[u8], v: &[u8]) -> Result { @@ -211,7 +259,7 @@ where self.metrics.internal_delete_counter.add(1); let old_entry = self.decode_entry(v)?; - self.instance.updated(Some(old_entry), None); + self.instance.updated(Some(&old_entry), None); self.merkle_todo_notify.notify_one(); } Ok(removed) @@ -235,7 +283,7 @@ where if let Some(old_v) = removed { let old_entry = self.decode_entry(&old_v[..])?; - self.instance.updated(Some(old_entry), None); + self.instance.updated(Some(&old_entry), None); self.merkle_todo_notify.notify_one(); Ok(true) } else { @@ -245,13 +293,13 @@ where // ---- Utility functions ---- - pub(crate) fn tree_key(&self, p: &F::P, s: &F::S) -> Vec { + pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec { let mut ret = p.hash().to_vec(); ret.extend(s.sort_key()); ret } - pub(crate) fn decode_entry(&self, bytes: &[u8]) -> Result { + pub fn decode_entry(&self, bytes: &[u8]) -> Result { match rmp_serde::decode::from_read_ref::<_, F::E>(bytes) { Ok(x) => Ok(x), Err(e) => match F::try_migrate(bytes) { diff --git a/src/table/schema.rs b/src/table/schema.rs index eba918a2..37327037 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -86,7 +86,7 @@ pub trait TableSchema: Send + Sync { // as the update itself is an unchangeable fact that will never go back // due to CRDT logic. Typically errors in propagation of info should be logged // to stderr. 
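+	// Note: old and new are now passed by reference. Implementations that
+	// need owned values to move into a background task (see object_table.rs
+	// and version_table.rs above) clone explicitly, so the common path no
+	// longer pays for two clones on every update.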
- fn updated(&self, _old: Option, _new: Option) {} + fn updated(&self, _old: Option<&Self::E>, _new: Option<&Self::E>) {} fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool; } diff --git a/src/table/table.rs b/src/table/table.rs index 7f87a449..2a167604 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -1,4 +1,5 @@ -use std::collections::{BTreeMap, HashMap}; +use std::borrow::Borrow; +use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::sync::Arc; use std::time::Duration; @@ -26,8 +27,9 @@ use crate::merkle::*; use crate::replication::*; use crate::schema::*; use crate::sync::*; +use crate::util::*; -const TABLE_RPC_TIMEOUT: Duration = Duration::from_secs(10); +pub const TABLE_RPC_TIMEOUT: Duration = Duration::from_secs(10); pub struct Table { pub system: Arc, @@ -45,7 +47,13 @@ pub(crate) enum TableRpc { ReadEntryResponse(Option), // Read range: read all keys in partition P, possibly starting at a certain sort key offset - ReadRange(F::P, Option, Option, usize), + ReadRange { + partition: F::P, + begin_sort_key: Option, + filter: Option, + limit: usize, + enumeration_order: EnumerationOrder, + }, Update(Vec>), } @@ -123,9 +131,13 @@ where Ok(()) } - pub async fn insert_many(&self, entries: &[F::E]) -> Result<(), Error> { + pub async fn insert_many(&self, entries: I) -> Result<(), Error> + where + I: IntoIterator + Send + Sync, + IE: Borrow + Send + Sync, + { let tracer = opentelemetry::global::tracer("garage_table"); - let span = tracer.start(format!("{} insert_many {}", F::TABLE_NAME, entries.len())); + let span = tracer.start(format!("{} insert_many", F::TABLE_NAME)); self.insert_many_internal(entries) .bound_record_duration(&self.data.metrics.put_request_duration) @@ -137,10 +149,15 @@ where Ok(()) } - async fn insert_many_internal(&self, entries: &[F::E]) -> Result<(), Error> { + async fn insert_many_internal(&self, entries: I) -> Result<(), Error> + where + I: IntoIterator + Send + Sync, + IE: Borrow + Send + Sync, + { let mut call_list: HashMap<_, Vec<_>> = HashMap::new(); - for entry in entries.iter() { + for entry in entries.into_iter() { + let entry = entry.borrow(); let hash = entry.partition_key().hash(); let who = self.data.replication.write_nodes(&hash); let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(entry)?)); @@ -261,12 +278,19 @@ where begin_sort_key: Option, filter: Option, limit: usize, + enumeration_order: EnumerationOrder, ) -> Result, Error> { let tracer = opentelemetry::global::tracer("garage_table"); let span = tracer.start(format!("{} get_range", F::TABLE_NAME)); let res = self - .get_range_internal(partition_key, begin_sort_key, filter, limit) + .get_range_internal( + partition_key, + begin_sort_key, + filter, + limit, + enumeration_order, + ) .bound_record_duration(&self.data.metrics.get_request_duration) .with_context(Context::current_with_span(span)) .await?; @@ -282,11 +306,18 @@ where begin_sort_key: Option, filter: Option, limit: usize, + enumeration_order: EnumerationOrder, ) -> Result, Error> { let hash = partition_key.hash(); let who = self.data.replication.read_nodes(&hash); - let rpc = TableRpc::::ReadRange(partition_key.clone(), begin_sort_key, filter, limit); + let rpc = TableRpc::::ReadRange { + partition: partition_key.clone(), + begin_sort_key, + filter, + limit, + enumeration_order, + }; let resps = self .system @@ -302,44 +333,65 @@ where ) .await?; - let mut ret = BTreeMap::new(); - let mut to_repair = BTreeMap::new(); + let mut ret: BTreeMap, F::E> = BTreeMap::new(); + let mut to_repair = 
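+		// Read-repair bookkeeping: keys for which replicas returned
+		// diverging values are recorded here; after the responses are
+		// merged, the reconciled entries are pushed back to the replicas
+		// by a background task (see spawn_cancellable below).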
BTreeSet::new(); for resp in resps { if let TableRpc::Update(entries) = resp { for entry_bytes in entries.iter() { let entry = self.data.decode_entry(entry_bytes.as_slice())?; let entry_key = self.data.tree_key(entry.partition_key(), entry.sort_key()); - match ret.remove(&entry_key) { - None => { - ret.insert(entry_key, Some(entry)); - } - Some(Some(mut prev)) => { - let must_repair = prev != entry; - prev.merge(&entry); - if must_repair { - to_repair.insert(entry_key.clone(), Some(prev.clone())); + match ret.get_mut(&entry_key) { + Some(e) => { + if *e != entry { + e.merge(&entry); + to_repair.insert(entry_key.clone()); } - ret.insert(entry_key, Some(prev)); } - Some(None) => unreachable!(), + None => { + ret.insert(entry_key, entry); + } } } + } else { + return Err(Error::unexpected_rpc_message(resp)); } } + if !to_repair.is_empty() { let self2 = self.clone(); + let to_repair = to_repair + .into_iter() + .map(|k| ret.get(&k).unwrap().clone()) + .collect::>(); self.system.background.spawn_cancellable(async move { - for (_, v) in to_repair.iter_mut() { - self2.repair_on_read(&who[..], v.take().unwrap()).await?; + for v in to_repair { + self2.repair_on_read(&who[..], v).await?; } Ok(()) }); } - let ret_vec = ret - .iter_mut() - .take(limit) - .map(|(_k, v)| v.take().unwrap()) - .collect::>(); + + // At this point, the `ret` btreemap might contain more than `limit` + // items, because nodes might have returned us each `limit` items + // but for different keys. We have to take only the first `limit` items + // in this map, in the specified enumeration order, for two reasons: + // 1. To return to the user no more than the number of items that they requested + // 2. To return only items for which we have a read quorum: we do not know + // that we have a read quorum for the items after the first `limit` + // of them + let ret_vec = match enumeration_order { + EnumerationOrder::Forward => ret + .into_iter() + .take(limit) + .map(|(_k, v)| v) + .collect::>(), + EnumerationOrder::Reverse => ret + .into_iter() + .rev() + .take(limit) + .map(|(_k, v)| v) + .collect::>(), + }; Ok(ret_vec) } @@ -378,8 +430,20 @@ where let value = self.data.read_entry(key, sort_key)?; Ok(TableRpc::ReadEntryResponse(value)) } - TableRpc::ReadRange(key, begin_sort_key, filter, limit) => { - let values = self.data.read_range(key, begin_sort_key, filter, *limit)?; + TableRpc::ReadRange { + partition, + begin_sort_key, + filter, + limit, + enumeration_order, + } => { + let values = self.data.read_range( + partition, + begin_sort_key, + filter, + *limit, + *enumeration_order, + )?; Ok(TableRpc::Update(values)) } TableRpc::Update(pairs) => { diff --git a/src/table/util.rs b/src/table/util.rs index 2a5c3afe..20595a94 100644 --- a/src/table/util.rs +++ b/src/table/util.rs @@ -17,7 +17,7 @@ impl PartitionKey for EmptyKey { } } -#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] pub enum DeletedFilter { Any, Deleted, @@ -33,3 +33,19 @@ impl DeletedFilter { } } } + +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub enum EnumerationOrder { + Forward, + Reverse, +} + +impl EnumerationOrder { + pub fn from_reverse(reverse: bool) -> Self { + if reverse { + Self::Reverse + } else { + Self::Forward + } + } +} diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml index f13c1589..95cde531 100644 --- a/src/util/Cargo.toml +++ b/src/util/Cargo.toml @@ -41,3 +41,6 @@ http = "0.2" hyper = "0.14" opentelemetry = { version = "0.17", features = [ 
"rt-tokio", "metrics", "trace" ] } + +[features] +k2v = [] diff --git a/src/util/config.rs b/src/util/config.rs index e4d96476..4d66bfe4 100644 --- a/src/util/config.rs +++ b/src/util/config.rs @@ -73,7 +73,11 @@ pub struct Config { pub sled_flush_every_ms: u64, /// Configuration for S3 api - pub s3_api: ApiConfig, + pub s3_api: S3ApiConfig, + + /// Configuration for K2V api + #[cfg(feature = "k2v")] + pub k2v_api: Option, /// Configuration for serving files as normal web server pub s3_web: WebConfig, @@ -85,7 +89,7 @@ pub struct Config { /// Configuration for S3 api #[derive(Deserialize, Debug, Clone)] -pub struct ApiConfig { +pub struct S3ApiConfig { /// Address and port to bind for api serving pub api_bind_addr: SocketAddr, /// S3 region to use @@ -95,6 +99,14 @@ pub struct ApiConfig { pub root_domain: Option, } +/// Configuration for K2V api +#[cfg(feature = "k2v")] +#[derive(Deserialize, Debug, Clone)] +pub struct K2VApiConfig { + /// Address and port to bind for api serving + pub api_bind_addr: SocketAddr, +} + /// Configuration for serving files as normal web server #[derive(Deserialize, Debug, Clone)] pub struct WebConfig { diff --git a/src/util/error.rs b/src/util/error.rs index bdb3a69b..8734a0c8 100644 --- a/src/util/error.rs +++ b/src/util/error.rs @@ -44,6 +44,9 @@ pub enum Error { #[error(display = "Tokio semaphore acquire error: {}", _0)] TokioSemAcquire(#[error(source)] tokio::sync::AcquireError), + #[error(display = "Tokio broadcast receive error: {}", _0)] + TokioBcastRecv(#[error(source)] tokio::sync::broadcast::error::RecvError), + #[error(display = "Remote error: {}", _0)] RemoteError(String), diff --git a/src/web/web_server.rs b/src/web/web_server.rs index c3d691d0..867adc51 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -20,8 +20,8 @@ use crate::error::*; use garage_api::error::{Error as ApiError, OkOrBadRequest, OkOrInternalError}; use garage_api::helpers::{authority_to_host, host_to_bucket}; -use garage_api::s3_cors::{add_cors_headers, find_matching_cors_rule, handle_options_for_bucket}; -use garage_api::s3_get::{handle_get, handle_head}; +use garage_api::s3::cors::{add_cors_headers, find_matching_cors_rule, handle_options_for_bucket}; +use garage_api::s3::get::{handle_get, handle_head}; use garage_model::garage::Garage; -- cgit v1.2.3