path: root/src/garage/tests/list.rs
author	Alex <alex@adnab.me>	2022-05-10 13:16:57 +0200
committer	Alex <alex@adnab.me>	2022-05-10 13:16:57 +0200
commit	5768bf362262f78376af14517c4921941986192e (patch)
tree	b4baf3051eade0f63649443278bb3a3f4c38ec25 /src/garage/tests/list.rs
parent	def78c5e6f5da37a0d17b5652c525fbeccbc2e86 (diff)
download	garage-5768bf362262f78376af14517c4921941986192e.tar.gz
	garage-5768bf362262f78376af14517c4921941986192e.zip
First implementation of K2V (#293)
**Specification:** View spec at [this URL](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md)

- [x] Specify the structure of K2V triples
- [x] Specify the DVVS format used for causality detection
- [x] Specify the K2V index (just a counter of number of values per partition key)
- [x] Specify single-item endpoints: ReadItem, InsertItem, DeleteItem
- [x] Specify index endpoint: ReadIndex
- [x] Specify multi-item endpoints: InsertBatch, ReadBatch, DeleteBatch
- [x] Move to JSON objects instead of tuples
- [x] Specify endpoints for polling for updates on single values (PollItem)

**Implementation:**

- [x] Table for K2V items, causal contexts
- [x] Indexing mechanism and table for K2V index
- [x] Make API handlers a bit more generic
- [x] K2V API endpoint
- [x] K2V API router
- [x] ReadItem
- [x] InsertItem
- [x] DeleteItem
- [x] PollItem
- [x] ReadIndex
- [x] InsertBatch
- [x] ReadBatch
- [x] DeleteBatch

**Testing:**

- [x] Just a simple Python script that does some requests to check visually that things are going right (does not contain parsing of results or assertions on returned values)
- [x] Actual tests:
  - [x] Adapt testing framework
  - [x] Simple test with InsertItem + ReadItem
  - [x] Test with several Insert/Read/DeleteItem + ReadIndex
  - [x] Test all combinations of return formats for ReadItem
  - [x] Test with ReadBatch, InsertBatch, DeleteBatch
  - [x] Test with PollItem
  - [x] Test error codes
- [ ] Fix most broken stuff
  - [x] test PollItem broken randomly
  - [x] when invalid causality tokens are given, errors should be 4xx not 5xx

**Improvements:**

- [x] Descending range queries
  - [x] Specify
  - [x] Implement
  - [x] Add test
- [x] Batch updates to index counter
- [x] Put K2V behind `k2v` feature flag

Co-authored-by: Alex Auvolat <alex@adnab.me>
Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/293
Co-authored-by: Alex <alex@adnab.me>
Co-committed-by: Alex <alex@adnab.me>
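As a rough illustration of the data model this checklist refers to: a K2V triple associates a partition key and a sort key with one or more concurrent values plus a causal context (the DVVS), and the index is just a per-partition-key counter of values. The sketch below is a minimal assumption-based illustration; the struct and field names are hypothetical and do not come from the Garage source tree. See the linked k2v-spec.md for the authoritative definitions.

```rust
// Illustrative sketch only -- these names are hypothetical and are not the
// actual Garage types; refer to doc/drafts/k2v-spec.md for the real spec.

/// A K2V triple: (partition key, sort key) -> concurrent values + causal context.
struct K2vTriple {
	partition_key: String,
	sort_key: String,
	/// Concurrent values are all kept until the causal context (DVVS)
	/// allows superseded ones to be discarded.
	values: Vec<Vec<u8>>,
	/// Opaque causality token, returned by ReadItem and sent back on
	/// InsertItem/DeleteItem for causality detection.
	causality_token: String,
}

/// A K2V index entry: just a counter of the number of values stored
/// under each partition key, as served by ReadIndex.
struct K2vIndexEntry {
	partition_key: String,
	value_count: u64,
}
```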
Diffstat (limited to 'src/garage/tests/list.rs')
-rw-r--r--	src/garage/tests/list.rs	615
1 file changed, 0 insertions, 615 deletions
diff --git a/src/garage/tests/list.rs b/src/garage/tests/list.rs
deleted file mode 100644
index bb03f250..00000000
--- a/src/garage/tests/list.rs
+++ /dev/null
@@ -1,615 +0,0 @@
-use crate::common;
-
-const KEYS: [&str; 8] = ["a", "a/a", "a/b", "a/c", "a/d/a", "a/é", "b", "c"];
-const KEYS_MULTIPART: [&str; 5] = ["a", "a", "c", "c/a", "c/b"];
-
-#[tokio::test]
-async fn test_listobjectsv2() {
- let ctx = common::context();
- let bucket = ctx.create_bucket("listobjectsv2");
-
- for k in KEYS {
- ctx.client
- .put_object()
- .bucket(&bucket)
- .key(k)
- .send()
- .await
- .unwrap();
- }
-
- {
- // Scope the variable to a block so it is not accidentally reused
- // in a later assert through copy-paste
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 8);
- assert!(r.common_prefixes.is_none());
- }
-
- //@FIXME aws-sdk-s3 automatically validates max-keys values.
- // If we set it to zero, it silently drops the parameter, and it probably
- // does the same for values greater than 1000.
- // Boto and awscli do not perform this validation, so we should write
- // our own minimal client to bypass the AWS SDK's checks and make
- // sure that we behave correctly.
-
- {
- // With 2 elements
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .max_keys(2)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 2);
- assert!(r.common_prefixes.is_none());
- assert!(r.next_continuation_token.is_some());
- }
-
- {
- // With pagination
- let mut cnt = 0;
- let mut next = None;
- let last_idx = KEYS.len() - 1;
-
- for i in 0..KEYS.len() {
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .set_continuation_token(next)
- .max_keys(1)
- .send()
- .await
- .unwrap();
-
- cnt += 1;
- next = r.next_continuation_token;
-
- assert_eq!(r.contents.unwrap().len(), 1);
- assert!(r.common_prefixes.is_none());
- if i != last_idx {
- assert!(next.is_some());
- }
- }
- assert_eq!(cnt, KEYS.len());
- }
-
- {
- // With a delimiter
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .delimiter("/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 3);
- assert_eq!(r.common_prefixes.unwrap().len(), 1);
- }
-
- {
- // With a delimiter and pagination
- let mut cnt_pfx = 0;
- let mut cnt_key = 0;
- let mut next = None;
-
- for _i in 0..KEYS.len() {
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .set_continuation_token(next)
- .delimiter("/")
- .max_keys(1)
- .send()
- .await
- .unwrap();
-
- next = r.next_continuation_token;
- match (r.contents, r.common_prefixes) {
- (Some(k), None) if k.len() == 1 => cnt_key += 1,
- (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1,
- _ => unreachable!("logic error"),
- };
- if next.is_none() {
- break;
- }
- }
- assert_eq!(cnt_key, 3);
- assert_eq!(cnt_pfx, 1);
- }
-
- {
- // With a prefix
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .prefix("a/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 5);
- assert!(r.common_prefixes.is_none());
- }
-
- {
- // With a prefix and a delimiter
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .prefix("a/")
- .delimiter("/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 4);
- assert_eq!(r.common_prefixes.unwrap().len(), 1);
- }
-
- {
- // With a prefix, a delimiter and max_keys
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .prefix("a/")
- .delimiter("/")
- .max_keys(1)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.as_ref().unwrap().len(), 1);
- assert_eq!(
- r.contents
- .unwrap()
- .first()
- .unwrap()
- .key
- .as_ref()
- .unwrap()
- .as_str(),
- "a/a"
- );
- assert!(r.common_prefixes.is_none());
- }
- {
- // With start_after before all keys
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .start_after("Z")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 8);
- assert!(r.common_prefixes.is_none());
- }
- {
- // With start_after after all keys
- let r = ctx
- .client
- .list_objects_v2()
- .bucket(&bucket)
- .start_after("c")
- .send()
- .await
- .unwrap();
-
- assert!(r.contents.is_none());
- assert!(r.common_prefixes.is_none());
- }
-}
-
-#[tokio::test]
-async fn test_listobjectsv1() {
- let ctx = common::context();
- let bucket = ctx.create_bucket("listobjects");
-
- for k in KEYS {
- ctx.client
- .put_object()
- .bucket(&bucket)
- .key(k)
- .send()
- .await
- .unwrap();
- }
-
- {
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 8);
- assert!(r.common_prefixes.is_none());
- }
-
- {
- // With 2 elements
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .max_keys(2)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 2);
- assert!(r.common_prefixes.is_none());
- assert!(r.next_marker.is_some());
- }
-
- {
- // With pagination
- let mut cnt = 0;
- let mut next = None;
- let last_idx = KEYS.len() - 1;
-
- for i in 0..KEYS.len() {
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .set_marker(next)
- .max_keys(1)
- .send()
- .await
- .unwrap();
-
- cnt += 1;
- next = r.next_marker;
-
- assert_eq!(r.contents.unwrap().len(), 1);
- assert!(r.common_prefixes.is_none());
- if i != last_idx {
- assert!(next.is_some());
- }
- }
- assert_eq!(cnt, KEYS.len());
- }
-
- {
- // With a delimiter
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .delimiter("/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 3);
- assert_eq!(r.common_prefixes.unwrap().len(), 1);
- }
-
- {
- // With a delimiter and pagination
- let mut cnt_pfx = 0;
- let mut cnt_key = 0;
- let mut next = None;
-
- for _i in 0..KEYS.len() {
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .delimiter("/")
- .set_marker(next)
- .max_keys(1)
- .send()
- .await
- .unwrap();
-
- next = r.next_marker;
- match (r.contents, r.common_prefixes) {
- (Some(k), None) if k.len() == 1 => cnt_key += 1,
- (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1,
- _ => unreachable!("logic error"),
- };
- if next.is_none() {
- break;
- }
- }
- assert_eq!(cnt_key, 3);
- // ListObjectsV1 has no optimization to skip a whole common prefix,
- // so the same prefix is returned 5 times, once per element under it.
- // It is up to the client to merge the results.
- // This is compliant with the AWS spec.
- assert_eq!(cnt_pfx, 5);
- }
-
- {
- // With a prefix
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .prefix("a/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 5);
- assert!(r.common_prefixes.is_none());
- }
-
- {
- // With a prefix and a delimiter
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .prefix("a/")
- .delimiter("/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 4);
- assert_eq!(r.common_prefixes.unwrap().len(), 1);
- }
-
- {
- // With a prefix, a delimiter and max_keys
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .prefix("a/")
- .delimiter("/")
- .max_keys(1)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.as_ref().unwrap().len(), 1);
- assert_eq!(
- r.contents
- .unwrap()
- .first()
- .unwrap()
- .key
- .as_ref()
- .unwrap()
- .as_str(),
- "a/a"
- );
- assert!(r.common_prefixes.is_none());
- }
- {
- // With marker before all keys
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .marker("Z")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.contents.unwrap().len(), 8);
- assert!(r.common_prefixes.is_none());
- }
- {
- // With marker after all keys
- let r = ctx
- .client
- .list_objects()
- .bucket(&bucket)
- .marker("c")
- .send()
- .await
- .unwrap();
-
- assert!(r.contents.is_none());
- assert!(r.common_prefixes.is_none());
- }
-}
-
-#[tokio::test]
-async fn test_listmultipart() {
- let ctx = common::context();
- let bucket = ctx.create_bucket("listmultipartuploads");
-
- for k in KEYS_MULTIPART {
- ctx.client
- .create_multipart_upload()
- .bucket(&bucket)
- .key(k)
- .send()
- .await
- .unwrap();
- }
-
- {
- // Default
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.uploads.unwrap().len(), 5);
- assert!(r.common_prefixes.is_none());
- }
- {
- // With pagination
- let mut next = None;
- let mut upnext = None;
- let last_idx = KEYS_MULTIPART.len() - 1;
-
- for i in 0..KEYS_MULTIPART.len() {
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .set_key_marker(next)
- .set_upload_id_marker(upnext)
- .max_uploads(1)
- .send()
- .await
- .unwrap();
-
- next = r.next_key_marker;
- upnext = r.next_upload_id_marker;
-
- assert_eq!(r.uploads.unwrap().len(), 1);
- assert!(r.common_prefixes.is_none());
- if i != last_idx {
- assert!(next.is_some());
- }
- }
- }
- {
- // With delimiter
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .delimiter("/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.uploads.unwrap().len(), 3);
- assert_eq!(r.common_prefixes.unwrap().len(), 1);
- }
- {
- // With delimiter and pagination
- let mut next = None;
- let mut upnext = None;
- let mut upcnt = 0;
- let mut pfxcnt = 0;
- let mut loopcnt = 0;
-
- while loopcnt < KEYS_MULTIPART.len() {
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .delimiter("/")
- .max_uploads(1)
- .set_key_marker(next)
- .set_upload_id_marker(upnext)
- .send()
- .await
- .unwrap();
-
- next = r.next_key_marker;
- upnext = r.next_upload_id_marker;
-
- loopcnt += 1;
- upcnt += r.uploads.unwrap_or_default().len();
- pfxcnt += r.common_prefixes.unwrap_or_default().len();
-
- if next.is_none() {
- break;
- }
- }
-
- assert_eq!(upcnt + pfxcnt, loopcnt);
- assert_eq!(upcnt, 3);
- assert_eq!(pfxcnt, 1);
- }
- {
- // With prefix
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .prefix("c")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.uploads.unwrap().len(), 3);
- assert!(r.common_prefixes.is_none());
- }
- {
- // With prefix and delimiter
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .prefix("c")
- .delimiter("/")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.uploads.unwrap().len(), 1);
- assert_eq!(r.common_prefixes.unwrap().len(), 1);
- }
- {
- // With prefix, delimiter and max_uploads
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .prefix("c")
- .delimiter("/")
- .max_uploads(1)
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.uploads.unwrap().len(), 1);
- assert!(r.common_prefixes.is_none());
- }
- {
- // With starting token before the first element
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .key_marker("ZZZZZ")
- .send()
- .await
- .unwrap();
-
- assert_eq!(r.uploads.unwrap().len(), 5);
- assert!(r.common_prefixes.is_none());
- }
- {
- // With starting token after the last element
- let r = ctx
- .client
- .list_multipart_uploads()
- .bucket(&bucket)
- .key_marker("d")
- .send()
- .await
- .unwrap();
-
- assert!(r.uploads.is_none());
- assert!(r.common_prefixes.is_none());
- }
-}