author    Mendes <mendes.oulamara@pm.me>  2022-10-04 18:14:49 +0200
committer Mendes <mendes.oulamara@pm.me>  2022-10-04 18:14:49 +0200
commit    829f815a897b04986559910bbcbf53625adcdf20 (patch)
tree      6db3c27cff2aded754a641d1f2b05c83be701267 /src/garage/tests/s3
parent    99f96b9564c9c841dc6c56f1255a6e70ff884d46 (diff)
parent    a096ced35562bd0a8877a1ee2f755be1edafe343 (diff)
download  garage-829f815a897b04986559910bbcbf53625adcdf20.tar.gz
          garage-829f815a897b04986559910bbcbf53625adcdf20.zip
Merge remote-tracking branch 'origin/main' into optimal-layout
Diffstat (limited to 'src/garage/tests/s3')
-rw-r--r--  src/garage/tests/s3/list.rs                  615
-rw-r--r--  src/garage/tests/s3/mod.rs                     6
-rw-r--r--  src/garage/tests/s3/multipart.rs             415
-rw-r--r--  src/garage/tests/s3/objects.rs               275
-rw-r--r--  src/garage/tests/s3/simple.rs                 31
-rw-r--r--  src/garage/tests/s3/streaming_signature.rs   185
-rw-r--r--  src/garage/tests/s3/website.rs               324
7 files changed, 1851 insertions, 0 deletions
diff --git a/src/garage/tests/s3/list.rs b/src/garage/tests/s3/list.rs
new file mode 100644
index 00000000..bb03f250
--- /dev/null
+++ b/src/garage/tests/s3/list.rs
@@ -0,0 +1,615 @@
+use crate::common;
+
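+// KEYS mixes plain keys, keys that are also prefixes ("a" vs "a/..."),
+// and a non-ASCII key ("a/é") to exercise key encoding. KEYS_MULTIPART
+// repeats "a" and "c" on purpose: several multipart uploads can be in
+// flight for the same key, each with its own upload ID.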
+const KEYS: [&str; 8] = ["a", "a/a", "a/b", "a/c", "a/d/a", "a/é", "b", "c"];
+const KEYS_MULTIPART: [&str; 5] = ["a", "a", "c", "c/a", "c/b"];
+
+#[tokio::test]
+async fn test_listobjectsv2() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("listobjectsv2");
+
+ for k in KEYS {
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key(k)
+ .send()
+ .await
+ .unwrap();
+ }
+
+ {
+ // Scope the result so it cannot accidentally be reused
+ // in a later assert through copy-paste
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 8);
+ assert!(r.common_prefixes.is_none());
+ }
+
+ //@FIXME aws-sdk-s3 automatically validates max-keys values.
+ // If we set it to zero the SDK drops the parameter, and it probably
+ // does the same for values bigger than 1000. Boto and awscli do not
+ // perform these checks; we should write our own minimal client to
+ // bypass the SDK's validation and be sure that we behave correctly.
+
+ {
+ // With 2 elements
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .max_keys(2)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 2);
+ assert!(r.common_prefixes.is_none());
+ assert!(r.next_continuation_token.is_some());
+ }
+
+ {
+ // With pagination
+ let mut cnt = 0;
+ let mut next = None;
+ let last_idx = KEYS.len() - 1;
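+ // Page through the listing one key at a time: the continuation
+ // token must be present on every page except the last one.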
+
+ for i in 0..KEYS.len() {
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .set_continuation_token(next)
+ .max_keys(1)
+ .send()
+ .await
+ .unwrap();
+
+ cnt += 1;
+ next = r.next_continuation_token;
+
+ assert_eq!(r.contents.unwrap().len(), 1);
+ assert!(r.common_prefixes.is_none());
+ if i != last_idx {
+ assert!(next.is_some());
+ }
+ }
+ assert_eq!(cnt, KEYS.len());
+ }
+
+ {
+ // With a delimiter
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .delimiter("/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 3);
+ assert_eq!(r.common_prefixes.unwrap().len(), 1);
+ }
+
+ {
+ // With a delimiter and pagination
+ let mut cnt_pfx = 0;
+ let mut cnt_key = 0;
+ let mut next = None;
+
+ for _i in 0..KEYS.len() {
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .set_continuation_token(next)
+ .delimiter("/")
+ .max_keys(1)
+ .send()
+ .await
+ .unwrap();
+
+ next = r.next_continuation_token;
+ match (r.contents, r.common_prefixes) {
+ (Some(k), None) if k.len() == 1 => cnt_key += 1,
+ (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1,
+ _ => unreachable!("logic error"),
+ };
+ if next.is_none() {
+ break;
+ }
+ }
+ assert_eq!(cnt_key, 3);
+ assert_eq!(cnt_pfx, 1);
+ }
+
+ {
+ // With a prefix
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .prefix("a/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 5);
+ assert!(r.common_prefixes.is_none());
+ }
+
+ {
+ // With a prefix and a delimiter
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .prefix("a/")
+ .delimiter("/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 4);
+ assert_eq!(r.common_prefixes.unwrap().len(), 1);
+ }
+
+ {
+ // With a prefix, a delimiter and max_keys
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .prefix("a/")
+ .delimiter("/")
+ .max_keys(1)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.as_ref().unwrap().len(), 1);
+ assert_eq!(
+ r.contents
+ .unwrap()
+ .first()
+ .unwrap()
+ .key
+ .as_ref()
+ .unwrap()
+ .as_str(),
+ "a/a"
+ );
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With start_after before all keys
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .start_after("Z")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 8);
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With start_after after all keys
+ let r = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .start_after("c")
+ .send()
+ .await
+ .unwrap();
+
+ assert!(r.contents.is_none());
+ assert!(r.common_prefixes.is_none());
+ }
+}
+
+#[tokio::test]
+async fn test_listobjectsv1() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("listobjects");
+
+ for k in KEYS {
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key(k)
+ .send()
+ .await
+ .unwrap();
+ }
+
+ {
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 8);
+ assert!(r.common_prefixes.is_none());
+ }
+
+ {
+ // With 2 elements
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .max_keys(2)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 2);
+ assert!(r.common_prefixes.is_none());
+ assert!(r.next_marker.is_some());
+ }
+
+ {
+ // With pagination
+ let mut cnt = 0;
+ let mut next = None;
+ let last_idx = KEYS.len() - 1;
+
+ for i in 0..KEYS.len() {
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .set_marker(next)
+ .max_keys(1)
+ .send()
+ .await
+ .unwrap();
+
+ cnt += 1;
+ next = r.next_marker;
+
+ assert_eq!(r.contents.unwrap().len(), 1);
+ assert!(r.common_prefixes.is_none());
+ if i != last_idx {
+ assert!(next.is_some());
+ }
+ }
+ assert_eq!(cnt, KEYS.len());
+ }
+
+ {
+ // With a delimiter
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .delimiter("/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 3);
+ assert_eq!(r.common_prefixes.unwrap().len(), 1);
+ }
+
+ {
+ // With a delimiter and pagination
+ let mut cnt_pfx = 0;
+ let mut cnt_key = 0;
+ let mut next = None;
+
+ for _i in 0..KEYS.len() {
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .delimiter("/")
+ .set_marker(next)
+ .max_keys(1)
+ .send()
+ .await
+ .unwrap();
+
+ next = r.next_marker;
+ match (r.contents, r.common_prefixes) {
+ (Some(k), None) if k.len() == 1 => cnt_key += 1,
+ (None, Some(pfx)) if pfx.len() == 1 => cnt_pfx += 1,
+ _ => unreachable!("logic error"),
+ };
+ if next.is_none() {
+ break;
+ }
+ }
+ assert_eq!(cnt_key, 3);
+ // We have no optimization to skip over a whole common prefix
+ // in ListObjects (v1), so the same prefix is returned once per
+ // element, i.e. 5 times here. It is up to the client to merge
+ // the results. This is compliant with the AWS spec.
+ assert_eq!(cnt_pfx, 5);
+ }
+
+ {
+ // With a prefix
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .prefix("a/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 5);
+ assert!(r.common_prefixes.is_none());
+ }
+
+ {
+ // With a prefix and a delimiter
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .prefix("a/")
+ .delimiter("/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 4);
+ assert_eq!(r.common_prefixes.unwrap().len(), 1);
+ }
+
+ {
+ // With a prefix, a delimiter and max_keys
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .prefix("a/")
+ .delimiter("/")
+ .max_keys(1)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.as_ref().unwrap().len(), 1);
+ assert_eq!(
+ r.contents
+ .unwrap()
+ .first()
+ .unwrap()
+ .key
+ .as_ref()
+ .unwrap()
+ .as_str(),
+ "a/a"
+ );
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With marker before all keys
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .marker("Z")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.contents.unwrap().len(), 8);
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With marker after all keys
+ let r = ctx
+ .client
+ .list_objects()
+ .bucket(&bucket)
+ .marker("c")
+ .send()
+ .await
+ .unwrap();
+
+ assert!(r.contents.is_none());
+ assert!(r.common_prefixes.is_none());
+ }
+}
+
+#[tokio::test]
+async fn test_listmultipart() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("listmultipartuploads");
+
+ for k in KEYS_MULTIPART {
+ ctx.client
+ .create_multipart_upload()
+ .bucket(&bucket)
+ .key(k)
+ .send()
+ .await
+ .unwrap();
+ }
+
+ {
+ // Default
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.uploads.unwrap().len(), 5);
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With pagination
+ let mut next = None;
+ let mut upnext = None;
+ let last_idx = KEYS_MULTIPART.len() - 1;
+
+ for i in 0..KEYS_MULTIPART.len() {
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .set_key_marker(next)
+ .set_upload_id_marker(upnext)
+ .max_uploads(1)
+ .send()
+ .await
+ .unwrap();
+
+ next = r.next_key_marker;
+ upnext = r.next_upload_id_marker;
+
+ assert_eq!(r.uploads.unwrap().len(), 1);
+ assert!(r.common_prefixes.is_none());
+ if i != last_idx {
+ assert!(next.is_some());
+ }
+ }
+ }
+ {
+ // With delimiter
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .delimiter("/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.uploads.unwrap().len(), 3);
+ assert_eq!(r.common_prefixes.unwrap().len(), 1);
+ }
+ {
+ // With delimiter and pagination
+ let mut next = None;
+ let mut upnext = None;
+ let mut upcnt = 0;
+ let mut pfxcnt = 0;
+ let mut loopcnt = 0;
+
+ while loopcnt < KEYS_MULTIPART.len() {
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .delimiter("/")
+ .max_uploads(1)
+ .set_key_marker(next)
+ .set_upload_id_marker(upnext)
+ .send()
+ .await
+ .unwrap();
+
+ next = r.next_key_marker;
+ upnext = r.next_upload_id_marker;
+
+ loopcnt += 1;
+ upcnt += r.uploads.unwrap_or_default().len();
+ pfxcnt += r.common_prefixes.unwrap_or_default().len();
+
+ if next.is_none() {
+ break;
+ }
+ }
+
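+ // Each page held exactly one item (an upload or a common prefix),
+ // so the two counters must add up to the number of pages fetched.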
+ assert_eq!(upcnt + pfxcnt, loopcnt);
+ assert_eq!(upcnt, 3);
+ assert_eq!(pfxcnt, 1);
+ }
+ {
+ // With prefix
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .prefix("c")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.uploads.unwrap().len(), 3);
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With prefix and delimiter
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .prefix("c")
+ .delimiter("/")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.uploads.unwrap().len(), 1);
+ assert_eq!(r.common_prefixes.unwrap().len(), 1);
+ }
+ {
+ // With prefix, delimiter and max_uploads
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .prefix("c")
+ .delimiter("/")
+ .max_uploads(1)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.uploads.unwrap().len(), 1);
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With starting token before the first element
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .key_marker("ZZZZZ")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.uploads.unwrap().len(), 5);
+ assert!(r.common_prefixes.is_none());
+ }
+ {
+ // With starting token after the last element
+ let r = ctx
+ .client
+ .list_multipart_uploads()
+ .bucket(&bucket)
+ .key_marker("d")
+ .send()
+ .await
+ .unwrap();
+
+ assert!(r.uploads.is_none());
+ assert!(r.common_prefixes.is_none());
+ }
+}
diff --git a/src/garage/tests/s3/mod.rs b/src/garage/tests/s3/mod.rs
new file mode 100644
index 00000000..623eb665
--- /dev/null
+++ b/src/garage/tests/s3/mod.rs
@@ -0,0 +1,6 @@
+mod list;
+mod multipart;
+mod objects;
+mod simple;
+mod streaming_signature;
+mod website;
diff --git a/src/garage/tests/s3/multipart.rs b/src/garage/tests/s3/multipart.rs
new file mode 100644
index 00000000..895a2993
--- /dev/null
+++ b/src/garage/tests/s3/multipart.rs
@@ -0,0 +1,415 @@
+use crate::common;
+use aws_sdk_s3::model::{CompletedMultipartUpload, CompletedPart};
+use aws_sdk_s3::types::ByteStream;
+
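+// 5 MiB is the minimum S3 part size for every part except the last one,
+// so SZ_5MB is the smallest valid part for a multipart upload.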
+const SZ_5MB: usize = 5 * 1024 * 1024;
+const SZ_10MB: usize = 10 * 1024 * 1024;
+
+#[tokio::test]
+async fn test_uploadlistpart() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("uploadpart");
+
+ let u1 = vec![0xee; SZ_5MB];
+ let u2 = vec![0x11; SZ_5MB];
+
+ let up = ctx
+ .client
+ .create_multipart_upload()
+ .bucket(&bucket)
+ .key("a")
+ .send()
+ .await
+ .unwrap();
+ assert!(up.upload_id.is_some());
+
+ let uid = up.upload_id.as_ref().unwrap();
+
+ {
+ let r = ctx
+ .client
+ .list_parts()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .send()
+ .await
+ .unwrap();
+
+ assert!(r.parts.is_none());
+ }
+
+ let p1 = ctx
+ .client
+ .upload_part()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .part_number(2)
+ .body(ByteStream::from(u1))
+ .send()
+ .await
+ .unwrap();
+
+ {
+ // ListPart on 1st element
+ let r = ctx
+ .client
+ .list_parts()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .send()
+ .await
+ .unwrap();
+
+ let ps = r.parts.unwrap();
+ assert_eq!(ps.len(), 1);
+ let fp = ps.iter().find(|x| x.part_number == 2).unwrap();
+ assert!(fp.last_modified.is_some());
+ assert_eq!(
+ fp.e_tag.as_ref().unwrap(),
+ "\"3366bb9dcf710d6801b5926467d02e19\""
+ );
+ assert_eq!(fp.size, SZ_5MB as i64);
+ }
+
+ let p2 = ctx
+ .client
+ .upload_part()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .part_number(1)
+ .body(ByteStream::from(u2))
+ .send()
+ .await
+ .unwrap();
+
+ {
+ // ListPart on the 2 elements
+ let r = ctx
+ .client
+ .list_parts()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .send()
+ .await
+ .unwrap();
+
+ let ps = r.parts.unwrap();
+ assert_eq!(ps.len(), 2);
+ let fp = ps.iter().find(|x| x.part_number == 1).unwrap();
+ assert!(fp.last_modified.is_some());
+ assert_eq!(
+ fp.e_tag.as_ref().unwrap(),
+ "\"3c484266f9315485694556e6c693bfa2\""
+ );
+ assert_eq!(fp.size, SZ_5MB as i64);
+ }
+
+ {
+ // Call pagination
+ let r = ctx
+ .client
+ .list_parts()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .max_parts(1)
+ .send()
+ .await
+ .unwrap();
+
+ assert!(r.part_number_marker.is_none());
+ assert!(r.next_part_number_marker.is_some());
+ assert_eq!(r.max_parts, 1_i32);
+ assert!(r.is_truncated);
+ assert_eq!(r.key.unwrap(), "a");
+ assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str());
+ assert_eq!(r.parts.unwrap().len(), 1);
+
+ let r2 = ctx
+ .client
+ .list_parts()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .max_parts(1)
+ .part_number_marker(r.next_part_number_marker.as_ref().unwrap())
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(
+ r2.part_number_marker.as_ref().unwrap(),
+ r.next_part_number_marker.as_ref().unwrap()
+ );
+ assert_eq!(r2.max_parts, 1_i32);
+ assert!(r2.is_truncated);
+ assert_eq!(r2.key.unwrap(), "a");
+ assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str());
+ assert_eq!(r2.parts.unwrap().len(), 1);
+ }
+
+ let cmp = CompletedMultipartUpload::builder()
+ .parts(
+ CompletedPart::builder()
+ .part_number(1)
+ .e_tag(p2.e_tag.unwrap())
+ .build(),
+ )
+ .parts(
+ CompletedPart::builder()
+ .part_number(2)
+ .e_tag(p1.e_tag.unwrap())
+ .build(),
+ )
+ .build();
+
+ ctx.client
+ .complete_multipart_upload()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .multipart_upload(cmp)
+ .send()
+ .await
+ .unwrap();
+
+ // The multipart upload must not appear anymore
+ assert!(ctx
+ .client
+ .list_parts()
+ .bucket(&bucket)
+ .key("a")
+ .upload_id(uid)
+ .send()
+ .await
+ .is_err());
+
+ {
+ // The object must appear as a regular object
+ let r = ctx
+ .client
+ .head_object()
+ .bucket(&bucket)
+ .key("a")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.content_length, (SZ_5MB * 2) as i64);
+ }
+}
+
+#[tokio::test]
+async fn test_uploadpartcopy() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("uploadpartcopy");
+
+ let u1 = vec![0x11; SZ_10MB];
+ let u2 = vec![0x22; SZ_5MB];
+ let u3 = vec![0x33; SZ_5MB];
+ let u4 = vec![0x44; SZ_5MB];
+ let u5 = vec![0x55; SZ_5MB];
+
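+ // copy_source_range follows HTTP Range semantics and is inclusive:
+ // "bytes=500-5500000" selects 5_499_501 bytes. source2 is u4 followed
+ // by u5 (5 MiB each), so that range maps to u4[500..] plus
+ // u5[..overflow + 1]. exp_obj reassembles the four parts in order.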
+ let overflow = 5500000 - SZ_5MB;
+ let mut exp_obj = u3.clone();
+ exp_obj.extend(&u4[500..]);
+ exp_obj.extend(&u5[..overflow + 1]);
+ exp_obj.extend(&u2);
+ exp_obj.extend(&u1[500..5500000 + 1]);
+
+ // (setup) Upload a single-part object
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key("source1")
+ .body(ByteStream::from(u1))
+ .send()
+ .await
+ .unwrap();
+
+ // (setup) Upload a multipart object with 2 parts
+ {
+ let up = ctx
+ .client
+ .create_multipart_upload()
+ .bucket(&bucket)
+ .key("source2")
+ .send()
+ .await
+ .unwrap();
+ let uid = up.upload_id.as_ref().unwrap();
+
+ let p1 = ctx
+ .client
+ .upload_part()
+ .bucket(&bucket)
+ .key("source2")
+ .upload_id(uid)
+ .part_number(1)
+ .body(ByteStream::from(u4))
+ .send()
+ .await
+ .unwrap();
+
+ let p2 = ctx
+ .client
+ .upload_part()
+ .bucket(&bucket)
+ .key("source2")
+ .upload_id(uid)
+ .part_number(2)
+ .body(ByteStream::from(u5))
+ .send()
+ .await
+ .unwrap();
+
+ let cmp = CompletedMultipartUpload::builder()
+ .parts(
+ CompletedPart::builder()
+ .part_number(1)
+ .e_tag(p1.e_tag.unwrap())
+ .build(),
+ )
+ .parts(
+ CompletedPart::builder()
+ .part_number(2)
+ .e_tag(p2.e_tag.unwrap())
+ .build(),
+ )
+ .build();
+
+ ctx.client
+ .complete_multipart_upload()
+ .bucket(&bucket)
+ .key("source2")
+ .upload_id(uid)
+ .multipart_upload(cmp)
+ .send()
+ .await
+ .unwrap();
+ }
+
+ // The multipart upload under test, mixing uploaded and copied parts
+ let up = ctx
+ .client
+ .create_multipart_upload()
+ .bucket(&bucket)
+ .key("target")
+ .send()
+ .await
+ .unwrap();
+ let uid = up.upload_id.as_ref().unwrap();
+
+ let p3 = ctx
+ .client
+ .upload_part()
+ .bucket(&bucket)
+ .key("target")
+ .upload_id(uid)
+ .part_number(3)
+ .body(ByteStream::from(u2))
+ .send()
+ .await
+ .unwrap();
+
+ let p1 = ctx
+ .client
+ .upload_part()
+ .bucket(&bucket)
+ .key("target")
+ .upload_id(uid)
+ .part_number(1)
+ .body(ByteStream::from(u3))
+ .send()
+ .await
+ .unwrap();
+
+ let p2 = ctx
+ .client
+ .upload_part_copy()
+ .bucket(&bucket)
+ .key("target")
+ .upload_id(uid)
+ .part_number(2)
+ .copy_source("uploadpartcopy/source2")
+ .copy_source_range("bytes=500-5500000")
+ .send()
+ .await
+ .unwrap();
+
+ let p4 = ctx
+ .client
+ .upload_part_copy()
+ .bucket(&bucket)
+ .key("target")
+ .upload_id(uid)
+ .part_number(4)
+ .copy_source("uploadpartcopy/source1")
+ .copy_source_range("bytes=500-5500000")
+ .send()
+ .await
+ .unwrap();
+
+ let cmp = CompletedMultipartUpload::builder()
+ .parts(
+ CompletedPart::builder()
+ .part_number(1)
+ .e_tag(p1.e_tag.unwrap())
+ .build(),
+ )
+ .parts(
+ CompletedPart::builder()
+ .part_number(2)
+ .e_tag(p2.copy_part_result.unwrap().e_tag.unwrap())
+ .build(),
+ )
+ .parts(
+ CompletedPart::builder()
+ .part_number(3)
+ .e_tag(p3.e_tag.unwrap())
+ .build(),
+ )
+ .parts(
+ CompletedPart::builder()
+ .part_number(4)
+ .e_tag(p4.copy_part_result.unwrap().e_tag.unwrap())
+ .build(),
+ )
+ .build();
+
+ ctx.client
+ .complete_multipart_upload()
+ .bucket(&bucket)
+ .key("target")
+ .upload_id(uid)
+ .multipart_upload(cmp)
+ .send()
+ .await
+ .unwrap();
+
+ // (check) Get object
+
+ let obj = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key("target")
+ .send()
+ .await
+ .unwrap();
+
+ let real_obj = obj
+ .body
+ .collect()
+ .await
+ .expect("Error reading data")
+ .into_bytes();
+
+ assert_eq!(real_obj.len(), exp_obj.len());
+ assert_eq!(real_obj, exp_obj);
+}
diff --git a/src/garage/tests/s3/objects.rs b/src/garage/tests/s3/objects.rs
new file mode 100644
index 00000000..65f9e867
--- /dev/null
+++ b/src/garage/tests/s3/objects.rs
@@ -0,0 +1,275 @@
+use crate::common;
+use aws_sdk_s3::model::{Delete, ObjectIdentifier};
+use aws_sdk_s3::types::ByteStream;
+
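+// Three key shapes are tested: a key containing a space, a key made of
+// control characters, and a key of multi-byte UTF-8 codepoints, to
+// exercise key encoding end to end.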
+const STD_KEY: &str = "hello world";
+const CTRL_KEY: &str = "\x00\x01\x02\x00";
+const UTF8_KEY: &str = "\u{211D}\u{1F923}\u{1F44B}";
+const BODY: &[u8; 62] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+
+#[tokio::test]
+async fn test_putobject() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("putobject");
+
+ {
+ // Send an empty object (can serve as a directory marker)
+ // with a content type
+ let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
+ let content_type = "text/csv";
+ let r = ctx
+ .client
+ .put_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .content_type(content_type)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.e_tag.unwrap().as_str(), etag);
+ // Garage returns a version ID here.
+ // We should check whether Amazon returns one when versioning is not enabled.
+ assert!(r.version_id.is_some());
+
+ let _version = r.version_id.unwrap();
+
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .send()
+ .await
+ .unwrap();
+
+ assert_bytes_eq!(o.body, b"");
+ assert_eq!(o.e_tag.unwrap(), etag);
+ // Garage does not return the version ID on GetObject.
+ // We should check whether Amazon returns one when versioning is not enabled.
+ // assert_eq!(o.version_id.unwrap(), _version);
+ assert_eq!(o.content_type.unwrap(), content_type);
+ assert!(o.last_modified.is_some());
+ assert_eq!(o.content_length, 0);
+ assert_eq!(o.parts_count, 0);
+ assert_eq!(o.tag_count, 0);
+ }
+
+ {
+ // Key with control characters,
+ // no content type and some data
+ let etag = "\"49f68a5c8493ec2c0bf489821c21fc3b\"";
+ let data = ByteStream::from_static(b"hi");
+
+ let r = ctx
+ .client
+ .put_object()
+ .bucket(&bucket)
+ .key(CTRL_KEY)
+ .body(data)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.e_tag.unwrap().as_str(), etag);
+ assert!(r.version_id.is_some());
+
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(CTRL_KEY)
+ .send()
+ .await
+ .unwrap();
+
+ assert_bytes_eq!(o.body, b"hi");
+ assert_eq!(o.e_tag.unwrap(), etag);
+ assert!(o.last_modified.is_some());
+ assert_eq!(o.content_length, 2);
+ assert_eq!(o.parts_count, 0);
+ assert_eq!(o.tag_count, 0);
+ }
+
+ {
+ // Key with UTF8 codepoints including emoji
+ let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
+
+ let r = ctx
+ .client
+ .put_object()
+ .bucket(&bucket)
+ .key(UTF8_KEY)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.e_tag.unwrap().as_str(), etag);
+ assert!(r.version_id.is_some());
+
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(UTF8_KEY)
+ .send()
+ .await
+ .unwrap();
+
+ assert_bytes_eq!(o.body, b"");
+ assert_eq!(o.e_tag.unwrap(), etag);
+ assert!(o.last_modified.is_some());
+ assert_eq!(o.content_length, 0);
+ assert_eq!(o.parts_count, 0);
+ assert_eq!(o.tag_count, 0);
+ }
+}
+
+#[tokio::test]
+async fn test_getobject() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("getobject");
+
+ let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
+ let data = ByteStream::from_static(BODY);
+
+ let r = ctx
+ .client
+ .put_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .body(data)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.e_tag.unwrap().as_str(), etag);
+
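+ // HTTP Range headers are inclusive: "bytes=1-9" selects 9 bytes,
+ // "bytes=9-" runs to the end, and "bytes=-5" means the last 5 bytes.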
+ {
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .range("bytes=1-9")
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(o.content_range.unwrap().as_str(), "bytes 1-9/62");
+ assert_bytes_eq!(o.body, &BODY[1..10]);
+ }
+ {
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .range("bytes=9-")
+ .send()
+ .await
+ .unwrap();
+ assert_eq!(o.content_range.unwrap().as_str(), "bytes 9-61/62");
+ assert_bytes_eq!(o.body, &BODY[9..]);
+ }
+ {
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .range("bytes=-5")
+ .send()
+ .await
+ .unwrap();
+ assert_eq!(o.content_range.unwrap().as_str(), "bytes 57-61/62");
+ assert_bytes_eq!(o.body, &BODY[57..]);
+ }
+}
+
+#[tokio::test]
+async fn test_deleteobject() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("deleteobject");
+
+ let mut to_del = Delete::builder();
+
+ // Add objects with an empty body
+ for i in 0..5 {
+ let k = format!("k-{}", i);
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key(k.to_string())
+ .send()
+ .await
+ .unwrap();
+ if i > 0 {
+ to_del = to_del.objects(ObjectIdentifier::builder().key(k).build());
+ }
+ }
+
+ // Add objects with a body
+ for i in 0..5 {
+ let k = format!("l-{}", i);
+ let data = ByteStream::from_static(BODY);
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key(k.to_string())
+ .body(data)
+ .send()
+ .await
+ .unwrap();
+
+ if i > 0 {
+ to_del = to_del.objects(ObjectIdentifier::builder().key(k).build());
+ }
+ }
+
+ ctx.client
+ .delete_object()
+ .bucket(&bucket)
+ .key("k-0")
+ .send()
+ .await
+ .unwrap();
+
+ ctx.client
+ .delete_object()
+ .bucket(&bucket)
+ .key("l-0")
+ .send()
+ .await
+ .unwrap();
+
+ let r = ctx
+ .client
+ .delete_objects()
+ .bucket(&bucket)
+ .delete(to_del.build())
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(r.deleted.unwrap().len(), 8);
+
+ let l = ctx
+ .client
+ .list_objects_v2()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ assert!(l.contents.is_none());
+
+ // Deleting a non-existing object shouldn't be a problem
+ ctx.client
+ .delete_object()
+ .bucket(&bucket)
+ .key("l-0")
+ .send()
+ .await
+ .unwrap();
+}
diff --git a/src/garage/tests/s3/simple.rs b/src/garage/tests/s3/simple.rs
new file mode 100644
index 00000000..f54ae9ac
--- /dev/null
+++ b/src/garage/tests/s3/simple.rs
@@ -0,0 +1,31 @@
+use crate::common;
+
+#[tokio::test]
+async fn test_simple() {
+ use aws_sdk_s3::types::ByteStream;
+
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("test-simple");
+
+ let data = ByteStream::from_static(b"Hello world!");
+
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key("test")
+ .body(data)
+ .send()
+ .await
+ .unwrap();
+
+ let res = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key("test")
+ .send()
+ .await
+ .unwrap();
+
+ assert_bytes_eq!(res.body, b"Hello world!");
+}
diff --git a/src/garage/tests/s3/streaming_signature.rs b/src/garage/tests/s3/streaming_signature.rs
new file mode 100644
index 00000000..c68f7dfc
--- /dev/null
+++ b/src/garage/tests/s3/streaming_signature.rs
@@ -0,0 +1,185 @@
+use std::collections::HashMap;
+
+use crate::common;
+use common::custom_requester::BodySignature;
+use hyper::Method;
+
+const STD_KEY: &str = "hello-world";
+//const CTRL_KEY: &str = "\x00\x01\x02\x00";
+const BODY: &[u8; 62] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+
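+// BodySignature::Streaming(n) asks our custom requester to use AWS SigV4
+// streaming ("aws-chunked") signing; n is presumably the chunk size used
+// to split the body. Each chunk is framed as
+//   <hex-size>;chunk-signature=<sig>\r\n<chunk-bytes>\r\n
+// and a final zero-length chunk closes the stream.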
+#[tokio::test]
+async fn test_putobject_streaming() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("putobject-streaming");
+
+ {
+ // Send an empty object (can serve as a directory marker)
+ // with a content type
+ let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
+ let content_type = "text/csv";
+ let mut headers = HashMap::new();
+ headers.insert("content-type".to_owned(), content_type.to_owned());
+ let _ = ctx
+ .custom_request
+ .builder(bucket.clone())
+ .method(Method::PUT)
+ .path(STD_KEY.to_owned())
+ .unsigned_headers(headers)
+ .vhost_style(true)
+ .body(vec![])
+ .body_signature(BodySignature::Streaming(10))
+ .send()
+ .await
+ .unwrap();
+
+ // assert_eq!(r.e_tag.unwrap().as_str(), etag);
+ // Garage returns a version ID here.
+ // We should check whether Amazon returns one when versioning is not enabled.
+ // assert!(r.version_id.is_some());
+
+ //let _version = r.version_id.unwrap();
+
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ .key(STD_KEY)
+ .send()
+ .await
+ .unwrap();
+
+ assert_bytes_eq!(o.body, b"");
+ assert_eq!(o.e_tag.unwrap(), etag);
+ // Garage does not return the version ID on GetObject.
+ // We should check whether Amazon returns one when versioning is not enabled.
+ // assert_eq!(o.version_id.unwrap(), _version);
+ assert_eq!(o.content_type.unwrap(), content_type);
+ assert!(o.last_modified.is_some());
+ assert_eq!(o.content_length, 0);
+ assert_eq!(o.parts_count, 0);
+ assert_eq!(o.tag_count, 0);
+ }
+
+ {
+ let etag = "\"46cf18a9b447991b450cad3facf5937e\"";
+
+ let _ = ctx
+ .custom_request
+ .builder(bucket.clone())
+ .method(Method::PUT)
+ //.path(CTRL_KEY.to_owned()) // at the moment custom_request does not
+ // URL-encode the path, so this would fail
+ .path("abc".to_owned())
+ .vhost_style(true)
+ .body(BODY.to_vec())
+ .body_signature(BodySignature::Streaming(16))
+ .send()
+ .await
+ .unwrap();
+
+ // assert_eq!(r.e_tag.unwrap().as_str(), etag);
+ // assert!(r.version_id.is_some());
+
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(&bucket)
+ //.key(CTRL_KEY)
+ .key("abc")
+ .send()
+ .await
+ .unwrap();
+
+ assert_bytes_eq!(o.body, BODY);
+ assert_eq!(o.e_tag.unwrap(), etag);
+ assert!(o.last_modified.is_some());
+ assert_eq!(o.content_length, 62);
+ assert_eq!(o.parts_count, 0);
+ assert_eq!(o.tag_count, 0);
+ }
+}
+
+#[tokio::test]
+async fn test_create_bucket_streaming() {
+ let ctx = common::context();
+ let bucket = "createbucket-streaming";
+
+ {
+ // create bucket
+ let _ = ctx
+ .custom_request
+ .builder(bucket.to_owned())
+ .method(Method::PUT)
+ .body_signature(BodySignature::Streaming(10))
+ .send()
+ .await
+ .unwrap();
+
+ // test if the bucket exists and works properly
+ let etag = "\"d41d8cd98f00b204e9800998ecf8427e\"";
+ let content_type = "text/csv";
+ let _ = ctx
+ .client
+ .put_object()
+ .bucket(bucket)
+ .key(STD_KEY)
+ .content_type(content_type)
+ .send()
+ .await
+ .unwrap();
+
+ let o = ctx
+ .client
+ .get_object()
+ .bucket(bucket)
+ .key(STD_KEY)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(o.e_tag.unwrap(), etag);
+ }
+}
+
+#[tokio::test]
+async fn test_put_website_streaming() {
+ let ctx = common::context();
+ let bucket = ctx.create_bucket("putwebsite-streaming");
+
+ {
+ let website_config = r#"<?xml version="1.0" encoding="UTF-8"?>
+<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <ErrorDocument>
+ <Key>err/error.html</Key>
+ </ErrorDocument>
+ <IndexDocument>
+ <Suffix>home.html</Suffix>
+ </IndexDocument>
+</WebsiteConfiguration>"#;
+
+ let mut query = HashMap::new();
+ query.insert("website".to_owned(), None);
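+ // The valueless "website" query parameter selects the website
+ // subresource, making this a PutBucketWebsite request.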
+ let _ = ctx
+ .custom_request
+ .builder(bucket.clone())
+ .method(Method::PUT)
+ .query_params(query)
+ .body(website_config.as_bytes().to_vec())
+ .body_signature(BodySignature::Streaming(10))
+ .send()
+ .await
+ .unwrap();
+
+ let o = ctx
+ .client
+ .get_bucket_website()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ assert_eq!(o.index_document.unwrap().suffix.unwrap(), "home.html");
+ assert_eq!(o.error_document.unwrap().key.unwrap(), "err/error.html");
+ }
+}
diff --git a/src/garage/tests/s3/website.rs b/src/garage/tests/s3/website.rs
new file mode 100644
index 00000000..0570ac6a
--- /dev/null
+++ b/src/garage/tests/s3/website.rs
@@ -0,0 +1,324 @@
+use crate::common;
+use crate::common::ext::*;
+use aws_sdk_s3::{
+ model::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration},
+ types::ByteStream,
+};
+use http::Request;
+use hyper::{
+ body::{to_bytes, Body},
+ Client,
+};
+
+const BODY: &[u8; 16] = b"<h1>bonjour</h1>";
+const BODY_ERR: &[u8; 6] = b"erreur";
+
+#[tokio::test]
+async fn test_website() {
+ const BCKT_NAME: &str = "my-website";
+ let ctx = common::context();
+ let bucket = ctx.create_bucket(BCKT_NAME);
+
+ let data = ByteStream::from_static(BODY);
+
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key("index.html")
+ .body(data)
+ .send()
+ .await
+ .unwrap();
+
+ let client = Client::new();
+
+ let req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .body(Body::empty())
+ .unwrap()
+ };
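+ // The Host header drives bucket resolution: Garage's web endpoint
+ // serves vhost-style requests for <bucket>.web.garage in this setup.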
+
+ let mut resp = client.request(req()).await.unwrap();
+
+ assert_eq!(resp.status(), 404);
+ assert_ne!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ ); /* check that we do not leak the body */
+
+ ctx.garage
+ .command()
+ .args(["bucket", "website", "--allow", BCKT_NAME])
+ .quiet()
+ .expect_success_status("Could not allow website on bucket");
+
+ resp = client.request(req()).await.unwrap();
+ assert_eq!(resp.status(), 200);
+ assert_eq!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ );
+
+ ctx.garage
+ .command()
+ .args(["bucket", "website", "--deny", BCKT_NAME])
+ .quiet()
+ .expect_success_status("Could not deny website on bucket");
+
+ resp = client.request(req()).await.unwrap();
+ assert_eq!(resp.status(), 404);
+ assert_ne!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ ); /* check that we do not leak the body */
+}
+
+#[tokio::test]
+async fn test_website_s3_api() {
+ const BCKT_NAME: &str = "my-cors";
+ let ctx = common::context();
+ let bucket = ctx.create_bucket(BCKT_NAME);
+
+ let data = ByteStream::from_static(BODY);
+
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key("site/home.html")
+ .body(data)
+ .send()
+ .await
+ .unwrap();
+
+ ctx.client
+ .put_object()
+ .bucket(&bucket)
+ .key("err/error.html")
+ .body(ByteStream::from_static(BODY_ERR))
+ .send()
+ .await
+ .unwrap();
+
+ let conf = WebsiteConfiguration::builder()
+ .index_document(IndexDocument::builder().suffix("home.html").build())
+ .error_document(ErrorDocument::builder().key("err/error.html").build())
+ .build();
+
+ ctx.client
+ .put_bucket_website()
+ .bucket(&bucket)
+ .website_configuration(conf)
+ .send()
+ .await
+ .unwrap();
+
+ let cors = CorsConfiguration::builder()
+ .cors_rules(
+ CorsRule::builder()
+ .id("main-rule")
+ .allowed_headers("*")
+ .allowed_methods("GET")
+ .allowed_methods("PUT")
+ .allowed_origins("*")
+ .build(),
+ )
+ .build();
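+ // Only GET and PUT are allowed, so the preflight tests below expect
+ // a PUT preflight to succeed and a DELETE preflight to get a 403.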
+
+ ctx.client
+ .put_bucket_cors()
+ .bucket(&bucket)
+ .cors_configuration(cors)
+ .send()
+ .await
+ .unwrap();
+
+ {
+ let cors_res = ctx
+ .client
+ .get_bucket_cors()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ let main_rule = cors_res.cors_rules().unwrap().iter().next().unwrap();
+
+ assert_eq!(main_rule.id.as_ref().unwrap(), "main-rule");
+ assert_eq!(
+ main_rule.allowed_headers.as_ref().unwrap(),
+ &vec!["*".to_string()]
+ );
+ assert_eq!(
+ main_rule.allowed_origins.as_ref().unwrap(),
+ &vec!["*".to_string()]
+ );
+ assert_eq!(
+ main_rule.allowed_methods.as_ref().unwrap(),
+ &vec!["GET".to_string(), "PUT".to_string()]
+ );
+ }
+
+ let client = Client::new();
+
+ // Test direct requests with CORS
+ {
+ let req = Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .header("Origin", "https://example.com")
+ .body(Body::empty())
+ .unwrap();
+
+ let mut resp = client.request(req).await.unwrap();
+
+ assert_eq!(resp.status(), 200);
+ assert_eq!(
+ resp.headers().get("access-control-allow-origin").unwrap(),
+ "*"
+ );
+ assert_eq!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ );
+ }
+
+ // Test ErrorDocument on 404
+ {
+ let req = Request::builder()
+ .method("GET")
+ .uri(format!(
+ "http://127.0.0.1:{}/wrong.html",
+ ctx.garage.web_port
+ ))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .body(Body::empty())
+ .unwrap();
+
+ let mut resp = client.request(req).await.unwrap();
+
+ assert_eq!(resp.status(), 404);
+ assert_eq!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY_ERR.as_ref()
+ );
+ }
+
+ // Test CORS with an allowed preflight request
+ {
+ let req = Request::builder()
+ .method("OPTIONS")
+ .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .header("Origin", "https://example.com")
+ .header("Access-Control-Request-Method", "PUT")
+ .body(Body::empty())
+ .unwrap();
+
+ let mut resp = client.request(req).await.unwrap();
+
+ assert_eq!(resp.status(), 200);
+ assert_eq!(
+ resp.headers().get("access-control-allow-origin").unwrap(),
+ "*"
+ );
+ assert_ne!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ );
+ }
+
+ // Test CORS with a forbidden preflight request
+ {
+ let req = Request::builder()
+ .method("OPTIONS")
+ .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .header("Origin", "https://example.com")
+ .header("Access-Control-Request-Method", "DELETE")
+ .body(Body::empty())
+ .unwrap();
+
+ let mut resp = client.request(req).await.unwrap();
+
+ assert_eq!(resp.status(), 403);
+ assert_ne!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ );
+ }
+
+ //@TODO test CORS on the S3 endpoint. We need to handle auth manually to check it.
+
+ // Delete cors
+ ctx.client
+ .delete_bucket_cors()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ // Check that the CORS rules are deleted from the API
+ // @FIXME check what the expected behavior is when GetBucketCors is
+ // called on a bucket without any CORS configuration.
+ assert!(ctx
+ .client
+ .get_bucket_cors()
+ .bucket(&bucket)
+ .send()
+ .await
+ .is_err());
+
+ // Test that CORS headers are no longer sent for a previously allowed request
+ {
+ let req = Request::builder()
+ .method("OPTIONS")
+ .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .header("Origin", "https://example.com")
+ .header("Access-Control-Request-Method", "PUT")
+ .body(Body::empty())
+ .unwrap();
+
+ let mut resp = client.request(req).await.unwrap();
+
+ assert_eq!(resp.status(), 403);
+ assert_ne!(
+ to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+ BODY.as_ref()
+ );
+ }
+
+ // Disallow website from the API
+ ctx.client
+ .delete_bucket_website()
+ .bucket(&bucket)
+ .send()
+ .await
+ .unwrap();
+
+ // Check that the website is not served anymore
+ {
+ let req = Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
+ .header("Host", format!("{}.web.garage", BCKT_NAME))
+ .body(Body::empty())
+ .unwrap();
+
+ let mut resp = client.request(req).await.unwrap();
+
+ assert_eq!(resp.status(), 404);
+ let body = to_bytes(resp.body_mut()).await.unwrap();
+ // The body can only be collected once; read it into a variable
+ // before comparing it against both payloads.
+ assert_ne!(body.as_ref(), BODY_ERR.as_ref());
+ assert_ne!(body.as_ref(), BODY.as_ref());
+ }
+}