author     Quentin <quentin@deuxfleurs.fr>   2020-12-10 20:12:56 +0100
committer  Quentin <quentin@deuxfleurs.fr>   2020-12-10 20:12:56 +0100
commit     e8c12072cefa37d9aec023fd6087b2d190ee3e4c
tree       88b15bfe703d2454d99550010ad04934513d25a4
parent     51d0c14e440f00f24dbed6c3bce915a183a2bb65
parent     022b386a5085cad79d649a82846c41cad730920b
Merge branch 'master' into feature/website
-rw-r--r--  Cargo.lock                                                |  1
-rw-r--r--  README.md                                                 | 10
-rw-r--r--  script/dev-env-aws.sh                                     | 14
-rw-r--r--  script/dev-env-s3cmd.sh (renamed from script/dev-env.sh, was -rwxr-xr-x) |  0
-rwxr-xr-x  script/test-smoke.sh                                      | 52
-rw-r--r--  src/api/Cargo.toml                                        |  1
-rw-r--r--  src/api/s3_get.rs                                         |  5
-rw-r--r--  src/api/s3_list.rs                                        | 36
-rw-r--r--  src/api/s3_put.rs                                         | 17
-rw-r--r--  src/table/table.rs                                        |  3
10 files changed, 117 insertions, 22 deletions
diff --git a/Cargo.lock b/Cargo.lock
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -462,6 +462,7 @@ dependencies = [
  "log",
  "md-5",
  "percent-encoding",
+ "rand",
  "roxmltree",
  "sha2",
  "tokio",
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -26,12 +26,18 @@ We propose the following quickstart to setup a full dev. environment as quickly
 4. Run `./script/dev-cluster.sh` to launch a test cluster (feel free to read the script)
 5. Run `./script/dev-configure.sh` to configure your test cluster with default values (same datacenter, 100 tokens)
 6. Run `./script/dev-bucket.sh` to create a bucket named `eprouvette` and an API key that will be stored in `/tmp/garage.s3`
-7. Run `source ./script/dev-env.sh` to configure your CLI environment
+7. Run `source ./script/dev-env-aws.sh` to configure your CLI environment
 8. You can use `garage` to manage the cluster. Try `garage --help`.
-9. You can use `s3grg` to add, remove, and delete files. Try `s3grg --help`, `s3grg cp /proc/cpuinfo s3://eprouvette/cpuinfo.txt`, `s3grg ls s3://eprouvette`. `s3grg` is a wrapper on the `aws s3` subcommand configured with the previously generated API key (the one in `/tmp/garage.s3`).
+9. You can use the `awsgrg` alias to add, remove, and delete files. Try `awsgrg help`, `awsgrg cp /proc/cpuinfo s3://eprouvette/cpuinfo.txt`, or `awsgrg ls s3://eprouvette`. `awsgrg` is a wrapper on the `aws s3` command pre-configured with the previously generated API key (the one in `/tmp/garage.s3`) and localhost as the endpoint.
 
 Now you should be ready to start hacking on garage!
 
+## S3 compatibility
+
+Only a subset of S3 is supported: adding, listing, getting and deleting files in a bucket.
+Bucket management, ACL and other advanced features are not (yet?) handled through the S3 API but through the `garage` CLI.
+We primarily test `garage` against the `awscli` tool and `nextcloud`.
+
 ## Setting up Garage
 
 Use the `genkeys.sh` script to generate TLS keys for encrypting communications between Garage nodes.
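The README changes above describe a dev workflow driven through the `aws` CLI wrapper. As a side illustration (not part of this commit), the same put/list/get/delete subset can be exercised programmatically. The sketch below is hypothetical: it assumes the `rusoto_core`/`rusoto_s3` crates (0.45), the `eprouvette` bucket and the credentials exported by `script/dev-env-aws.sh`, with Garage's S3 endpoint on `http://127.0.0.1:3911` as configured in the scripts of this merge.

```rust
// Hypothetical sketch, not part of this commit: drive the supported S3 subset
// (put, list, get, delete) against a local Garage dev cluster with rusoto_s3.
// Assumes `source script/dev-env-aws.sh` was run first, so the AWS_* variables
// point at the key generated by dev-bucket.sh.
use rusoto_core::{HttpClient, Region};
use rusoto_credential::StaticProvider;
use rusoto_s3::{
    DeleteObjectRequest, GetObjectRequest, ListObjectsRequest, PutObjectRequest, S3, S3Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Custom region pointing at the local Garage S3 endpoint.
    let region = Region::Custom {
        name: "garage".to_string(),
        endpoint: "http://127.0.0.1:3911".to_string(),
    };
    let creds = StaticProvider::new_minimal(
        std::env::var("AWS_ACCESS_KEY_ID")?,
        std::env::var("AWS_SECRET_ACCESS_KEY")?,
    );
    let client = S3Client::new_with(HttpClient::new()?, creds, region);

    // Add an object.
    client
        .put_object(PutObjectRequest {
            bucket: "eprouvette".to_string(),
            key: "cpuinfo.txt".to_string(),
            body: Some(std::fs::read("/proc/cpuinfo")?.into()),
            ..Default::default()
        })
        .await?;

    // List the bucket; with this merge, non-empty ETags come back quoted.
    let listing = client
        .list_objects(ListObjectsRequest {
            bucket: "eprouvette".to_string(),
            ..Default::default()
        })
        .await?;
    for obj in listing.contents.unwrap_or_default() {
        println!("{:?} {:?} {:?}", obj.key, obj.size, obj.e_tag);
    }

    // Get the object back, then delete it.
    let got = client
        .get_object(GetObjectRequest {
            bucket: "eprouvette".to_string(),
            key: "cpuinfo.txt".to_string(),
            ..Default::default()
        })
        .await?;
    println!("GET returned ETag {:?}", got.e_tag);

    client
        .delete_object(DeleteObjectRequest {
            bucket: "eprouvette".to_string(),
            key: "cpuinfo.txt".to_string(),
            ..Default::default()
        })
        .await?;
    Ok(())
}
```

This is essentially what the `awsgrg` alias and `script/test-smoke.sh` in the diff below do through the `aws` CLI.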
diff --git a/script/dev-env-aws.sh b/script/dev-env-aws.sh
new file mode 100644
index 00000000..c9a57660
--- /dev/null
+++ b/script/dev-env-aws.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+SCRIPT_FOLDER="`dirname \"${BASH_SOURCE[0]}\"`"
+REPO_FOLDER="${SCRIPT_FOLDER}/../"
+GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
+GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
+PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"
+
+export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
+export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
+export AWS_DEFAULT_REGION='garage'
+
+alias awsgrg="aws s3 \
+  --endpoint-url http://127.0.0.1:3911"
diff --git a/script/dev-env.sh b/script/dev-env-s3cmd.sh
index 15f08e2e..15f08e2e 100755..100644
--- a/script/dev-env.sh
+++ b/script/dev-env-s3cmd.sh
diff --git a/script/test-smoke.sh b/script/test-smoke.sh
index 7faf2a07..111afac9 100755
--- a/script/test-smoke.sh
+++ b/script/test-smoke.sh
@@ -11,12 +11,52 @@ ${SCRIPT_FOLDER}/dev-clean.sh
 ${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 &
 ${SCRIPT_FOLDER}/dev-configure.sh
 ${SCRIPT_FOLDER}/dev-bucket.sh
-source ${SCRIPT_FOLDER}/dev-env.sh
+source ${SCRIPT_FOLDER}/dev-env-aws.sh
+source ${SCRIPT_FOLDER}/dev-env-s3cmd.sh
 
-dd if=/dev/urandom of=/tmp/garage.rnd bs=1M count=10
+garage status
+garage key list
+garage bucket list
 
-s3grg cp /tmp/garage.rnd s3://eprouvette/
-s3grg ls s3://eprouvette
-s3grg cp s3://eprouvette/garage.rnd /tmp/garage.dl
+dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # < INLINE_THRESHOLD = 3072 bytes
+dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5
+dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10
 
-diff /tmp/garage.rnd /tmp/garage.dl
+for idx in $(seq 1 3); do
+  # AWS sends
+  awsgrg cp /tmp/garage.$idx.rnd s3://eprouvette/garage.$idx.aws
+
+  awsgrg ls s3://eprouvette
+
+  awsgrg cp s3://eprouvette/garage.$idx.aws /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+
+  s3grg get s3://eprouvette/garage.$idx.aws /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+
+  awsgrg rm s3://eprouvette/garage.$idx.aws
+
+  # S3CMD sends
+  s3grg put /tmp/garage.$idx.rnd s3://eprouvette/garage.$idx.s3cmd
+
+  s3grg ls s3://eprouvette
+
+  s3grg get s3://eprouvette/garage.$idx.s3cmd /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+
+  awsgrg cp s3://eprouvette/garage.$idx.s3cmd /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+
+  s3grg rm s3://eprouvette/garage.$idx.s3cmd
+done
+rm /tmp/garage.{1,2,3}.rnd
+
+garage bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
+garage bucket delete --yes eprouvette
+garage key delete --yes $AWS_ACCESS_KEY_ID
+
+echo "success"
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index a366f9b8..079993c3 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -27,6 +27,7 @@ md-5 = "0.9.1"
 sha2 = "0.8"
 hmac = "0.7"
 crypto-mac = "0.7"
+rand = "0.7"
 
 futures = "0.3"
 futures-util = "0.3"
diff --git a/src/api/s3_get.rs b/src/api/s3_get.rs
index 43215923..1a23f476 100644
--- a/src/api/s3_get.rs
+++ b/src/api/s3_get.rs
@@ -24,10 +24,13 @@ fn object_headers(
             "Content-Type",
             version_meta.headers.content_type.to_string(),
         )
-        .header("ETag", version_meta.etag.to_string())
         .header("Last-Modified", date_str)
        .header("Accept-Ranges", format!("bytes"));
 
+    if !version_meta.etag.is_empty() {
+        resp = resp.header("ETag", format!("\"{}\"", version_meta.etag));
+    }
+
     for (k, v) in version_meta.headers.other.iter() {
         resp = resp.header(k, v.to_string());
     }
diff --git a/src/api/s3_list.rs b/src/api/s3_list.rs
index 3b739a8a..599d0d11 100644
--- a/src/api/s3_list.rs
+++ b/src/api/s3_list.rs
@@ -18,6 +18,7 @@ use crate::encoding::*;
 struct ListResultInfo {
     last_modified: u64,
     size: u64,
+    etag: String,
 }
 
 pub async fn handle_list(
@@ -56,12 +57,12 @@ pub async fn handle_list(
 
         for object in objects.iter() {
             if !object.key.starts_with(prefix) {
-                truncated = false;
+                truncated = None;
                 break 'query_loop;
             }
             if let Some(version) = object.versions().iter().find(|x| x.is_data()) {
                 if result_keys.len() + result_common_prefixes.len() >= max_keys {
-                    truncated = true;
+                    truncated = Some(object.key.to_string());
                     break 'query_loop;
                 }
                 let common_prefix = if delimiter.len() > 0 {
@@ -75,19 +76,18 @@
                 if let Some(pfx) = common_prefix {
                     result_common_prefixes.insert(pfx.to_string());
                 } else {
-                    let size = match &version.state {
-                        ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => {
-                            meta.size
-                        }
+                    let meta = match &version.state {
+                        ObjectVersionState::Complete(ObjectVersionData::Inline(meta, _)) => meta,
                         ObjectVersionState::Complete(ObjectVersionData::FirstBlock(meta, _)) => {
-                            meta.size
+                            meta
                         }
                         _ => unreachable!(),
                     };
                     let info = match result_keys.get(&object.key) {
                         None => ListResultInfo {
                             last_modified: version.timestamp,
-                            size,
+                            size: meta.size,
+                            etag: meta.etag.to_string(),
                         },
                         Some(_lri) => {
                             return Err(Error::Message(format!("Duplicate key?? {}", object.key)))
@@ -98,7 +98,7 @@
             }
         }
         if objects.len() < max_keys + 1 {
-            truncated = false;
+            truncated = None;
             break 'query_loop;
         }
         if objects.len() > 0 {
@@ -113,11 +113,22 @@
         r#"<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">"#
     )
     .unwrap();
-    writeln!(&mut xml, "\t<Bucket>{}</Bucket>", bucket).unwrap();
+    writeln!(&mut xml, "\t<Name>{}</Name>", bucket).unwrap();
     writeln!(&mut xml, "\t<Prefix>{}</Prefix>", prefix).unwrap();
+    if let Some(mkr) = marker {
+        writeln!(&mut xml, "\t<Marker>{}</Marker>", mkr).unwrap();
+    }
     writeln!(&mut xml, "\t<KeyCount>{}</KeyCount>", result_keys.len()).unwrap();
     writeln!(&mut xml, "\t<MaxKeys>{}</MaxKeys>", max_keys).unwrap();
-    writeln!(&mut xml, "\t<IsTruncated>{}</IsTruncated>", truncated).unwrap();
+    writeln!(
+        &mut xml,
+        "\t<IsTruncated>{}</IsTruncated>",
+        truncated.is_some()
+    )
+    .unwrap();
+    if let Some(next_marker) = truncated {
+        writeln!(&mut xml, "\t<NextMarker>{}</NextMarker>", next_marker).unwrap();
+    }
     for (key, info) in result_keys.iter() {
         let last_modif = NaiveDateTime::from_timestamp(info.last_modified as i64 / 1000, 0);
         let last_modif = DateTime::<Utc>::from_utc(last_modif, Utc);
@@ -132,6 +143,9 @@
         .unwrap();
         writeln!(&mut xml, "\t\t<LastModified>{}</LastModified>", last_modif).unwrap();
         writeln!(&mut xml, "\t\t<Size>{}</Size>", info.size).unwrap();
+        if !info.etag.is_empty() {
+            writeln!(&mut xml, "\t\t<ETag>\"{}\"</ETag>", info.etag).unwrap();
+        }
         writeln!(&mut xml, "\t\t<StorageClass>STANDARD</StorageClass>").unwrap();
         writeln!(&mut xml, "\t</Contents>").unwrap();
     }
diff --git a/src/api/s3_put.rs b/src/api/s3_put.rs
index 9c4d625c..c42309b2 100644
--- a/src/api/s3_put.rs
+++ b/src/api/s3_put.rs
@@ -428,6 +428,21 @@ pub async fn handle_complete_multipart_upload(
         _ => unreachable!(),
     };
 
+    // ETag calculation: we produce ETags that have the same form as
+    // those of S3 multipart uploads, but we don't use their actual
+    // calculation for the first part (we use random bytes). This
+    // shouldn't impact compatibility as the S3 docs specify that
+    // the ETag is an opaque value in case of a multipart upload.
+    // See also: https://teppen.io/2018/06/23/aws_s3_etags/
+    let num_parts = version.blocks().last().unwrap().part_number
+        - version.blocks().first().unwrap().part_number
+        + 1;
+    let etag = format!(
+        "{}-{}",
+        hex::encode(&rand::random::<[u8; 16]>()[..]),
+        num_parts
+    );
+
     // TODO: check that all the parts that they pretend they gave us are indeed there
     // TODO: when we read the XML from _req, remember to check the sha256 sum of the payload
     // against the signed x-amz-content-sha256
@@ -442,7 +457,7 @@
         ObjectVersionMeta {
             headers,
             size: total_size,
-            etag: "".to_string(), // TODO
+            etag: etag,
         },
         version.blocks()[0].hash,
     ));
diff --git a/src/table/table.rs b/src/table/table.rs
index 5dfee3c8..acb46325 100644
--- a/src/table/table.rs
+++ b/src/table/table.rs
@@ -391,7 +391,8 @@ where
         let (old_entry, new_entry) = self.store.transaction(|db| {
             let (old_entry, new_entry) = match db.get(&tree_key)? {
                 Some(prev_bytes) => {
-                    let old_entry = self.decode_entry(&prev_bytes)
+                    let old_entry = self
+                        .decode_entry(&prev_bytes)
                         .map_err(sled::ConflictableTransactionError::Abort)?;
                     let mut new_entry = old_entry.clone();
                     new_entry.merge(&update);
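Two client-visible behaviors introduced above are worth spelling out: `handle_list` now reports truncation as `<IsTruncated>` plus a `<NextMarker>` to resume from (and echoes the request's `<Marker>`), and a completed multipart upload now gets an opaque `"<random hex>-<part count>"` ETag instead of an empty one. The following is a rough client-side sketch (again assuming `rusoto_s3`, not code from this commit) of how a consumer would page through a large bucket using those fields:

```rust
// Hypothetical sketch, not part of this commit: list every key in a bucket by
// feeding NextMarker back as the next request's Marker, which handle_list()
// now supports. Multipart ETags ("<32 hex chars>-<number of parts>") are
// treated as opaque identifiers, exactly as the comment in s3_put.rs says.
use rusoto_s3::{ListObjectsRequest, S3, S3Client};

async fn list_all_keys(
    client: &S3Client,
    bucket: &str,
) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let mut keys = Vec::new();
    let mut marker: Option<String> = None;
    loop {
        let page = client
            .list_objects(ListObjectsRequest {
                bucket: bucket.to_string(),
                marker: marker.clone(),
                max_keys: Some(1000),
                ..Default::default()
            })
            .await?;
        for obj in page.contents.unwrap_or_default() {
            if let Some(key) = obj.key {
                keys.push(key);
            }
        }
        if page.is_truncated.unwrap_or(false) {
            // Garage returns NextMarker whenever the listing is truncated;
            // fall back to the last key seen, as plain S3 V1 clients do.
            marker = page.next_marker.or_else(|| keys.last().cloned());
        } else {
            break;
        }
    }
    Ok(keys)
}
```

For the dev bucket used in `script/test-smoke.sh` everything fits in one page; the loop only matters once a listing exceeds `max_keys`.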