author     Alex Auvolat <alex@adnab.me>    2021-03-10 16:21:56 +0100
committer  Alex Auvolat <alex@adnab.me>    2021-03-10 16:21:56 +0100
commit     f319a7d3740ba8b83c9c0eae27edfda1c1d14c03 (patch)
tree       efde4606ad33dcf5ad357f82553ad3b07d4a9858 /src/api
parent     6a3dcf39740cda27e61b93582b6fea66991ec4f2 (diff)
Refactor model stuff, including cleaner CRDTs
Diffstat (limited to 'src/api')
-rw-r--r--  src/api/s3_copy.rs  13
-rw-r--r--  src/api/s3_get.rs   12
-rw-r--r--  src/api/s3_put.rs   37
3 files changed, 34 insertions, 28 deletions
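
The patch below switches Version's block list from a plain vector to a CRDT map keyed by (part_number, offset): Version::new() loses its block-list argument, and every call site now goes through version.blocks.put(), .items() and .len(), with entries being (VersionBlockKey, VersionBlock) pairs. The real types live outside src/api and are not part of this diff; the following is only a minimal sketch of the shape the call sites assume, with the map implemented as a sorted-Vec stand-in and none of the real merge/replication logic.

    // Minimal sketch of the block-map shape the call sites in this patch
    // assume. Only the names visible in the diff (VersionBlockKey,
    // VersionBlock, Version::new, blocks, put, items, len) are taken from
    // the real code; the map itself is a toy with no CRDT merge logic.

    type Hash = [u8; 32];

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    struct VersionBlockKey {
        part_number: u64, // part of the multipart upload this block belongs to
        offset: u64,      // offset of this block within its part
    }

    #[derive(Clone, Copy)]
    struct VersionBlock {
        hash: Hash, // content hash identifying the stored data block
        size: u64,  // size of the block in bytes
    }

    // Stand-in for the CRDT map: entries kept sorted by key so that items()
    // iterates parts and offsets in order; put() inserts or overwrites.
    struct BlockMap {
        vals: Vec<(VersionBlockKey, VersionBlock)>,
    }

    impl BlockMap {
        fn new() -> Self {
            Self { vals: vec![] }
        }
        fn put(&mut self, k: VersionBlockKey, v: VersionBlock) {
            match self.vals.binary_search_by(|(k2, _)| k2.cmp(&k)) {
                Ok(i) => self.vals[i].1 = v,
                Err(i) => self.vals.insert(i, (k, v)),
            }
        }
        fn items(&self) -> &[(VersionBlockKey, VersionBlock)] {
            &self.vals[..]
        }
        fn len(&self) -> usize {
            self.vals.len()
        }
    }

    struct Version {
        uuid: [u8; 32],
        bucket: String,
        key: String,
        deleted: bool,
        blocks: BlockMap,
    }

    impl Version {
        // No block-list argument anymore: the map starts empty and is filled
        // through put(), matching the new call sites in the patch.
        fn new(uuid: [u8; 32], bucket: String, key: String, deleted: bool) -> Self {
            Self { uuid, bucket, key, deleted, blocks: BlockMap::new() }
        }
    }

    fn main() {
        let mut v = Version::new([0u8; 32], "bucket".into(), "key".into(), false);
        v.blocks.put(
            VersionBlockKey { part_number: 1, offset: 0 },
            VersionBlock { hash: [0u8; 32], size: 1024 },
        );
        println!("{} block(s)", v.blocks.len());
    }
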
diff --git a/src/api/s3_copy.rs b/src/api/s3_copy.rs
index b6ec48b0..c6c30095 100644
--- a/src/api/s3_copy.rs
+++ b/src/api/s3_copy.rs
@@ -66,25 +66,28 @@ pub async fn handle_copy(
.await?;
let source_version = source_version.ok_or(Error::NotFound)?;
- let dest_version = Version::new(
+ let mut dest_version = Version::new(
new_uuid,
dest_bucket.to_string(),
dest_key.to_string(),
false,
- source_version.blocks().to_vec(),
);
+ for (bk, bv) in source_version.blocks.items().iter() {
+ dest_version.blocks.put(*bk, *bv);
+ }
let dest_object = Object::new(
dest_bucket.to_string(),
dest_key.to_string(),
vec![dest_object_version],
);
let dest_block_refs = dest_version
- .blocks()
+ .blocks
+ .items()
.iter()
.map(|b| BlockRef {
- block: b.hash,
+ block: b.1.hash,
version: new_uuid,
- deleted: false,
+ deleted: false.into(),
})
.collect::<Vec<_>>();
futures::try_join!(
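
In handle_copy the destination block list can no longer be passed to Version::new(); it is rebuilt by replaying the source entries through put(), and each copied block still gets a BlockRef, whose deleted flag is now written as false.into() (presumably a mergeable boolean wrapper rather than a plain bool). A rough illustration of that step, reusing the toy types from the sketch above and a simplified BlockRef:

    // Rough illustration of the copy step in handle_copy, using the toy
    // Version/BlockMap types from the sketch above. BlockRef is simplified:
    // in the real code its `deleted` field is a mergeable flag, which is why
    // the patch writes `deleted: false.into()`.
    struct BlockRef {
        block: Hash,       // hash of the referenced data block
        version: [u8; 32], // version that holds a reference to it
        deleted: bool,     // simplified; a CRDT boolean in the real code
    }

    fn copy_blocks(source: &Version, dest: &mut Version, new_uuid: [u8; 32]) -> Vec<BlockRef> {
        // Rebuild the destination block map entry by entry, since it can no
        // longer be handed to Version::new() as a vector.
        for (bk, bv) in source.blocks.items().iter() {
            dest.blocks.put(*bk, *bv);
        }
        // Emit one BlockRef per copied block so the data blocks are kept
        // alive by the new version.
        dest.blocks
            .items()
            .iter()
            .map(|(_, bv)| BlockRef {
                block: bv.hash,
                version: new_uuid,
                deleted: false,
            })
            .collect()
    }
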
diff --git a/src/api/s3_get.rs b/src/api/s3_get.rs
index 68e7c66a..22a55b55 100644
--- a/src/api/s3_get.rs
+++ b/src/api/s3_get.rs
@@ -146,9 +146,10 @@ pub async fn handle_get(
let version = version.ok_or(Error::NotFound)?;
let mut blocks = version
- .blocks()
+ .blocks
+ .items()
.iter()
- .map(|vb| (vb.hash, None))
+ .map(|(_, vb)| (vb.hash, None))
.collect::<Vec<_>>();
blocks[0].1 = Some(first_block);
@@ -219,11 +220,12 @@ pub async fn handle_get_range(
// file (whereas block.offset designates the offset of the block WITHIN THE PART
// block.part_number, which is not the same in the case of a multipart upload)
let mut blocks = Vec::with_capacity(std::cmp::min(
- version.blocks().len(),
- 4 + ((end - begin) / std::cmp::max(version.blocks()[0].size as u64, 1024)) as usize,
+ version.blocks.len(),
+ 4 + ((end - begin) / std::cmp::max(version.blocks.items()[0].1.size as u64, 1024))
+ as usize,
));
let mut true_offset = 0;
- for b in version.blocks().iter() {
+ for (_, b) in version.blocks.items().iter() {
if true_offset >= end {
break;
}
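
In the GET handlers the change is mostly mechanical: iteration now yields (VersionBlockKey, VersionBlock) pairs, so sizes and hashes are read from the value side (vb.size, vb.hash). The range path walks the ordered entries while accumulating each block's absolute offset in the object, stopping once it passes the end of the requested range; most of that loop lies outside this hunk, so the following is only a rough reconstruction using the toy types from the first sketch:

    // Rough reconstruction of the block selection for a ranged GET: walk the
    // blocks in key order, track each block's absolute offset within the
    // object, and keep those overlapping [begin, end).
    fn blocks_for_range(
        version: &Version,
        begin: u64,
        end: u64,
    ) -> Vec<(VersionBlockKey, VersionBlock)> {
        let mut out = Vec::new();
        let mut true_offset = 0u64;
        for (bk, bv) in version.blocks.items().iter() {
            if true_offset >= end {
                break;
            }
            // This block covers [true_offset, true_offset + size) in the object.
            if true_offset + bv.size > begin {
                out.push((*bk, *bv));
            }
            true_offset += bv.size;
        }
        out
    }
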
diff --git a/src/api/s3_put.rs b/src/api/s3_put.rs
index ec599a05..37a1ece2 100644
--- a/src/api/s3_put.rs
+++ b/src/api/s3_put.rs
@@ -94,7 +94,7 @@ pub async fn handle_put(
garage.object_table.insert(&object).await?;
// Initialize corresponding entry in version table
- let version = Version::new(version_uuid, bucket.into(), key.into(), false, vec![]);
+ let version = Version::new(version_uuid, bucket.into(), key.into(), false);
let first_block_hash = sha256sum(&first_block[..]);
// Transfer data and verify checksum
@@ -242,19 +242,18 @@ async fn put_block_meta(
) -> Result<(), GarageError> {
// TODO: don't clone, restart from empty block list ??
let mut version = version.clone();
- version
- .add_block(VersionBlock {
+ version.blocks.put(
+ VersionBlockKey {
part_number,
offset,
- hash,
- size,
- })
- .unwrap();
+ },
+ VersionBlock { hash, size },
+ );
let block_ref = BlockRef {
block: hash,
version: version.uuid,
- deleted: false,
+ deleted: false.into(),
};
futures::try_join!(
@@ -389,7 +388,7 @@ pub async fn handle_put_part(
}
// Copy block to store
- let version = Version::new(version_uuid, bucket, key, false, vec![]);
+ let version = Version::new(version_uuid, bucket, key, false);
let first_block_hash = sha256sum(&first_block[..]);
let (_, md5sum_arr, sha256sum) = read_and_put_blocks(
&garage,
@@ -454,7 +453,7 @@ pub async fn handle_complete_multipart_upload(
};
let version = version.ok_or(Error::BadRequest(format!("Version not found")))?;
- if version.blocks().len() == 0 {
+ if version.blocks.len() == 0 {
return Err(Error::BadRequest(format!("No data was uploaded")));
}
@@ -466,9 +465,10 @@ pub async fn handle_complete_multipart_upload(
// Check that the list of parts they gave us corresponds to the parts we have here
// TODO: check MD5 sum of all uploaded parts? but that would mean we have to store them somewhere...
let mut parts = version
- .blocks()
+ .blocks
+ .items()
.iter()
- .map(|x| x.part_number)
+ .map(|x| x.0.part_number)
.collect::<Vec<_>>();
parts.dedup();
let same_parts = body_list_of_parts
@@ -485,8 +485,8 @@ pub async fn handle_complete_multipart_upload(
// shouldn't impact compatibility as the S3 docs specify that
// the ETag is an opaque value in case of a multipart upload.
// See also: https://teppen.io/2018/06/23/aws_s3_etags/
- let num_parts = version.blocks().last().unwrap().part_number
-     - version.blocks().first().unwrap().part_number
+ let num_parts = version.blocks.items().last().unwrap().0.part_number
+     - version.blocks.items().first().unwrap().0.part_number
      + 1;
let etag = format!(
"{}-{}",
@@ -495,17 +495,18 @@ pub async fn handle_complete_multipart_upload(
);
let total_size = version
- .blocks()
+ .blocks
+ .items()
.iter()
- .map(|x| x.size)
+ .map(|x| x.1.size)
.fold(0, |x, y| x + y);
object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock(
ObjectVersionMeta {
headers,
size: total_size,
- etag: etag,
+ etag,
},
- version.blocks()[0].hash,
+ version.blocks.items()[0].1.hash,
));
let final_object = Object::new(bucket.clone(), key.clone(), vec![object_version]);
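
At multipart-upload completion, part numbers are now read from the key side of each entry (x.0.part_number) and sizes and the first block's hash from the value side (x.1.size, x.1.hash). A compact sketch of the two summary values derived from the map in this last hunk, again over the toy types from the first sketch:

    // Sketch of the summary values computed when completing a multipart
    // upload: the part count for the ETag suffix comes from the first and
    // last part numbers, and the object size is the sum of all block sizes.
    // Returns None if the version has no blocks (the handler rejects that
    // case with a BadRequest error).
    fn multipart_summary(version: &Version) -> Option<(u64, u64)> {
        let items = version.blocks.items();
        let first = items.first()?;
        let last = items.last()?;
        let num_parts = last.0.part_number - first.0.part_number + 1;
        let total_size: u64 = items.iter().map(|(_, b)| b.size).sum();
        Some((num_parts, total_size))
    }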