From 5768bf362262f78376af14517c4921941986192e Mon Sep 17 00:00:00 2001 From: Alex Date: Tue, 10 May 2022 13:16:57 +0200 Subject: First implementation of K2V (#293) **Specification:** View spec at [this URL](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md) - [x] Specify the structure of K2V triples - [x] Specify the DVVS format used for causality detection - [x] Specify the K2V index (just a counter of number of values per partition key) - [x] Specify single-item endpoints: ReadItem, InsertItem, DeleteItem - [x] Specify index endpoint: ReadIndex - [x] Specify multi-item endpoints: InsertBatch, ReadBatch, DeleteBatch - [x] Move to JSON objects instead of tuples - [x] Specify endpoints for polling for updates on single values (PollItem) **Implementation:** - [x] Table for K2V items, causal contexts - [x] Indexing mechanism and table for K2V index - [x] Make API handlers a bit more generic - [x] K2V API endpoint - [x] K2V API router - [x] ReadItem - [x] InsertItem - [x] DeleteItem - [x] PollItem - [x] ReadIndex - [x] InsertBatch - [x] ReadBatch - [x] DeleteBatch **Testing:** - [x] Just a simple Python script that does some requests to check visually that things are going right (does not contain parsing of results or assertions on returned values) - [x] Actual tests: - [x] Adapt testing framework - [x] Simple test with InsertItem + ReadItem - [x] Test with several Insert/Read/DeleteItem + ReadIndex - [x] Test all combinations of return formats for ReadItem - [x] Test with ReadBatch, InsertBatch, DeleteBatch - [x] Test with PollItem - [x] Test error codes - [ ] Fix most broken stuff - [x] test PollItem broken randomly - [x] when invalid causality tokens are given, errors should be 4xx not 5xx **Improvements:** - [x] Descending range queries - [x] Specify - [x] Implement - [x] Add test - [x] Batch updates to index counter - [x] Put K2V behind `k2v` feature flag Co-authored-by: Alex Auvolat Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/293 Co-authored-by: Alex Co-committed-by: Alex --- src/api/s3/put.rs | 753 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 753 insertions(+) create mode 100644 src/api/s3/put.rs (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs new file mode 100644 index 00000000..89aa8d84 --- /dev/null +++ b/src/api/s3/put.rs @@ -0,0 +1,753 @@ +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::sync::Arc; + +use futures::prelude::*; +use hyper::body::{Body, Bytes}; +use hyper::header::{HeaderMap, HeaderValue}; +use hyper::{Request, Response}; +use md5::{digest::generic_array::*, Digest as Md5Digest, Md5}; +use sha2::Sha256; + +use garage_table::*; +use garage_util::data::*; +use garage_util::error::Error as GarageError; +use garage_util::time::*; + +use garage_block::manager::INLINE_THRESHOLD; +use garage_model::garage::Garage; +use garage_model::s3::block_ref_table::*; +use garage_model::s3::object_table::*; +use garage_model::s3::version_table::*; + +use crate::error::*; +use crate::s3::xml as s3_xml; +use crate::signature::verify_signed_content; + +pub async fn handle_put( + garage: Arc, + req: Request, + bucket_id: Uuid, + key: &str, + content_sha256: Option, +) -> Result, Error> { + // Retrieve interesting headers from request + let headers = get_headers(req.headers())?; + debug!("Object headers: {:?}", headers); + + let content_md5 = match req.headers().get("content-md5") { + Some(x) => Some(x.to_str()?.to_string()), + None => None, + }; + + let (_head, 
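+	// Only the body is used from here on; the interesting headers were
+	// already extracted above.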
body) = req.into_parts(); + let body = body.map_err(Error::from); + + save_stream( + garage, + headers, + body, + bucket_id, + key, + content_md5, + content_sha256, + ) + .await + .map(|(uuid, md5)| put_response(uuid, md5)) +} + +pub(crate) async fn save_stream> + Unpin>( + garage: Arc, + headers: ObjectVersionHeaders, + body: S, + bucket_id: Uuid, + key: &str, + content_md5: Option, + content_sha256: Option, +) -> Result<(Uuid, String), Error> { + // Generate identity of new version + let version_uuid = gen_uuid(); + let version_timestamp = now_msec(); + + let mut chunker = StreamChunker::new(body, garage.config.block_size); + let first_block = chunker.next().await?.unwrap_or_default(); + + // If body is small enough, store it directly in the object table + // as "inline data". We can then return immediately. + if first_block.len() < INLINE_THRESHOLD { + let mut md5sum = Md5::new(); + md5sum.update(&first_block[..]); + let data_md5sum = md5sum.finalize(); + let data_md5sum_hex = hex::encode(data_md5sum); + + let data_sha256sum = sha256sum(&first_block[..]); + + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + )?; + + let object_version = ObjectVersion { + uuid: version_uuid, + timestamp: version_timestamp, + state: ObjectVersionState::Complete(ObjectVersionData::Inline( + ObjectVersionMeta { + headers, + size: first_block.len() as u64, + etag: data_md5sum_hex.clone(), + }, + first_block, + )), + }; + + let object = Object::new(bucket_id, key.into(), vec![object_version]); + garage.object_table.insert(&object).await?; + + return Ok((version_uuid, data_md5sum_hex)); + } + + // Write version identifier in object table so that we have a trace + // that we are uploading something + let mut object_version = ObjectVersion { + uuid: version_uuid, + timestamp: version_timestamp, + state: ObjectVersionState::Uploading(headers.clone()), + }; + let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); + garage.object_table.insert(&object).await?; + + // Initialize corresponding entry in version table + // Write this entry now, even with empty block list, + // to prevent block_ref entries from being deleted (they can be deleted + // if the reference a version that isn't found in the version table) + let version = Version::new(version_uuid, bucket_id, key.into(), false); + garage.version_table.insert(&version).await?; + + // Transfer data and verify checksum + let first_block_hash = blake2sum(&first_block[..]); + let tx_result = read_and_put_blocks( + &garage, + &version, + 1, + first_block, + first_block_hash, + &mut chunker, + ) + .await + .and_then(|(total_size, data_md5sum, data_sha256sum)| { + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + ) + .map(|()| (total_size, data_md5sum)) + }); + + // If something went wrong, clean up + let (total_size, md5sum_arr) = match tx_result { + Ok(rv) => rv, + Err(e) => { + // Mark object as aborted, this will free the blocks further down + object_version.state = ObjectVersionState::Aborted; + let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); + garage.object_table.insert(&object).await?; + return Err(e); + } + }; + + // Save final object state, marked as Complete + let md5sum_hex = hex::encode(md5sum_arr); + object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( + ObjectVersionMeta { + headers, + size: total_size, + etag: md5sum_hex.clone(), + }, + 
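+		// Only the first block's hash is stored inline in the object entry;
+		// the full block list is tracked in the version table written above.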
first_block_hash, + )); + let object = Object::new(bucket_id, key.into(), vec![object_version]); + garage.object_table.insert(&object).await?; + + Ok((version_uuid, md5sum_hex)) +} + +/// Validate MD5 sum against content-md5 header +/// and sha256sum against signed content-sha256 +fn ensure_checksum_matches( + data_md5sum: &[u8], + data_sha256sum: garage_util::data::FixedBytes32, + content_md5: Option<&str>, + content_sha256: Option, +) -> Result<(), Error> { + if let Some(expected_sha256) = content_sha256 { + if expected_sha256 != data_sha256sum { + return Err(Error::BadRequest( + "Unable to validate x-amz-content-sha256".to_string(), + )); + } else { + trace!("Successfully validated x-amz-content-sha256"); + } + } + if let Some(expected_md5) = content_md5 { + if expected_md5.trim_matches('"') != base64::encode(data_md5sum) { + return Err(Error::BadRequest( + "Unable to validate content-md5".to_string(), + )); + } else { + trace!("Successfully validated content-md5"); + } + } + Ok(()) +} + +async fn read_and_put_blocks> + Unpin>( + garage: &Garage, + version: &Version, + part_number: u64, + first_block: Vec, + first_block_hash: Hash, + chunker: &mut StreamChunker, +) -> Result<(u64, GenericArray, Hash), Error> { + let mut md5hasher = Md5::new(); + let mut sha256hasher = Sha256::new(); + md5hasher.update(&first_block[..]); + sha256hasher.update(&first_block[..]); + + let mut next_offset = first_block.len(); + let mut put_curr_version_block = put_block_meta( + garage, + version, + part_number, + 0, + first_block_hash, + first_block.len() as u64, + ); + let mut put_curr_block = garage + .block_manager + .rpc_put_block(first_block_hash, first_block); + + loop { + let (_, _, next_block) = futures::try_join!( + put_curr_block.map_err(Error::from), + put_curr_version_block.map_err(Error::from), + chunker.next(), + )?; + if let Some(block) = next_block { + md5hasher.update(&block[..]); + sha256hasher.update(&block[..]); + let block_hash = blake2sum(&block[..]); + let block_len = block.len(); + put_curr_version_block = put_block_meta( + garage, + version, + part_number, + next_offset as u64, + block_hash, + block_len as u64, + ); + put_curr_block = garage.block_manager.rpc_put_block(block_hash, block); + next_offset += block_len; + } else { + break; + } + } + + let total_size = next_offset as u64; + let data_md5sum = md5hasher.finalize(); + + let data_sha256sum = sha256hasher.finalize(); + let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap(); + + Ok((total_size, data_md5sum, data_sha256sum)) +} + +async fn put_block_meta( + garage: &Garage, + version: &Version, + part_number: u64, + offset: u64, + hash: Hash, + size: u64, +) -> Result<(), GarageError> { + let mut version = version.clone(); + version.blocks.put( + VersionBlockKey { + part_number, + offset, + }, + VersionBlock { hash, size }, + ); + + let block_ref = BlockRef { + block: hash, + version: version.uuid, + deleted: false.into(), + }; + + futures::try_join!( + garage.version_table.insert(&version), + garage.block_ref_table.insert(&block_ref), + )?; + Ok(()) +} + +struct StreamChunker>> { + stream: S, + read_all: bool, + block_size: usize, + buf: VecDeque, +} + +impl> + Unpin> StreamChunker { + fn new(stream: S, block_size: usize) -> Self { + Self { + stream, + read_all: false, + block_size, + buf: VecDeque::with_capacity(2 * block_size), + } + } + + async fn next(&mut self) -> Result>, Error> { + while !self.read_all && self.buf.len() < self.block_size { + if let Some(block) = self.stream.next().await { + let bytes = block?; 
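+				// Keep buffering body chunks until at least one full block is available.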
+ trace!("Body next: {} bytes", bytes.len()); + self.buf.extend(bytes); + } else { + self.read_all = true; + } + } + + if self.buf.is_empty() { + Ok(None) + } else if self.buf.len() <= self.block_size { + let block = self.buf.drain(..).collect::>(); + Ok(Some(block)) + } else { + let block = self.buf.drain(..self.block_size).collect::>(); + Ok(Some(block)) + } + } +} + +pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response { + Response::builder() + .header("x-amz-version-id", hex::encode(version_uuid)) + .header("ETag", format!("\"{}\"", md5sum_hex)) + .body(Body::from(vec![])) + .unwrap() +} + +pub async fn handle_create_multipart_upload( + garage: Arc, + req: &Request, + bucket_name: &str, + bucket_id: Uuid, + key: &str, +) -> Result, Error> { + let version_uuid = gen_uuid(); + let headers = get_headers(req.headers())?; + + // Create object in object table + let object_version = ObjectVersion { + uuid: version_uuid, + timestamp: now_msec(), + state: ObjectVersionState::Uploading(headers), + }; + let object = Object::new(bucket_id, key.to_string(), vec![object_version]); + garage.object_table.insert(&object).await?; + + // Insert empty version so that block_ref entries refer to something + // (they are inserted concurrently with blocks in the version table, so + // there is the possibility that they are inserted before the version table + // is created, in which case it is allowed to delete them, e.g. in repair_*) + let version = Version::new(version_uuid, bucket_id, key.into(), false); + garage.version_table.insert(&version).await?; + + // Send success response + let result = s3_xml::InitiateMultipartUploadResult { + xmlns: (), + bucket: s3_xml::Value(bucket_name.to_string()), + key: s3_xml::Value(key.to_string()), + upload_id: s3_xml::Value(hex::encode(version_uuid)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::new(Body::from(xml.into_bytes()))) +} + +pub async fn handle_put_part( + garage: Arc, + req: Request, + bucket_id: Uuid, + key: &str, + part_number: u64, + upload_id: &str, + content_sha256: Option, +) -> Result, Error> { + let version_uuid = decode_upload_id(upload_id)?; + + let content_md5 = match req.headers().get("content-md5") { + Some(x) => Some(x.to_str()?.to_string()), + None => None, + }; + + // Read first chuck, and at the same time try to get object to see if it exists + let key = key.to_string(); + + let body = req.into_body().map_err(Error::from); + let mut chunker = StreamChunker::new(body, garage.config.block_size); + + let (object, version, first_block) = futures::try_join!( + garage + .object_table + .get(&bucket_id, &key) + .map_err(Error::from), + garage + .version_table + .get(&version_uuid, &EmptyKey) + .map_err(Error::from), + chunker.next(), + )?; + + // Check object is valid and multipart block can be accepted + let first_block = first_block.ok_or_bad_request("Empty body")?; + let object = object.ok_or_bad_request("Object not found")?; + + if !object + .versions() + .iter() + .any(|v| v.uuid == version_uuid && v.is_uploading()) + { + return Err(Error::NoSuchUpload); + } + + // Check part hasn't already been uploaded + if let Some(v) = version { + if v.has_part_number(part_number) { + return Err(Error::BadRequest(format!( + "Part number {} has already been uploaded", + part_number + ))); + } + } + + // Copy block to store + let version = Version::new(version_uuid, bucket_id, key, false); + let first_block_hash = blake2sum(&first_block[..]); + let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( + &garage, 
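+		// The first block was already read above, concurrently with fetching
+		// the object and version entries; the chunker supplies the rest.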
+ &version, + part_number, + first_block, + first_block_hash, + &mut chunker, + ) + .await?; + + // Verify that checksums map + ensure_checksum_matches( + data_md5sum.as_slice(), + data_sha256sum, + content_md5.as_deref(), + content_sha256, + )?; + + // Store part etag in version + let data_md5sum_hex = hex::encode(data_md5sum); + let mut version = version; + version + .parts_etags + .put(part_number, data_md5sum_hex.clone()); + garage.version_table.insert(&version).await?; + + let response = Response::builder() + .header("ETag", format!("\"{}\"", data_md5sum_hex)) + .body(Body::empty()) + .unwrap(); + Ok(response) +} + +pub async fn handle_complete_multipart_upload( + garage: Arc, + req: Request, + bucket_name: &str, + bucket_id: Uuid, + key: &str, + upload_id: &str, + content_sha256: Option, +) -> Result, Error> { + let body = hyper::body::to_bytes(req.into_body()).await?; + + if let Some(content_sha256) = content_sha256 { + verify_signed_content(content_sha256, &body[..])?; + } + + let body_xml = roxmltree::Document::parse(std::str::from_utf8(&body)?)?; + let body_list_of_parts = parse_complete_multipart_upload_body(&body_xml) + .ok_or_bad_request("Invalid CompleteMultipartUpload XML")?; + debug!( + "CompleteMultipartUpload list of parts: {:?}", + body_list_of_parts + ); + + let version_uuid = decode_upload_id(upload_id)?; + + // Get object and version + let key = key.to_string(); + let (object, version) = futures::try_join!( + garage.object_table.get(&bucket_id, &key), + garage.version_table.get(&version_uuid, &EmptyKey), + )?; + + let object = object.ok_or(Error::NoSuchKey)?; + let mut object_version = object + .versions() + .iter() + .find(|v| v.uuid == version_uuid && v.is_uploading()) + .cloned() + .ok_or(Error::NoSuchUpload)?; + + let version = version.ok_or(Error::NoSuchKey)?; + if version.blocks.is_empty() { + return Err(Error::BadRequest("No data was uploaded".to_string())); + } + + let headers = match object_version.state { + ObjectVersionState::Uploading(headers) => headers, + _ => unreachable!(), + }; + + // Check that part numbers are an increasing sequence. + // (it doesn't need to start at 1 nor to be a continuous sequence, + // see discussion in #192) + if body_list_of_parts.is_empty() { + return Err(Error::EntityTooSmall); + } + if !body_list_of_parts + .iter() + .zip(body_list_of_parts.iter().skip(1)) + .all(|(p1, p2)| p1.part_number < p2.part_number) + { + return Err(Error::InvalidPartOrder); + } + + // Garage-specific restriction, see #204: part numbers must be + // consecutive starting at 1 + if body_list_of_parts[0].part_number != 1 + || !body_list_of_parts + .iter() + .zip(body_list_of_parts.iter().skip(1)) + .all(|(p1, p2)| p1.part_number + 1 == p2.part_number) + { + return Err(Error::NotImplemented("Garage does not support completing a Multipart upload with non-consecutive part numbers. This is a restriction of Garage's data model, which might be fixed in a future release. 
See issue #204 for more information on this topic.".into())); + } + + // Check that the list of parts they gave us corresponds to the parts we have here + debug!("Expected parts from request: {:?}", body_list_of_parts); + debug!("Parts stored in version: {:?}", version.parts_etags.items()); + let parts = version + .parts_etags + .items() + .iter() + .map(|pair| (&pair.0, &pair.1)); + let same_parts = body_list_of_parts + .iter() + .map(|x| (&x.part_number, &x.etag)) + .eq(parts); + if !same_parts { + return Err(Error::InvalidPart); + } + + // Check that all blocks belong to one of the parts + let block_parts = version + .blocks + .items() + .iter() + .map(|(bk, _)| bk.part_number) + .collect::>(); + let same_parts = body_list_of_parts + .iter() + .map(|x| x.part_number) + .eq(block_parts.into_iter()); + if !same_parts { + return Err(Error::BadRequest( + "Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again.".into(), + )); + } + + // Calculate etag of final object + // To understand how etags are calculated, read more here: + // https://teppen.io/2018/06/23/aws_s3_etags/ + let num_parts = body_list_of_parts.len(); + let mut etag_md5_hasher = Md5::new(); + for (_, etag) in version.parts_etags.items().iter() { + etag_md5_hasher.update(etag.as_bytes()); + } + let etag = format!("{}-{}", hex::encode(etag_md5_hasher.finalize()), num_parts); + + // Calculate total size of final object + let total_size = version.blocks.items().iter().map(|x| x.1.size).sum(); + + // Write final object version + object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( + ObjectVersionMeta { + headers, + size: total_size, + etag: etag.clone(), + }, + version.blocks.items()[0].1.hash, + )); + + let final_object = Object::new(bucket_id, key.clone(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + // Send response saying ok we're done + let result = s3_xml::CompleteMultipartUploadResult { + xmlns: (), + location: None, + bucket: s3_xml::Value(bucket_name.to_string()), + key: s3_xml::Value(key), + etag: s3_xml::Value(format!("\"{}\"", etag)), + }; + let xml = s3_xml::to_xml_with_header(&result)?; + + Ok(Response::new(Body::from(xml.into_bytes()))) +} + +pub async fn handle_abort_multipart_upload( + garage: Arc, + bucket_id: Uuid, + key: &str, + upload_id: &str, +) -> Result, Error> { + let version_uuid = decode_upload_id(upload_id)?; + + let object = garage + .object_table + .get(&bucket_id, &key.to_string()) + .await?; + let object = object.ok_or(Error::NoSuchKey)?; + + let object_version = object + .versions() + .iter() + .find(|v| v.uuid == version_uuid && v.is_uploading()); + let mut object_version = match object_version { + None => return Err(Error::NoSuchUpload), + Some(x) => x.clone(), + }; + + object_version.state = ObjectVersionState::Aborted; + let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + Ok(Response::new(Body::from(vec![]))) +} + +fn get_mime_type(headers: &HeaderMap) -> Result { + Ok(headers + .get(hyper::header::CONTENT_TYPE) + .map(|x| x.to_str()) + .unwrap_or(Ok("blob"))? 
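+		// "blob" is the fallback when the request carries no Content-Type header.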
+ .to_string()) +} + +pub(crate) fn get_headers(headers: &HeaderMap) -> Result { + let content_type = get_mime_type(headers)?; + let mut other = BTreeMap::new(); + + // Preserve standard headers + let standard_header = vec![ + hyper::header::CACHE_CONTROL, + hyper::header::CONTENT_DISPOSITION, + hyper::header::CONTENT_ENCODING, + hyper::header::CONTENT_LANGUAGE, + hyper::header::EXPIRES, + ]; + for h in standard_header.iter() { + if let Some(v) = headers.get(h) { + match v.to_str() { + Ok(v_str) => { + other.insert(h.to_string(), v_str.to_string()); + } + Err(e) => { + warn!("Discarding header {}, error in .to_str(): {}", h, e); + } + } + } + } + + // Preserve x-amz-meta- headers + for (k, v) in headers.iter() { + if k.as_str().starts_with("x-amz-meta-") { + match v.to_str() { + Ok(v_str) => { + other.insert(k.to_string(), v_str.to_string()); + } + Err(e) => { + warn!("Discarding header {}, error in .to_str(): {}", k, e); + } + } + } + } + + Ok(ObjectVersionHeaders { + content_type, + other, + }) +} + +pub fn decode_upload_id(id: &str) -> Result { + let id_bin = hex::decode(id).map_err(|_| Error::NoSuchUpload)?; + if id_bin.len() != 32 { + return Err(Error::NoSuchUpload); + } + let mut uuid = [0u8; 32]; + uuid.copy_from_slice(&id_bin[..]); + Ok(Uuid::from(uuid)) +} + +#[derive(Debug)] +struct CompleteMultipartUploadPart { + etag: String, + part_number: u64, +} + +fn parse_complete_multipart_upload_body( + xml: &roxmltree::Document, +) -> Option> { + let mut parts = vec![]; + + let root = xml.root(); + let cmu = root.first_child()?; + if !cmu.has_tag_name("CompleteMultipartUpload") { + return None; + } + + for item in cmu.children() { + // Only parse nodes + if !item.is_element() { + continue; + } + + if item.has_tag_name("Part") { + let etag = item.children().find(|e| e.has_tag_name("ETag"))?.text()?; + let part_number = item + .children() + .find(|e| e.has_tag_name("PartNumber"))? 
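+				// A missing ETag or PartNumber child aborts parsing: `?` on an
+				// Option here makes the whole function return None.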
+ .text()?; + parts.push(CompleteMultipartUploadPart { + etag: etag.trim_matches('"').to_string(), + part_number: part_number.parse().ok()?, + }); + } else { + return None; + } + } + + Some(parts) +} -- cgit v1.2.3 From 382e74c798263d042b1c6ca3788c866a8c69c4f4 Mon Sep 17 00:00:00 2001 From: Alex Date: Tue, 24 May 2022 12:16:39 +0200 Subject: First version of admin API (#298) **Spec:** - [x] Start writing - [x] Specify all layout endpoints - [x] Specify all endpoints for operations on keys - [x] Specify all endpoints for operations on key/bucket permissions - [x] Specify all endpoints for operations on buckets - [x] Specify all endpoints for operations on bucket aliases View rendered spec at **Code:** - [x] Refactor code for admin api to use common api code that was created for K2V **General endpoints:** - [x] Metrics - [x] GetClusterStatus - [x] ConnectClusterNodes - [x] GetClusterLayout - [x] UpdateClusterLayout - [x] ApplyClusterLayout - [x] RevertClusterLayout **Key-related endpoints:** - [x] ListKeys - [x] CreateKey - [x] ImportKey - [x] GetKeyInfo - [x] UpdateKey - [x] DeleteKey **Bucket-related endpoints:** - [x] ListBuckets - [x] CreateBucket - [x] GetBucketInfo - [x] DeleteBucket - [x] PutBucketWebsite - [x] DeleteBucketWebsite **Operations on key/bucket permissions:** - [x] BucketAllowKey - [x] BucketDenyKey **Operations on bucket aliases:** - [x] GlobalAliasBucket - [x] GlobalUnaliasBucket - [x] LocalAliasBucket - [x] LocalUnaliasBucket **And also:** - [x] Separate error type for the admin API (this PR includes a quite big refactoring of error handling) - [x] Add management of website access - [ ] Check that nothing is missing wrt what can be done using the CLI - [ ] Improve formatting of the spec - [x] Make sure everyone is cool with the API design Fix #231 Fix #295 Co-authored-by: Alex Auvolat Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/298 Co-authored-by: Alex Co-committed-by: Alex --- src/api/s3/put.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index 89aa8d84..8b06ef3f 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -19,7 +19,7 @@ use garage_model::s3::block_ref_table::*; use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; -use crate::error::*; +use crate::s3::error::*; use crate::s3::xml as s3_xml; use crate::signature::verify_signed_content; @@ -183,8 +183,8 @@ fn ensure_checksum_matches( ) -> Result<(), Error> { if let Some(expected_sha256) = content_sha256 { if expected_sha256 != data_sha256sum { - return Err(Error::BadRequest( - "Unable to validate x-amz-content-sha256".to_string(), + return Err(Error::bad_request( + "Unable to validate x-amz-content-sha256", )); } else { trace!("Successfully validated x-amz-content-sha256"); @@ -192,9 +192,7 @@ fn ensure_checksum_matches( } if let Some(expected_md5) = content_md5 { if expected_md5.trim_matches('"') != base64::encode(data_md5sum) { - return Err(Error::BadRequest( - "Unable to validate content-md5".to_string(), - )); + return Err(Error::bad_request("Unable to validate content-md5")); } else { trace!("Successfully validated content-md5"); } @@ -428,7 +426,7 @@ pub async fn handle_put_part( // Check part hasn't already been uploaded if let Some(v) = version { if v.has_part_number(part_number) { - return Err(Error::BadRequest(format!( + return Err(Error::bad_request(format!( "Part number {} has already been uploaded", part_number ))); @@ -513,7 +511,7 @@ pub 
async fn handle_complete_multipart_upload( let version = version.ok_or(Error::NoSuchKey)?; if version.blocks.is_empty() { - return Err(Error::BadRequest("No data was uploaded".to_string())); + return Err(Error::bad_request("No data was uploaded")); } let headers = match object_version.state { @@ -574,8 +572,8 @@ pub async fn handle_complete_multipart_upload( .map(|x| x.part_number) .eq(block_parts.into_iter()); if !same_parts { - return Err(Error::BadRequest( - "Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again.".into(), + return Err(Error::bad_request( + "Part numbers in block list and part list do not match. This can happen if a part was partially uploaded. Please abort the multipart upload and try again." )); } -- cgit v1.2.3 From 77e3fd6db2c9cd3a10889bd071e95ef839cfbefc Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 Jun 2022 20:20:28 +0200 Subject: improve internal item counter mechanisms and implement bucket quotas (#326) - [x] Refactoring of internal counting API - [x] Repair procedure for counters (it's an offline procedure!!!) - [x] New counter for objects in buckets - [x] Add quotas to buckets struct - [x] Add CLI to manage bucket quotas - [x] Add admin API to manage bucket quotas - [x] Apply quotas by adding checks on put operations - [x] Proof-read Co-authored-by: Alex Auvolat Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/326 Co-authored-by: Alex Co-committed-by: Alex --- src/api/s3/put.rs | 129 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 103 insertions(+), 26 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index 8b06ef3f..9ef37421 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque}; use std::sync::Arc; use futures::prelude::*; @@ -14,7 +14,9 @@ use garage_util::error::Error as GarageError; use garage_util::time::*; use garage_block::manager::INLINE_THRESHOLD; +use garage_model::bucket_table::Bucket; use garage_model::garage::Garage; +use garage_model::index_counter::CountedItem; use garage_model::s3::block_ref_table::*; use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; @@ -26,7 +28,7 @@ use crate::signature::verify_signed_content; pub async fn handle_put( garage: Arc, req: Request, - bucket_id: Uuid, + bucket: &Bucket, key: &str, content_sha256: Option, ) -> Result, Error> { @@ -46,7 +48,7 @@ pub async fn handle_put( garage, headers, body, - bucket_id, + bucket, key, content_md5, content_sha256, @@ -59,7 +61,7 @@ pub(crate) async fn save_stream> + Unpin>( garage: Arc, headers: ObjectVersionHeaders, body: S, - bucket_id: Uuid, + bucket: &Bucket, key: &str, content_md5: Option, content_sha256: Option, @@ -80,6 +82,7 @@ pub(crate) async fn save_stream> + Unpin>( let data_md5sum_hex = hex::encode(data_md5sum); let data_sha256sum = sha256sum(&first_block[..]); + let size = first_block.len() as u64; ensure_checksum_matches( data_md5sum.as_slice(), @@ -88,20 +91,22 @@ pub(crate) async fn save_stream> + Unpin>( content_sha256, )?; + check_quotas(&garage, bucket, key, size).await?; + let object_version = ObjectVersion { uuid: version_uuid, timestamp: version_timestamp, state: ObjectVersionState::Complete(ObjectVersionData::Inline( ObjectVersionMeta { headers, - size: first_block.len() as u64, + size, etag: 
data_md5sum_hex.clone(), }, first_block, )), }; - let object = Object::new(bucket_id, key.into(), vec![object_version]); + let object = Object::new(bucket.id, key.into(), vec![object_version]); garage.object_table.insert(&object).await?; return Ok((version_uuid, data_md5sum_hex)); @@ -114,36 +119,42 @@ pub(crate) async fn save_stream> + Unpin>( timestamp: version_timestamp, state: ObjectVersionState::Uploading(headers.clone()), }; - let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); + let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]); garage.object_table.insert(&object).await?; // Initialize corresponding entry in version table // Write this entry now, even with empty block list, // to prevent block_ref entries from being deleted (they can be deleted // if the reference a version that isn't found in the version table) - let version = Version::new(version_uuid, bucket_id, key.into(), false); + let version = Version::new(version_uuid, bucket.id, key.into(), false); garage.version_table.insert(&version).await?; // Transfer data and verify checksum let first_block_hash = blake2sum(&first_block[..]); - let tx_result = read_and_put_blocks( - &garage, - &version, - 1, - first_block, - first_block_hash, - &mut chunker, - ) - .await - .and_then(|(total_size, data_md5sum, data_sha256sum)| { + + let tx_result = (|| async { + let (total_size, data_md5sum, data_sha256sum) = read_and_put_blocks( + &garage, + &version, + 1, + first_block, + first_block_hash, + &mut chunker, + ) + .await?; + ensure_checksum_matches( data_md5sum.as_slice(), data_sha256sum, content_md5.as_deref(), content_sha256, - ) - .map(|()| (total_size, data_md5sum)) - }); + )?; + + check_quotas(&garage, bucket, key, total_size).await?; + + Ok((total_size, data_md5sum)) + })() + .await; // If something went wrong, clean up let (total_size, md5sum_arr) = match tx_result { @@ -151,7 +162,7 @@ pub(crate) async fn save_stream> + Unpin>( Err(e) => { // Mark object as aborted, this will free the blocks further down object_version.state = ObjectVersionState::Aborted; - let object = Object::new(bucket_id, key.into(), vec![object_version.clone()]); + let object = Object::new(bucket.id, key.into(), vec![object_version.clone()]); garage.object_table.insert(&object).await?; return Err(e); } @@ -167,7 +178,7 @@ pub(crate) async fn save_stream> + Unpin>( }, first_block_hash, )); - let object = Object::new(bucket_id, key.into(), vec![object_version]); + let object = Object::new(bucket.id, key.into(), vec![object_version]); garage.object_table.insert(&object).await?; Ok((version_uuid, md5sum_hex)) @@ -200,6 +211,64 @@ fn ensure_checksum_matches( Ok(()) } +/// Check that inserting this object with this size doesn't exceed bucket quotas +async fn check_quotas( + garage: &Arc, + bucket: &Bucket, + key: &str, + size: u64, +) -> Result<(), Error> { + let quotas = bucket.state.as_option().unwrap().quotas.get(); + if quotas.max_objects.is_none() && quotas.max_size.is_none() { + return Ok(()); + }; + + let key = key.to_string(); + let (prev_object, counters) = futures::try_join!( + garage.object_table.get(&bucket.id, &key), + garage.object_counter_table.table.get(&bucket.id, &EmptyKey), + )?; + + let counters = counters + .map(|x| x.filtered_values(&garage.system.ring.borrow())) + .unwrap_or_default(); + + let (prev_cnt_obj, prev_cnt_size) = match prev_object { + Some(o) => { + let prev_cnt = o.counts().into_iter().collect::>(); + ( + prev_cnt.get(OBJECTS).cloned().unwrap_or_default(), + 
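+				// Bytes previously counted for this key; used below to compute
+				// the size delta that the new object would add.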
prev_cnt.get(BYTES).cloned().unwrap_or_default(), + ) + } + None => (0, 0), + }; + let cnt_obj_diff = 1 - prev_cnt_obj; + let cnt_size_diff = size as i64 - prev_cnt_size; + + if let Some(mo) = quotas.max_objects { + let current_objects = counters.get(OBJECTS).cloned().unwrap_or_default(); + if cnt_obj_diff > 0 && current_objects + cnt_obj_diff > mo as i64 { + return Err(Error::forbidden(format!( + "Object quota is reached, maximum objects for this bucket: {}", + mo + ))); + } + } + + if let Some(ms) = quotas.max_size { + let current_size = counters.get(BYTES).cloned().unwrap_or_default(); + if cnt_size_diff > 0 && current_size + cnt_size_diff > ms as i64 { + return Err(Error::forbidden(format!( + "Bucket size quota is reached, maximum total size of objects for this bucket: {}. The bucket is already {} bytes, and this object would add {} bytes.", + ms, current_size, size + ))); + } + } + + Ok(()) +} + async fn read_and_put_blocks> + Unpin>( garage: &Garage, version: &Version, @@ -473,7 +542,7 @@ pub async fn handle_complete_multipart_upload( garage: Arc, req: Request, bucket_name: &str, - bucket_id: Uuid, + bucket: &Bucket, key: &str, upload_id: &str, content_sha256: Option, @@ -497,7 +566,7 @@ pub async fn handle_complete_multipart_upload( // Get object and version let key = key.to_string(); let (object, version) = futures::try_join!( - garage.object_table.get(&bucket_id, &key), + garage.object_table.get(&bucket.id, &key), garage.version_table.get(&version_uuid, &EmptyKey), )?; @@ -590,6 +659,14 @@ pub async fn handle_complete_multipart_upload( // Calculate total size of final object let total_size = version.blocks.items().iter().map(|x| x.1.size).sum(); + if let Err(e) = check_quotas(&garage, bucket, &key, total_size).await { + object_version.state = ObjectVersionState::Aborted; + let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); + garage.object_table.insert(&final_object).await?; + + return Err(e); + } + // Write final object version object_version.state = ObjectVersionState::Complete(ObjectVersionData::FirstBlock( ObjectVersionMeta { @@ -600,7 +677,7 @@ pub async fn handle_complete_multipart_upload( version.blocks.items()[0].1.hash, )); - let final_object = Object::new(bucket_id, key.clone(), vec![object_version]); + let final_object = Object::new(bucket.id, key.clone(), vec![object_version]); garage.object_table.insert(&final_object).await?; // Send response saying ok we're done -- cgit v1.2.3 From 1b2e1296eb99630e969e585ede0424072adc2d0c Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 18 Jul 2022 17:18:47 +0200 Subject: Compute hashes on dedicated threads --- src/api/s3/put.rs | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index 9ef37421..fbfa6f0d 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -9,6 +9,7 @@ use md5::{digest::generic_array::*, Digest as Md5Digest, Md5}; use sha2::Sha256; use garage_table::*; +use garage_util::async_hash::*; use garage_util::data::*; use garage_util::error::Error as GarageError; use garage_util::time::*; @@ -130,7 +131,8 @@ pub(crate) async fn save_stream> + Unpin>( garage.version_table.insert(&version).await?; // Transfer data and verify checksum - let first_block_hash = blake2sum(&first_block[..]); + let first_block = Bytes::from(first_block); + let first_block_hash = async_blake2sum(first_block.clone()).await; let tx_result = (|| async { let (total_size, data_md5sum, data_sha256sum) 
= read_and_put_blocks( @@ -273,14 +275,16 @@ async fn read_and_put_blocks> + Unpin>( garage: &Garage, version: &Version, part_number: u64, - first_block: Vec, + first_block: Bytes, first_block_hash: Hash, chunker: &mut StreamChunker, ) -> Result<(u64, GenericArray, Hash), Error> { - let mut md5hasher = Md5::new(); - let mut sha256hasher = Sha256::new(); - md5hasher.update(&first_block[..]); - sha256hasher.update(&first_block[..]); + let first_block = Bytes::from(first_block); + + let md5hasher = AsyncHasher::::new(); + let sha256hasher = AsyncHasher::::new(); + md5hasher.update(first_block.clone()); + sha256hasher.update(first_block.clone()); let mut next_offset = first_block.len(); let mut put_curr_version_block = put_block_meta( @@ -302,9 +306,10 @@ async fn read_and_put_blocks> + Unpin>( chunker.next(), )?; if let Some(block) = next_block { - md5hasher.update(&block[..]); - sha256hasher.update(&block[..]); - let block_hash = blake2sum(&block[..]); + let block = Bytes::from(block); + md5hasher.update(block.clone()); + sha256hasher.update(block.clone()); + let block_hash = async_blake2sum(block.clone()).await; let block_len = block.len(); put_curr_version_block = put_block_meta( garage, @@ -322,9 +327,9 @@ async fn read_and_put_blocks> + Unpin>( } let total_size = next_offset as u64; - let data_md5sum = md5hasher.finalize(); + let data_md5sum = md5hasher.finalize().await; - let data_sha256sum = sha256hasher.finalize(); + let data_sha256sum = sha256hasher.finalize().await; let data_sha256sum = Hash::try_from(&data_sha256sum[..]).unwrap(); Ok((total_size, data_md5sum, data_sha256sum)) @@ -504,7 +509,10 @@ pub async fn handle_put_part( // Copy block to store let version = Version::new(version_uuid, bucket_id, key, false); - let first_block_hash = blake2sum(&first_block[..]); + + let first_block = Bytes::from(first_block); + let first_block_hash = async_blake2sum(first_block.clone()).await; + let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( &garage, &version, -- cgit v1.2.3 From 2f111e6b3d772b10c8ed6279ce0c82d22852afd1 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 18 Jul 2022 18:40:57 +0200 Subject: Performance improvements: - reduce contention on mutation_lock by having 256 of them - better lmdb defaults --- src/api/s3/put.rs | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index fbfa6f0d..a182f04d 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -8,6 +8,11 @@ use hyper::{Request, Response}; use md5::{digest::generic_array::*, Digest as Md5Digest, Md5}; use sha2::Sha256; +use opentelemetry::{ + trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer}, + Context, +}; + use garage_table::*; use garage_util::async_hash::*; use garage_util::data::*; @@ -279,12 +284,21 @@ async fn read_and_put_blocks> + Unpin>( first_block_hash: Hash, chunker: &mut StreamChunker, ) -> Result<(u64, GenericArray, Hash), Error> { + let tracer = opentelemetry::global::tracer("garage"); + let first_block = Bytes::from(first_block); let md5hasher = AsyncHasher::::new(); let sha256hasher = AsyncHasher::::new(); - md5hasher.update(first_block.clone()); - sha256hasher.update(first_block.clone()); + + futures::future::join( + md5hasher.update(first_block.clone()), + sha256hasher.update(first_block.clone()), + ) + .with_context(Context::current_with_span( + tracer.start("Hash first block (md5, sha256)"), + )) + .await; let mut next_offset = first_block.len(); let mut 
put_curr_version_block = put_block_meta( @@ -307,9 +321,15 @@ async fn read_and_put_blocks> + Unpin>( )?; if let Some(block) = next_block { let block = Bytes::from(block); - md5hasher.update(block.clone()); - sha256hasher.update(block.clone()); - let block_hash = async_blake2sum(block.clone()).await; + let (_, _, block_hash) = futures::future::join3( + md5hasher.update(block.clone()), + sha256hasher.update(block.clone()), + async_blake2sum(block.clone()), + ) + .with_context(Context::current_with_span( + tracer.start("Hash block (md5, sha256, blake2)"), + )) + .await; let block_len = block.len(); put_curr_version_block = put_block_meta( garage, -- cgit v1.2.3 From 2cad656a0332b19481ce779f5026b07c6ed8198f Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Fri, 22 Jul 2022 18:40:06 +0200 Subject: More make clippy happy --- src/api/s3/put.rs | 2 -- 1 file changed, 2 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index a182f04d..2c51909f 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -286,8 +286,6 @@ async fn read_and_put_blocks> + Unpin>( ) -> Result<(u64, GenericArray, Hash), Error> { let tracer = opentelemetry::global::tracer("garage"); - let first_block = Bytes::from(first_block); - let md5hasher = AsyncHasher::::new(); let sha256hasher = AsyncHasher::::new(); -- cgit v1.2.3 From ad35b18bb146fcbf5e817c10837c6e835b1af5b7 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 25 Jul 2022 11:59:55 +0200 Subject: Faster chunker --- src/api/s3/put.rs | 42 ++++++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 10 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index 2c51909f..e6698bfa 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -387,7 +387,8 @@ struct StreamChunker>> { stream: S, read_all: bool, block_size: usize, - buf: VecDeque, + buf: VecDeque, + buf_len: usize, } impl> + Unpin> StreamChunker { @@ -396,29 +397,50 @@ impl> + Unpin> StreamChunker { stream, read_all: false, block_size, - buf: VecDeque::with_capacity(2 * block_size), + buf: VecDeque::with_capacity(8), + buf_len: 0, } } async fn next(&mut self) -> Result>, Error> { - while !self.read_all && self.buf.len() < self.block_size { + while !self.read_all && self.buf_len < self.block_size { if let Some(block) = self.stream.next().await { let bytes = block?; trace!("Body next: {} bytes", bytes.len()); - self.buf.extend(bytes); + self.buf_len += bytes.len(); + self.buf.push_back(bytes); } else { self.read_all = true; } } - if self.buf.is_empty() { + if self.buf_len == 0 { Ok(None) - } else if self.buf.len() <= self.block_size { - let block = self.buf.drain(..).collect::>(); - Ok(Some(block)) } else { - let block = self.buf.drain(..self.block_size).collect::>(); - Ok(Some(block)) + let mut slices = Vec::with_capacity(self.buf.len()); + let mut taken = 0; + while self.buf_len > 0 && taken < self.block_size { + let front = self.buf.pop_front().unwrap(); + if taken + front.len() <= self.block_size { + taken += front.len(); + self.buf_len -= front.len(); + slices.push(front); + } else { + let front_take = self.block_size - taken; + slices.push(front.slice(..front_take)); + self.buf.push_front(front.slice(front_take..)); + self.buf_len -= front_take; + break; + } + } + Ok(Some( + slices + .iter() + .map(|x| &x[..]) + .collect::>() + .concat() + .into(), + )) } } } -- cgit v1.2.3 From 16f6a1a65d4b973ea13cd00bbfdd7e225041e447 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 25 Jul 2022 12:06:06 +0200 
Subject: fix clippy --- src/api/s3/put.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index e6698bfa..dc0530df 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -434,12 +434,7 @@ impl> + Unpin> StreamChunker { } } Ok(Some( - slices - .iter() - .map(|x| &x[..]) - .collect::>() - .concat() - .into(), + slices.iter().map(|x| &x[..]).collect::>().concat(), )) } } -- cgit v1.2.3 From 13b5f28c7e8dec12b1db61735931b3830a3c893f Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Fri, 2 Sep 2022 13:46:42 +0200 Subject: Make use of BytesBuf from new Netapp --- src/api/s3/put.rs | 43 ++++++++++--------------------------------- 1 file changed, 10 insertions(+), 33 deletions(-) (limited to 'src/api/s3/put.rs') diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs index dc0530df..97b8e4e3 100644 --- a/src/api/s3/put.rs +++ b/src/api/s3/put.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque}; +use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::sync::Arc; use futures::prelude::*; @@ -13,6 +13,7 @@ use opentelemetry::{ Context, }; +use garage_rpc::netapp::bytes_buf::BytesBuf; use garage_table::*; use garage_util::async_hash::*; use garage_util::data::*; @@ -108,7 +109,7 @@ pub(crate) async fn save_stream> + Unpin>( size, etag: data_md5sum_hex.clone(), }, - first_block, + first_block.to_vec(), )), }; @@ -136,7 +137,6 @@ pub(crate) async fn save_stream> + Unpin>( garage.version_table.insert(&version).await?; // Transfer data and verify checksum - let first_block = Bytes::from(first_block); let first_block_hash = async_blake2sum(first_block.clone()).await; let tx_result = (|| async { @@ -318,7 +318,6 @@ async fn read_and_put_blocks> + Unpin>( chunker.next(), )?; if let Some(block) = next_block { - let block = Bytes::from(block); let (_, _, block_hash) = futures::future::join3( md5hasher.update(block.clone()), sha256hasher.update(block.clone()), @@ -387,8 +386,7 @@ struct StreamChunker>> { stream: S, read_all: bool, block_size: usize, - buf: VecDeque, - buf_len: usize, + buf: BytesBuf, } impl> + Unpin> StreamChunker { @@ -397,45 +395,25 @@ impl> + Unpin> StreamChunker { stream, read_all: false, block_size, - buf: VecDeque::with_capacity(8), - buf_len: 0, + buf: BytesBuf::new(), } } - async fn next(&mut self) -> Result>, Error> { - while !self.read_all && self.buf_len < self.block_size { + async fn next(&mut self) -> Result, Error> { + while !self.read_all && self.buf.len() < self.block_size { if let Some(block) = self.stream.next().await { let bytes = block?; trace!("Body next: {} bytes", bytes.len()); - self.buf_len += bytes.len(); - self.buf.push_back(bytes); + self.buf.extend(bytes); } else { self.read_all = true; } } - if self.buf_len == 0 { + if self.buf.is_empty() { Ok(None) } else { - let mut slices = Vec::with_capacity(self.buf.len()); - let mut taken = 0; - while self.buf_len > 0 && taken < self.block_size { - let front = self.buf.pop_front().unwrap(); - if taken + front.len() <= self.block_size { - taken += front.len(); - self.buf_len -= front.len(); - slices.push(front); - } else { - let front_take = self.block_size - taken; - slices.push(front.slice(..front_take)); - self.buf.push_front(front.slice(front_take..)); - self.buf_len -= front_take; - break; - } - } - Ok(Some( - slices.iter().map(|x| &x[..]).collect::>().concat(), - )) + Ok(Some(self.buf.take_max(self.block_size))) } } } @@ -545,7 +523,6 @@ pub async fn handle_put_part( // Copy block to store let 
version = Version::new(version_uuid, bucket_id, key, false); - let first_block = Bytes::from(first_block); let first_block_hash = async_blake2sum(first_block.clone()).await; let (_, data_md5sum, data_sha256sum) = read_and_put_blocks( -- cgit v1.2.3
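The chunking strategy that the last two commits converge on can be sketched independently of Garage's types. The following is a minimal illustration under stated assumptions, not Garage's actual code: a plain VecDeque<Bytes> stands in for Netapp's BytesBuf (whose take_max method provides the equivalent operation), and it shows how arbitrarily-sized body chunks are regrouped into fixed-size blocks, copying data only when a block is emitted.

use std::collections::VecDeque;
use bytes::Bytes;

/// Minimal sketch of the chunker: buffer incoming chunks and emit
/// blocks of `block_size` bytes (the last block may be shorter).
struct Chunker {
	buf: VecDeque<Bytes>,
	buf_len: usize,
	block_size: usize,
}

impl Chunker {
	fn new(block_size: usize) -> Self {
		Self { buf: VecDeque::new(), buf_len: 0, block_size }
	}

	/// Append one chunk from the body (zero-copy: only the handle is stored).
	fn push(&mut self, bytes: Bytes) {
		self.buf_len += bytes.len();
		self.buf.push_back(bytes);
	}

	/// Remove and return up to `block_size` buffered bytes, splitting the
	/// front chunk if it straddles the block boundary.
	fn take_block(&mut self) -> Option<Vec<u8>> {
		if self.buf_len == 0 {
			return None;
		}
		let take = self.block_size.min(self.buf_len);
		let mut out = Vec::with_capacity(take);
		while out.len() < take {
			let front = self.buf.pop_front().unwrap();
			let want = take - out.len();
			if front.len() <= want {
				out.extend_from_slice(&front);
			} else {
				out.extend_from_slice(&front[..want]);
				self.buf.push_front(front.slice(want..));
			}
		}
		self.buf_len -= take;
		Some(out)
	}
}

fn main() {
	let mut chunker = Chunker::new(4);
	chunker.push(Bytes::from_static(b"hello "));
	chunker.push(Bytes::from_static(b"world"));
	// Prints blocks of at most 4 bytes: "hell", "o wo", "rld".
	while let Some(block) = chunker.take_block() {
		println!("{}", String::from_utf8_lossy(&block));
	}
}

Splitting the front chunk with Bytes::slice keeps both halves as refcounted views of the same allocation, which is what keeps the buffering path free of copies until a block is actually assembled.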