From a4f9f19ac34f36b3e3531dfeeabf2723d51fdf97 Mon Sep 17 00:00:00 2001
From: withinboredom
Date: Tue, 19 Apr 2022 12:49:43 +0200
Subject: remove size limitation in UploadPartCopy (#280)

This removes the >1mb s3_copy restriction.

This restriction doesn't seem to be documented anywhere (I could be wrong).
It also causes some software to fail (such as #248).

Co-authored-by: Rob Landers
Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/280
Co-authored-by: withinboredom
Co-committed-by: withinboredom
---
 src/api/s3_copy.rs | 12 ------------
 1 file changed, 12 deletions(-)

(limited to 'src/api/s3_copy.rs')

diff --git a/src/api/s3_copy.rs b/src/api/s3_copy.rs
index 2d050ff6..f92dfcf1 100644
--- a/src/api/s3_copy.rs
+++ b/src/api/s3_copy.rs
@@ -268,7 +268,6 @@ pub async fn handle_upload_part_copy(
 
 	let mut blocks_to_copy = vec![];
 	let mut current_offset = 0;
-	let mut size_to_copy = 0;
 	for (_bk, block) in source_version.blocks.items().iter() {
 		let (block_begin, block_end) = (current_offset, current_offset + block.size);
 
@@ -289,10 +288,6 @@ pub async fn handle_upload_part_copy(
 				(Some(b), None) => Some(b as usize..block.size as usize),
 				(None, None) => None,
 			};
-			size_to_copy += range_to_copy
-				.as_ref()
-				.map(|x| x.len() as u64)
-				.unwrap_or(block.size);
 
 			blocks_to_copy.push((block.hash, range_to_copy));
 		}
@@ -300,13 +295,6 @@ pub async fn handle_upload_part_copy(
 		current_offset = block_end;
 	}
 
-	if size_to_copy < 1024 * 1024 {
-		return Err(Error::BadRequest(format!(
-			"Not enough data to copy: {} bytes (minimum: 1MB)",
-			size_to_copy
-		)));
-	}
-
 	// Now, actually copy the blocks
 	let mut md5hasher = Md5::new();
-- 
cgit v1.2.3
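
For context on what the remaining code does: the loop left in place by this patch maps the requested copy range onto the source object's stored blocks, producing a per-block byte range to copy (None meaning "copy the whole block"). The patch only drops the size accumulator and the 1 MiB minimum check around that loop. The following standalone Rust sketch reconstructs the block/range intersection under simplifying assumptions; BlockInfo, block_ranges_to_copy, and the plain Range<u64> source range are hypothetical names for illustration, not Garage's actual types or API.

use std::ops::Range;

// Hypothetical, simplified stand-in for a stored block's metadata.
struct BlockInfo {
    size: u64,
}

// Sketch of the intersection logic the patch keeps: for each block,
// compute which byte range (if any) of that block falls inside the
// requested source range. None means "copy the whole block".
fn block_ranges_to_copy(
    blocks: &[BlockInfo],
    source_range: Range<u64>,
) -> Vec<(usize, Option<Range<usize>>)> {
    let mut out = vec![];
    let mut current_offset = 0u64;
    for (i, block) in blocks.iter().enumerate() {
        let (block_begin, block_end) = (current_offset, current_offset + block.size);
        // Only blocks that overlap the requested range are copied at all.
        if block_begin < source_range.end && block_end > source_range.start {
            // Offsets inside the block where copying starts/ends, if the
            // requested range does not cover the block completely.
            let begin = if source_range.start > block_begin {
                Some((source_range.start - block_begin) as usize)
            } else {
                None
            };
            let end = if source_range.end < block_end {
                Some((source_range.end - block_begin) as usize)
            } else {
                None
            };
            let range_to_copy = match (begin, end) {
                (Some(b), Some(e)) => Some(b..e),
                (Some(b), None) => Some(b..block.size as usize),
                (None, Some(e)) => Some(0..e),
                (None, None) => None, // whole block is inside the range
            };
            out.push((i, range_to_copy));
        }
        current_offset = block_end;
    }
    out
}

fn main() {
    // Example: three 1 KiB blocks, copying bytes 500..1500 of the object.
    // With the patch applied, a sub-1 MiB copy like this is accepted instead
    // of being rejected with a BadRequest error.
    let blocks = vec![
        BlockInfo { size: 1024 },
        BlockInfo { size: 1024 },
        BlockInfo { size: 1024 },
    ];
    println!("{:?}", block_ranges_to_copy(&blocks, 500..1500));
    // -> [(0, Some(500..1024)), (1, Some(0..476))]
}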