author     Jonathan Davies <jpds@protonmail.com>  2023-05-09 20:49:34 +0100
committer  Jonathan Davies <jpds@protonmail.com>  2023-05-09 20:49:34 +0100
commit     c783194e8b8c3263fad579a85ea07d62e63b16be (patch)
tree       362189dcfde317ef4dd0613ab621684469c17168
parent     b925f53dc3b0bae77aa3f73e581faace2eb3b21a (diff)
*: apply clippy recommendations.
 src/api/admin/api_server.rs   |  2
 src/api/admin/bucket.rs       |  4
 src/api/admin/key.rs          |  4
 src/api/generic_server.rs     |  2
 src/api/k2v/batch.rs          |  4
 src/api/s3/get.rs             |  4
 src/block/repair.rs           | 14
 src/model/k2v/seen.rs         |  2
 src/table/data.rs             | 16
 src/table/util.rs             |  9
 src/util/config.rs            |  2
 src/util/forwarded_headers.rs |  2
 src/web/web_server.rs         |  2

 13 files changed, 28 insertions(+), 39 deletions(-)
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 58dd38d8..b0dfdfb7 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -105,7 +105,7 @@ impl AdminApiServer {
let bucket_id = self
.garage
.bucket_helper()
- .resolve_global_bucket_name(&domain)
+ .resolve_global_bucket_name(domain)
.await?
.ok_or(HelperError::NoSuchBucket(domain.to_string()))?;
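
Note: the hunk above follows clippy's needless_borrow lint. `domain` is already a reference, so `&domain` only creates an extra level of borrowing that the compiler immediately coerces away. A minimal standalone sketch of the pattern, with hypothetical names (not code from this repository):

    fn resolve(name: &str) -> Option<usize> {
        if name.is_empty() {
            None
        } else {
            Some(name.len())
        }
    }

    fn main() {
        let domain: &str = "example.org";
        // Before: resolve(&domain) -- builds a &&str that is coerced back to &str.
        // After: pass the existing reference through unchanged.
        let _id = resolve(domain);
    }
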
diff --git a/src/api/admin/bucket.rs b/src/api/admin/bucket.rs
index e60f07ca..f0a4a9e7 100644
--- a/src/api/admin/bucket.rs
+++ b/src/api/admin/bucket.rs
@@ -183,8 +183,8 @@ async fn bucket_info_results(
}
}),
keys: relevant_keys
- .into_iter()
- .map(|(_, key)| {
+ .into_values()
+ .map(|key| {
let p = key.state.as_option().unwrap();
GetBucketInfoKey {
access_key_id: key.key_id,
diff --git a/src/api/admin/key.rs b/src/api/admin/key.rs
index 2bbabb7b..d74ca361 100644
--- a/src/api/admin/key.rs
+++ b/src/api/admin/key.rs
@@ -183,8 +183,8 @@ async fn key_info_results(garage: &Arc<Garage>, key: Key) -> Result<Response<Bod
create_bucket: *key_state.allow_create_bucket.get(),
},
buckets: relevant_buckets
- .into_iter()
- .map(|(_, bucket)| {
+ .into_values()
+ .map(|bucket| {
let state = bucket.state.as_option().unwrap();
KeyInfoBucketResult {
id: hex::encode(bucket.id),
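
Note: this hunk and the bucket.rs hunk above apply the same suggestion: when a map is consumed and its keys are discarded, `.into_values()` states the intent more directly than `.into_iter().map(|(_, v)| ...)`. A minimal sketch with hypothetical data:

    use std::collections::HashMap;

    fn main() {
        let relevant_keys: HashMap<String, u64> =
            HashMap::from([("GK1".to_string(), 10), ("GK2".to_string(), 20)]);
        // Before: relevant_keys.into_iter().map(|(_, v)| v).collect()
        // After: the key is never used, so into_values() is clearer.
        let mut sizes: Vec<u64> = relevant_keys.into_values().collect();
        sizes.sort();
        println!("{:?}", sizes);
    }
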
diff --git a/src/api/generic_server.rs b/src/api/generic_server.rs
index d0354d28..757b85ec 100644
--- a/src/api/generic_server.rs
+++ b/src/api/generic_server.rs
@@ -128,7 +128,7 @@ impl<A: ApiHandler> ApiServer<A> {
let uri = req.uri().clone();
if let Ok(forwarded_for_ip_addr) =
- forwarded_headers::handle_forwarded_for_headers(&req.headers())
+ forwarded_headers::handle_forwarded_for_headers(req.headers())
{
info!(
"{} (via {}) {} {}",
diff --git a/src/api/k2v/batch.rs b/src/api/k2v/batch.rs
index 26d678da..294380ea 100644
--- a/src/api/k2v/batch.rs
+++ b/src/api/k2v/batch.rs
@@ -282,8 +282,8 @@ pub(crate) async fn handle_poll_range(
if let Some((items, seen_marker)) = resp {
let resp = PollRangeResponse {
items: items
- .into_iter()
- .map(|(_k, i)| ReadBatchResponseItem::from(i))
+ .into_values()
+ .map(ReadBatchResponseItem::from)
.collect::<Vec<_>>(),
seen_marker,
};
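
Note: besides `.into_values()`, this hunk also drops a closure that only forwards its argument, per clippy's redundant_closure lint: the conversion function can be passed to `map` directly. A sketch with hypothetical types:

    struct Item(u64);

    #[derive(Debug)]
    struct ResponseItem(u64);

    impl From<Item> for ResponseItem {
        fn from(i: Item) -> Self {
            ResponseItem(i.0)
        }
    }

    fn main() {
        let items = vec![Item(1), Item(2)];
        // Before: .map(|i| ResponseItem::from(i))
        // After: the function path is passed directly.
        let resp: Vec<ResponseItem> =
            items.into_iter().map(ResponseItem::from).collect();
        println!("{:?}", resp);
    }
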
diff --git a/src/api/s3/get.rs b/src/api/s3/get.rs
index 2a99551a..cde7b461 100644
--- a/src/api/s3/get.rs
+++ b/src/api/s3/get.rs
@@ -443,7 +443,7 @@ fn body_from_blocks_range(
// block.part_number, which is not the same in the case of a multipart upload)
let mut blocks: Vec<(VersionBlock, u64)> = Vec::with_capacity(std::cmp::min(
all_blocks.len(),
- 4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size as u64, 1024)) as usize,
+ 4 + ((end - begin) / std::cmp::max(all_blocks[0].1.size, 1024)) as usize,
));
let mut block_offset: u64 = 0;
for (_, b) in all_blocks.iter() {
@@ -454,7 +454,7 @@ fn body_from_blocks_range(
if block_offset < end && block_offset + b.size > begin {
blocks.push((*b, block_offset));
}
- block_offset += b.size as u64;
+ block_offset += b.size;
}
let order_stream = OrderTag::stream();
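
Note: the two changes above are clippy's unnecessary_cast lint; the `size` field is already a `u64` here, so `as u64` is a no-op. A minimal sketch, assuming a hypothetical block type with a `u64` size:

    struct Block {
        size: u64,
    }

    fn main() {
        let blocks = [Block { size: 512 }, Block { size: 1024 }];
        let mut offset: u64 = 0;
        for b in &blocks {
            // Before: offset += b.size as u64;  -- casting a u64 to u64.
            offset += b.size;
        }
        println!("total bytes: {}", offset);
    }
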
diff --git a/src/block/repair.rs b/src/block/repair.rs
index c89484d9..71093d69 100644
--- a/src/block/repair.rs
+++ b/src/block/repair.rs
@@ -220,14 +220,12 @@ fn randomize_next_scrub_run_time(timestamp: u64) -> u64 {
// Take SCRUB_INTERVAL and mix in a random interval of 10 days to attempt to
// balance scrub load across different cluster nodes.
- let next_run_timestamp = timestamp
+ timestamp
+ SCRUB_INTERVAL
.saturating_add(Duration::from_secs(
rand::thread_rng().gen_range(0..3600 * 24 * 10),
))
- .as_millis() as u64;
-
- next_run_timestamp
+ .as_millis() as u64
}
impl Default for ScrubWorkerPersisted {
@@ -241,18 +239,14 @@ impl Default for ScrubWorkerPersisted {
}
}
+#[derive(Default)]
enum ScrubWorkerState {
Running(BlockStoreIterator),
Paused(BlockStoreIterator, u64), // u64 = time when to resume scrub
+ #[default]
Finished,
}
-impl Default for ScrubWorkerState {
- fn default() -> Self {
- ScrubWorkerState::Finished
- }
-}
-
#[derive(Debug)]
pub enum ScrubWorkerCommand {
Start,
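
Note: this file picks up two suggestions: let_and_return in the first hunk (return the expression directly instead of binding it to `next_run_timestamp` first), and replacing the manual `impl Default for ScrubWorkerState` with `#[derive(Default)]` plus a `#[default]` variant (stable since Rust 1.62). A sketch with hypothetical names:

    // Deriving Default on an enum with a #[default] unit variant.
    #[derive(Debug, Default)]
    enum WorkerState {
        Running(u64),
        #[default]
        Finished,
    }

    // clippy::let_and_return -- return the expression instead of binding it.
    fn next_run(base: u64, jitter: u64) -> u64 {
        // Before: let next = base + jitter; next
        base + jitter
    }

    fn main() {
        let idle = WorkerState::default();
        let busy = WorkerState::Running(3);
        println!("{:?} {:?} next={}", idle, busy, next_run(100, 7));
    }
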
diff --git a/src/model/k2v/seen.rs b/src/model/k2v/seen.rs
index 8fe3a582..59d4ca5b 100644
--- a/src/model/k2v/seen.rs
+++ b/src/model/k2v/seen.rs
@@ -79,7 +79,7 @@ impl RangeSeenMarker {
let bytes = nonversioned_encode(&self)?;
let bytes = zstd::stream::encode_all(&mut &bytes[..], zstd::DEFAULT_COMPRESSION_LEVEL)?;
- Ok(BASE64_STANDARD.encode(&bytes))
+ Ok(BASE64_STANDARD.encode(bytes))
}
/// Decode from msgpack+zstd+b64 representation, returns None on error.
diff --git a/src/table/data.rs b/src/table/data.rs
index 26cc3a5a..e76836ca 100644
--- a/src/table/data.rs
+++ b/src/table/data.rs
@@ -44,22 +44,22 @@ pub struct TableData<F: TableSchema, R: TableReplication> {
impl<F: TableSchema, R: TableReplication> TableData<F, R> {
pub fn new(system: Arc<System>, instance: F, replication: R, db: &db::Db) -> Arc<Self> {
let store = db
- .open_tree(&format!("{}:table", F::TABLE_NAME))
+ .open_tree(format!("{}:table", F::TABLE_NAME))
.expect("Unable to open DB tree");
let merkle_tree = db
- .open_tree(&format!("{}:merkle_tree", F::TABLE_NAME))
+ .open_tree(format!("{}:merkle_tree", F::TABLE_NAME))
.expect("Unable to open DB Merkle tree tree");
let merkle_todo = db
- .open_tree(&format!("{}:merkle_todo", F::TABLE_NAME))
+ .open_tree(format!("{}:merkle_todo", F::TABLE_NAME))
.expect("Unable to open DB Merkle TODO tree");
let insert_queue = db
- .open_tree(&format!("{}:insert_queue", F::TABLE_NAME))
+ .open_tree(format!("{}:insert_queue", F::TABLE_NAME))
.expect("Unable to open insert queue DB tree");
let gc_todo = db
- .open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME))
+ .open_tree(format!("{}:gc_todo_v2", F::TABLE_NAME))
.expect("Unable to open GC DB tree");
let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2");
@@ -90,7 +90,7 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
pub fn read_entry(&self, p: &F::P, s: &F::S) -> Result<Option<ByteBuf>, Error> {
let tree_key = self.tree_key(p, s);
- if let Some(bytes) = self.store.get(&tree_key)? {
+ if let Some(bytes) = self.store.get(tree_key)? {
Ok(Some(ByteBuf::from(bytes.to_vec())))
} else {
Ok(None)
@@ -132,10 +132,10 @@ impl<F: TableSchema, R: TableReplication> TableData<F, R> {
}
}
- fn read_range_aux<'a>(
+ fn read_range_aux(
&self,
partition_hash: Hash,
- range: db::ValueIter<'a>,
+ range: db::ValueIter,
filter: &Option<F::Filter>,
limit: usize,
) -> Result<Vec<Arc<ByteBuf>>, Error> {
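
Note: the hunks above combine two suggestions: needless_borrow on the `format!(..)` results and the tree key (the callees apparently accept the owned value directly, so the extra `&` is dropped), and needless_lifetimes on `read_range_aux`, whose `'a` appears only on one input and can therefore be elided. A minimal sketch of the elision, with hypothetical types:

    // Hypothetical stand-in for db::ValueIter.
    struct ValueIter<'a> {
        values: &'a [u64],
    }

    // Before: fn sum_values<'a>(iter: ValueIter<'a>) -> u64
    // After: the lifetime is not tied to the output, so elision covers it.
    fn sum_values(iter: ValueIter) -> u64 {
        iter.values.iter().sum()
    }

    fn main() {
        let data = [1u64, 2, 3];
        println!("{}", sum_values(ValueIter { values: &data }));
    }
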
diff --git a/src/table/util.rs b/src/table/util.rs
index 0b10cf3f..663a7e11 100644
--- a/src/table/util.rs
+++ b/src/table/util.rs
@@ -34,8 +34,9 @@ impl DeletedFilter {
}
}
-#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub enum EnumerationOrder {
+ #[default]
Forward,
Reverse,
}
@@ -49,9 +50,3 @@ impl EnumerationOrder {
}
}
}
-
-impl Default for EnumerationOrder {
- fn default() -> Self {
- EnumerationOrder::Forward
- }
-}
diff --git a/src/util/config.rs b/src/util/config.rs
index 2176353e..95835bbb 100644
--- a/src/util/config.rs
+++ b/src/util/config.rs
@@ -223,7 +223,7 @@ fn secret_from_file(
#[cfg(unix)]
if std::env::var("GARAGE_ALLOW_WORLD_READABLE_SECRETS").as_deref() != Ok("true") {
use std::os::unix::fs::MetadataExt;
- let metadata = std::fs::metadata(&file_path)?;
+ let metadata = std::fs::metadata(file_path)?;
if metadata.mode() & 0o077 != 0 {
return Err(format!("File {} is world-readable! (mode: 0{:o}, expected 0600)\nRefusing to start until this is fixed, or environment variable GARAGE_ALLOW_WORLD_READABLE_SECRETS is set to true.", file_path, metadata.mode()).into());
}
diff --git a/src/util/forwarded_headers.rs b/src/util/forwarded_headers.rs
index 6ae275aa..12f76434 100644
--- a/src/util/forwarded_headers.rs
+++ b/src/util/forwarded_headers.rs
@@ -13,7 +13,7 @@ pub fn handle_forwarded_for_headers(headers: &HeaderMap<HeaderValue>) -> Result<
.to_str()
.ok_or_message("Error parsing X-Forwarded-For header")?;
- let client_ip = IpAddr::from_str(&forwarded_for_ip_str)
+ let client_ip = IpAddr::from_str(forwarded_for_ip_str)
.ok_or_message("Valid IP address not found in X-Forwarded-For header")?;
Ok(client_ip.to_string())
diff --git a/src/web/web_server.rs b/src/web/web_server.rs
index 0c7edf23..de63b842 100644
--- a/src/web/web_server.rs
+++ b/src/web/web_server.rs
@@ -106,7 +106,7 @@ impl WebServer {
addr: SocketAddr,
) -> Result<Response<Body>, Infallible> {
if let Ok(forwarded_for_ip_addr) =
- forwarded_headers::handle_forwarded_for_headers(&req.headers())
+ forwarded_headers::handle_forwarded_for_headers(req.headers())
{
info!(
"{} (via {}) {} {}",