Diffstat (limited to 'src/table')
-rw-r--r--   src/table/Cargo.toml                  |  1 -
-rw-r--r--   src/table/gc.rs                       |  8 ++++----
-rw-r--r--   src/table/replication/parameters.rs   |  4 ++--
-rw-r--r--   src/table/sync.rs                     |  5 ++---
-rw-r--r--   src/table/table.rs                    | 10 ++++++----
5 files changed, 14 insertions, 14 deletions
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index e704cd3c..fad6ea08 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -22,7 +22,6 @@ opentelemetry.workspace = true
 
 async-trait.workspace = true
 arc-swap.workspace = true
-bytes.workspace = true
 hex.workspace = true
 hexdump.workspace = true
 tracing.workspace = true
diff --git a/src/table/gc.rs b/src/table/gc.rs
index d30a1849..28ea119d 100644
--- a/src/table/gc.rs
+++ b/src/table/gc.rs
@@ -4,6 +4,7 @@ use std::sync::Arc;
 use std::time::Duration;
 
 use async_trait::async_trait;
+
 use serde::{Deserialize, Serialize};
 use serde_bytes::ByteBuf;
 
@@ -258,21 +259,20 @@ impl<F: TableSchema, R: TableReplication> TableGc<F, R> {
 			.await
 			.err_context("GC: remote delete tombstones")?;
 
-		// GC has been successfull for all of these entries.
+		// GC has been successful for all of these entries.
 		// We now remove them all from our local table and from the GC todo list.
 		for item in items {
 			self.data
 				.delete_if_equal_hash(&item.key[..], item.value_hash)
 				.err_context("GC: local delete tombstones")?;
 			item.remove_if_equal(&self.data.gc_todo)
-				.err_context("GC: remove from todo list after successfull GC")?;
+				.err_context("GC: remove from todo list after successful GC")?;
 		}
 
 		Ok(())
 	}
 }
 
-#[async_trait]
 impl<F: TableSchema, R: TableReplication> EndpointHandler<GcRpc> for TableGc<F, R> {
 	async fn handle(self: &Arc<Self>, message: &GcRpc, _from: NodeID) -> Result<GcRpc, Error> {
 		match message {
@@ -383,7 +383,7 @@ impl GcTodoEntry {
 
 	/// Removes the GcTodoEntry from the gc_todo tree if the
 	/// hash of the serialized value is the same here as in the tree.
-	/// This is usefull to remove a todo entry only under the condition
+	/// This is useful to remove a todo entry only under the condition
 	/// that it has not changed since the time it was read, i.e.
 	/// what we have to do is still the same
 	pub(crate) fn remove_if_equal(&self, gc_todo_tree: &db::Tree) -> Result<(), Error> {
diff --git a/src/table/replication/parameters.rs b/src/table/replication/parameters.rs
index 682c1ea6..3649fad3 100644
--- a/src/table/replication/parameters.rs
+++ b/src/table/replication/parameters.rs
@@ -13,12 +13,12 @@ pub trait TableReplication: Send + Sync + 'static {
 
 	/// Which nodes to send read requests to
 	fn read_nodes(&self, hash: &Hash) -> Vec<Uuid>;
-	/// Responses needed to consider a read succesfull
+	/// Responses needed to consider a read successful
 	fn read_quorum(&self) -> usize;
 
 	/// Which nodes to send writes to
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets;
-	/// Responses needed to consider a write succesfull in each set
+	/// Responses needed to consider a write successful in each set
 	fn write_quorum(&self) -> usize;
 
 	// Accessing partitions, for Merkle tree & sync
diff --git a/src/table/sync.rs b/src/table/sync.rs
index cd080df0..2d43b9fc 100644
--- a/src/table/sync.rs
+++ b/src/table/sync.rs
@@ -316,7 +316,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 			SyncRpc::RootCkDifferent(true) => VecDeque::from(vec![root_ck_key]),
 			x => {
 				return Err(Error::Message(format!(
-					"Invalid respone to RootCkHash RPC: {}",
+					"Invalid response to RootCkHash RPC: {}",
 					debug_serialize(x)
 				)));
 			}
@@ -362,7 +362,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 			SyncRpc::Node(_, node) => node,
 			x => {
 				return Err(Error::Message(format!(
-					"Invalid respone to GetNode RPC: {}",
+					"Invalid response to GetNode RPC: {}",
 					debug_serialize(x)
 				)));
 			}
@@ -444,7 +444,6 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
 
 // ======= SYNCHRONIZATION PROCEDURE -- RECEIVER SIDE ======
 
-#[async_trait]
 impl<F: TableSchema, R: TableReplication> EndpointHandler<SyncRpc> for TableSyncer<F, R> {
 	async fn handle(self: &Arc<Self>, message: &SyncRpc, from: NodeID) -> Result<SyncRpc, Error> {
 		match message {
diff --git a/src/table/table.rs b/src/table/table.rs
index a5be2910..c96f4731 100644
--- a/src/table/table.rs
+++ b/src/table/table.rs
@@ -2,7 +2,6 @@ use std::borrow::Borrow;
 use std::collections::{BTreeMap, BTreeSet, HashMap};
 use std::sync::Arc;
 
-use async_trait::async_trait;
 use futures::stream::*;
 use serde::{Deserialize, Serialize};
 use serde_bytes::ByteBuf;
@@ -171,11 +170,11 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 		// We will here batch all items into a single request for each concerned
 		// node, with all of the entries it must store within that request.
 		// Each entry has to be saved to a specific list of "write sets", i.e. a set
-		// of node within wich a quorum must be achieved. In normal operation, there
+		// of node within which a quorum must be achieved. In normal operation, there
 		// is a single write set which corresponds to the quorum in the current
 		// cluster layout, but when the layout is updated, multiple write sets might
 		// have to be handled at once. Here, since we are sending many entries, we
-		// will have to handle many write sets in all cases. The algorihtm is thus
+		// will have to handle many write sets in all cases. The algorithm is thus
 		// to send one request to each node with all the items it must save,
 		// and keep track of the OK responses within each write set: if for all sets
 		// a quorum of nodes has answered OK, then the insert has succeeded and
@@ -204,6 +203,10 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 			entries_vec.push((write_sets, e_enc));
 		}
 
+		if entries_vec.is_empty() {
+			return Ok(());
+		}
+
 		// Compute a deduplicated list of all of the write sets,
 		// and compute an index from each node to the position of the sets in which
 		// it takes part, to optimize the detection of a quorum.
@@ -496,7 +499,6 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
 		}
 	}
 }
-#[async_trait]
 impl<F: TableSchema, R: TableReplication> EndpointHandler<TableRpc<F>> for Table<F, R> {
 	async fn handle(
 		self: &Arc<Self>,
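The remove_if_equal doc comment fixed in the gc.rs hunk above describes a conditional delete: a GC todo entry is removed only if it has not changed since it was read. Below is a minimal sketch of that pattern in plain Rust, using a HashMap in place of db::Tree and comparing raw values where Garage compares a hash of the serialized value; the function shown is an illustrative stand-in, not Garage's actual implementation.

use std::collections::HashMap;

// Delete `key` only if the stored value still equals `expected`,
// i.e. the entry has not been modified since we read it.
fn remove_if_equal(tree: &mut HashMap<Vec<u8>, Vec<u8>>, key: &[u8], expected: &[u8]) -> bool {
    // Check first, then delete: an update that happened between the
    // original read and this call leaves the newer value untouched.
    let unchanged = tree.get(key).map_or(false, |v| v.as_slice() == expected);
    if unchanged {
        tree.remove(key);
    }
    unchanged
}

fn main() {
    let mut tree = HashMap::new();
    tree.insert(b"k".to_vec(), b"v1".to_vec());

    // The entry changed after our hypothetical earlier read: the stale delete is refused.
    assert!(!remove_if_equal(&mut tree, b"k", b"v0"));
    // The value still matches what we read: the delete goes through.
    assert!(remove_if_equal(&mut tree, b"k", b"v1"));
}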
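The parameters.rs hunk documents read_quorum and write_quorum on the TableReplication trait. Here is a toy stand-in (not Garage's actual trait) illustrating the usual constraint on these numbers: when R + W > N, every read quorum intersects every write quorum, so a successful read reaches at least one node that acknowledged the last successful write.

// Simplified stand-in for the quorum parameters; assumes nothing about
// Garage's real TableReplication trait beyond the two quorum getters.
trait ReplicationParams {
    fn replication_factor(&self) -> usize;
    fn read_quorum(&self) -> usize;
    fn write_quorum(&self) -> usize;
}

// Classic 3-way replication with 2-out-of-3 quorums.
struct ThreeWay;

impl ReplicationParams for ThreeWay {
    fn replication_factor(&self) -> usize { 3 }
    fn read_quorum(&self) -> usize { 2 }
    fn write_quorum(&self) -> usize { 2 }
}

fn main() {
    let p = ThreeWay;
    // R + W > N guarantees read/write quorum intersection.
    assert!(p.read_quorum() + p.write_quorum() > p.replication_factor());
}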
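Finally, the long comment in the table.rs hunk describes the batched-insert algorithm: send each node every item it must store, then count OK responses per write set and declare success only once each set reaches its quorum. A minimal sketch of that per-set quorum check follows; the names (NodeId, all_sets_have_quorum) and the flat Vec layout are illustrative assumptions, not Garage's internals.

use std::collections::HashSet;

type NodeId = u64;

// True once every write set contains at least `quorum` nodes that answered OK.
fn all_sets_have_quorum(write_sets: &[Vec<NodeId>], ok_nodes: &HashSet<NodeId>, quorum: usize) -> bool {
    write_sets
        .iter()
        .all(|set| set.iter().filter(|n| ok_nodes.contains(*n)).count() >= quorum)
}

fn main() {
    // Two overlapping write sets, as can happen while a layout change is in progress.
    let write_sets = vec![vec![1, 2, 3], vec![3, 4, 5]];
    let mut ok_nodes = HashSet::new();

    // Simulate OK responses arriving one at a time; with a quorum of 2 per set,
    // the insert succeeds only once the second set also has two OKs.
    for node in [1u64, 2, 3, 4, 5] {
        ok_nodes.insert(node);
        if all_sets_have_quorum(&write_sets, &ok_nodes, 2) {
            println!("all write sets reached quorum after node {node} answered");
            break;
        }
    }
}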