Diffstat (limited to 'src/table')
-rw-r--r--  src/table/gc.rs                      6
-rw-r--r--  src/table/replication/parameters.rs  4
-rw-r--r--  src/table/sync.rs                    4
-rw-r--r--  src/table/table.rs                   4
4 files changed, 9 insertions, 9 deletions
diff --git a/src/table/gc.rs b/src/table/gc.rs
index d30a1849..9e060390 100644
--- a/src/table/gc.rs
+++ b/src/table/gc.rs
@@ -258,14 +258,14 @@ impl<F: TableSchema, R: TableReplication> TableGc<F, R> {
.await
.err_context("GC: remote delete tombstones")?;
- // GC has been successfull for all of these entries.
+ // GC has been successful for all of these entries.
// We now remove them all from our local table and from the GC todo list.
for item in items {
self.data
.delete_if_equal_hash(&item.key[..], item.value_hash)
.err_context("GC: local delete tombstones")?;
item.remove_if_equal(&self.data.gc_todo)
- .err_context("GC: remove from todo list after successfull GC")?;
+ .err_context("GC: remove from todo list after successful GC")?;
}
Ok(())
@@ -383,7 +383,7 @@ impl GcTodoEntry {
/// Removes the GcTodoEntry from the gc_todo tree if the
/// hash of the serialized value is the same here as in the tree.
- /// This is usefull to remove a todo entry only under the condition
+ /// This is useful to remove a todo entry only under the condition
/// that it has not changed since the time it was read, i.e.
/// what we have to do is still the same
pub(crate) fn remove_if_equal(&self, gc_todo_tree: &db::Tree) -> Result<(), Error> {
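The remove_if_equal documented in the hunk above is a conditional delete: the todo entry is dropped only if its stored value still hashes to what was read earlier, so a concurrent update is never discarded by a stale GC pass. A minimal self-contained sketch of that pattern, using a HashMap as a stand-in for Garage's db::Tree (names and types here are illustrative, not the real API):

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

fn value_hash(bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    bytes.hash(&mut h);
    h.finish()
}

/// Remove `key` only if the stored value still hashes to `expected`,
/// i.e. only if the entry has not changed since we read it.
/// Returns true if the entry was actually removed.
fn remove_if_equal(tree: &mut HashMap<Vec<u8>, Vec<u8>>, key: &[u8], expected: u64) -> bool {
    match tree.get(key) {
        Some(v) if value_hash(v) == expected => {
            tree.remove(key);
            true
        }
        _ => false,
    }
}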
diff --git a/src/table/replication/parameters.rs b/src/table/replication/parameters.rs
index 682c1ea6..3649fad3 100644
--- a/src/table/replication/parameters.rs
+++ b/src/table/replication/parameters.rs
@@ -13,12 +13,12 @@ pub trait TableReplication: Send + Sync + 'static {
/// Which nodes to send read requests to
fn read_nodes(&self, hash: &Hash) -> Vec<Uuid>;
- /// Responses needed to consider a read succesfull
+ /// Responses needed to consider a read successful
fn read_quorum(&self) -> usize;
/// Which nodes to send writes to
fn write_sets(&self, hash: &Hash) -> Self::WriteSets;
- /// Responses needed to consider a write succesfull in each set
+ /// Responses needed to consider a write successful in each set
fn write_quorum(&self) -> usize;
// Accessing partitions, for Merkle tree & sync
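To make the quorum parameters above concrete, here is a minimal sketch of one strategy shape these methods allow: replicate each partition to 3 nodes and acknowledge reads and writes after 2 responses, so any read quorum overlaps any write quorum (2 + 2 > 3). This is an illustration under assumed types, not Garage's actual implementation; Uuid and Hash are stand-ins for its fixed-size byte arrays:

// Stand-in identifiers; illustration only.
type Uuid = u64;
type Hash = u64;

struct ThreeOfN {
    nodes: Vec<Uuid>,
}

impl ThreeOfN {
    /// Toy placement: pick 3 consecutive nodes starting from the hash.
    fn read_nodes(&self, hash: &Hash) -> Vec<Uuid> {
        let start = (*hash as usize) % self.nodes.len();
        (0..3)
            .map(|i| self.nodes[(start + i) % self.nodes.len()])
            .collect()
    }
    fn read_quorum(&self) -> usize {
        2
    }
    /// A single write set in steady state; a layout change would add more.
    fn write_sets(&self, hash: &Hash) -> Vec<Vec<Uuid>> {
        vec![self.read_nodes(hash)]
    }
    fn write_quorum(&self) -> usize {
        2
    }
}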
diff --git a/src/table/sync.rs b/src/table/sync.rs
index cd080df0..234ee8ea 100644
--- a/src/table/sync.rs
+++ b/src/table/sync.rs
@@ -316,7 +316,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
SyncRpc::RootCkDifferent(true) => VecDeque::from(vec![root_ck_key]),
x => {
return Err(Error::Message(format!(
- "Invalid respone to RootCkHash RPC: {}",
+ "Invalid response to RootCkHash RPC: {}",
debug_serialize(x)
)));
}
@@ -362,7 +362,7 @@ impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
SyncRpc::Node(_, node) => node,
x => {
return Err(Error::Message(format!(
- "Invalid respone to GetNode RPC: {}",
+ "Invalid response to GetNode RPC: {}",
debug_serialize(x)
)));
}
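Both hunks above follow the same defensive pattern: match only the RPC variants the caller expects and turn anything else into an explicit, descriptive error rather than a panic or a silent fallthrough. A hedged sketch with a simplified enum (the variants and payloads are stand-ins for the real SyncRpc):

#[derive(Debug)]
enum SyncRpc {
    RootCkDifferent(bool),
    Node(u64, String),
}

fn expect_node(resp: SyncRpc) -> Result<String, String> {
    match resp {
        SyncRpc::Node(_, node) => Ok(node),
        // Any other variant is a protocol violation, reported with its
        // debug representation so the error shows what was received.
        x => Err(format!("Invalid response to GetNode RPC: {:?}", x)),
    }
}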
diff --git a/src/table/table.rs b/src/table/table.rs
index a5be2910..ea8471d0 100644
--- a/src/table/table.rs
+++ b/src/table/table.rs
@@ -171,11 +171,11 @@ impl<F: TableSchema, R: TableReplication> Table<F, R> {
// We will here batch all items into a single request for each concerned
// node, with all of the entries it must store within that request.
// Each entry has to be saved to a specific list of "write sets", i.e. a set
- // of node within wich a quorum must be achieved. In normal operation, there
+ // of node within which a quorum must be achieved. In normal operation, there
// is a single write set which corresponds to the quorum in the current
// cluster layout, but when the layout is updated, multiple write sets might
// have to be handled at once. Here, since we are sending many entries, we
- // will have to handle many write sets in all cases. The algorihtm is thus
+ // will have to handle many write sets in all cases. The algorithm is thus
// to send one request to each node with all the items it must save,
// and keep track of the OK responses within each write set: if for all sets
// a quorum of nodes has answered OK, then the insert has succeeded and
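The comment above describes the bookkeeping for a multi-write-set insert: each node's single OK response counts toward every write set that contains it, and the batch succeeds only once every set has reached quorum. A minimal sketch of that final check (the function name and the stand-in Uuid type are hypothetical):

use std::collections::HashSet;

// Stand-in node identifier; illustration only.
type Uuid = u64;

/// True once every write set has at least `quorum` members among the
/// nodes that answered OK; one node's response counts in each set
/// that contains it.
fn all_sets_have_quorum(write_sets: &[Vec<Uuid>], ok_nodes: &HashSet<Uuid>, quorum: usize) -> bool {
    write_sets
        .iter()
        .all(|set| set.iter().filter(|n| ok_nodes.contains(*n)).count() >= quorum)
}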