Diffstat (limited to 'src/rpc')
 src/rpc/layout/graph_algo.rs | 2 +-
 src/rpc/layout/helper.rs     | 2 +-
 src/rpc/layout/manager.rs    | 2 +-
 src/rpc/layout/mod.rs        | 2 +-
 src/rpc/layout/test.rs       | 2 +-
 src/rpc/layout/version.rs    | 2 +-
 src/rpc/rpc_helper.rs        | 4 ++--
 src/rpc/system.rs            | 8 ++++----
 8 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/rpc/layout/graph_algo.rs b/src/rpc/layout/graph_algo.rs
index bd33e97f..29d4a043 100644
--- a/src/rpc/layout/graph_algo.rs
+++ b/src/rpc/layout/graph_algo.rs
@@ -133,7 +133,7 @@ impl Graph<FlowEdge> {
/// This function shuffles the order of the edge lists. It keeps the ids of the
/// reversed edges consistent.
fn shuffle_edges(&mut self) {
- // We use deterministic randomness so that the layout calculation algorihtm
+ // We use deterministic randomness so that the layout calculation algorithm
// will output the same thing every time it is run. This way, the results
// pre-calculated in `garage layout show` will match exactly those used
// in practice with `garage layout apply`
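The hunk above relies on seeded, deterministic shuffling so that `garage layout show` and `garage layout apply` agree. A minimal sketch of that pattern, assuming the `rand` crate; the function name and seed value are illustrative, not Garage's actual code:

use rand::prelude::*;

/// Shuffle a slice with a fixed seed: every run yields the same
/// permutation, so a layout pre-computed at planning time matches
/// the one produced when it is actually applied.
fn shuffle_deterministically<T>(items: &mut [T]) {
    let mut rng = StdRng::seed_from_u64(0x6761726167);
    items.shuffle(&mut rng);
}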
diff --git a/src/rpc/layout/helper.rs b/src/rpc/layout/helper.rs
index 3a033ab2..44c826f9 100644
--- a/src/rpc/layout/helper.rs
+++ b/src/rpc/layout/helper.rs
@@ -90,7 +90,7 @@ impl LayoutHelper {
// sync_map_min is the minimum value of sync_map among storage nodes
// in the cluster (non-gateway nodes only, current and previous layouts).
// It is the highest layout version for which we know that all relevant
- // storage nodes have fullfilled a sync, and therefore it is safe to
+ // storage nodes have fulfilled a sync, and therefore it is safe to
// use a read quorum within that layout to ensure consistency.
// Gateway nodes are excluded here because they hold no relevant data
// (they store the bucket and access key tables, but we don't have
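The comment above defines sync_map_min as a minimum taken over storage nodes only. A toy rendering of that reduction, with a deliberately simplified node representation (the tuple type and function signature are hypothetical, not Garage's):

/// Lowest layout version that every storage (non-gateway) node has
/// confirmed syncing; a read quorum within that version is safe.
fn sync_map_min(nodes: &[(bool, u64)]) -> u64 {
    nodes
        .iter()
        .filter(|(is_storage, _)| *is_storage) // gateway nodes excluded
        .map(|(_, synced_version)| *synced_version)
        .min()
        .unwrap_or(0)
}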
diff --git a/src/rpc/layout/manager.rs b/src/rpc/layout/manager.rs
index a0dcf50e..21907ec7 100644
--- a/src/rpc/layout/manager.rs
+++ b/src/rpc/layout/manager.rs
@@ -48,7 +48,7 @@ impl LayoutManager {
Ok(x) => {
if x.current().replication_factor != replication_factor.replication_factor() {
return Err(Error::Message(format!(
- "Prevous cluster layout has replication factor {}, which is different than the one specified in the config file ({}). The previous cluster layout can be purged, if you know what you are doing, simply by deleting the `cluster_layout` file in your metadata directory.",
+ "Previous cluster layout has replication factor {}, which is different than the one specified in the config file ({}). The previous cluster layout can be purged, if you know what you are doing, simply by deleting the `cluster_layout` file in your metadata directory.",
x.current().replication_factor,
replication_factor.replication_factor()
)));
diff --git a/src/rpc/layout/mod.rs b/src/rpc/layout/mod.rs
index aafdea46..ce21a524 100644
--- a/src/rpc/layout/mod.rs
+++ b/src/rpc/layout/mod.rs
@@ -241,7 +241,7 @@ mod v010 {
/// The versions currently in use in the cluster
pub versions: Vec<LayoutVersion>,
/// At most 5 of the previous versions, not used by the garage_table
- /// module, but usefull for the garage_block module to find data blocks
+ /// module, but useful for the garage_block module to find data blocks
/// that have not yet been moved
pub old_versions: Vec<LayoutVersion>,
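The "at most 5" retention rule described in this doc comment could be enforced with a helper along these lines (hypothetical, not Garage's actual API; the constant comes from the comment above):

/// Retire a layout version: keep it available for garage_block
/// lookups, but never retain more than 5 old versions.
fn retire_version(old_versions: &mut Vec<LayoutVersion>, retired: LayoutVersion) {
    old_versions.push(retired);
    while old_versions.len() > 5 {
        old_versions.remove(0); // drop the oldest first
    }
}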
diff --git a/src/rpc/layout/test.rs b/src/rpc/layout/test.rs
index fcbb9dfc..5462160b 100644
--- a/src/rpc/layout/test.rs
+++ b/src/rpc/layout/test.rs
@@ -9,7 +9,7 @@ use crate::replication_mode::ReplicationFactor;
// This function checks that the partition size S computed is at least better than the
// one given by a very naive algorithm. To do so, we try to run the naive algorithm
-// assuming a partion size of S+1. If we succed, it means that the optimal assignment
+// assuming a partition size of S+1. If we succeed, it means that the optimal assignment
// was not optimal. The naive algorithm is the following :
// - we compute the max number of partitions associated to every node, capped at the
// partition number. It gives the number of tokens of every node.
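The test described here is a feasibility probe: if the naive scheme still fits with partition size S+1, the computed S was not optimal. A rough sketch of the token-counting step, under assumed semantics for capacities and replication (all names here are guesses for illustration):

/// With partition size `size`, a node of capacity c holds at most
/// c / size partition copies, capped at the partition count: its
/// "tokens". The naive assignment fits if the tokens collected cover
/// every partition `replication` times.
fn naive_fits(capacities: &[u64], size: u64, n_partitions: u64, replication: u64) -> bool {
    let tokens: u64 = capacities.iter().map(|c| (c / size).min(n_partitions)).sum();
    tokens >= n_partitions * replication
}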
diff --git a/src/rpc/layout/version.rs b/src/rpc/layout/version.rs
index ee4b2821..a569c7c6 100644
--- a/src/rpc/layout/version.rs
+++ b/src/rpc/layout/version.rs
@@ -471,7 +471,7 @@ impl LayoutVersion {
}
}
- // We clear the ring assignemnt data
+ // We clear the ring assignment data
self.ring_assignment_data = Vec::<CompactNodeType>::new();
Ok(Some(old_assignment))
diff --git a/src/rpc/rpc_helper.rs b/src/rpc/rpc_helper.rs
index ea3e5e76..b8ca8120 100644
--- a/src/rpc/rpc_helper.rs
+++ b/src/rpc/rpc_helper.rs
@@ -413,7 +413,7 @@ impl RpcHelper {
/// Make a RPC call to multiple servers, returning either a Vec of responses,
/// or an error if quorum could not be reached due to too many errors
///
- /// Contrary to try_call_many, this fuction is especially made for broadcast
+ /// Contrary to try_call_many, this function is especially made for broadcast
/// write operations. In particular:
///
/// - The request are sent to all specified nodes as soon as `try_write_many_sets`
@@ -506,7 +506,7 @@ impl RpcHelper {
// If we have a quorum of ok in all quorum sets, then it's a success!
if result_tracker.all_quorums_ok() {
- // Continue all other requets in background
+ // Continue all other requests in background
tokio::spawn(async move {
resp_stream.collect::<Vec<(Uuid, Result<_, _>)>>().await;
drop(drop_on_complete);
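Both hunks in this file concern the same pattern: return to the caller once a write quorum is reached, and let the remaining requests run to completion in the background. A self-contained sketch of that idea using tokio and the futures crate (assumed dependencies; this is not Garage's try_write_many_sets):

use futures::stream::{FuturesUnordered, StreamExt};

/// Drive response futures until `quorum` successes, then detach the
/// rest so they finish without blocking the caller.
async fn collect_quorum<F>(requests: Vec<F>, quorum: usize) -> bool
where
    F: std::future::Future<Output = Result<(), ()>> + Send + 'static,
{
    let mut resp_stream: FuturesUnordered<F> = requests.into_iter().collect();
    let mut ok = 0;
    while let Some(res) = resp_stream.next().await {
        if res.is_ok() {
            ok += 1;
        }
        if ok >= quorum {
            // Quorum reached: continue all other requests in the
            // background, as the second hunk above does.
            tokio::spawn(async move {
                resp_stream.collect::<Vec<_>>().await;
            });
            return true;
        }
    }
    false
}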
diff --git a/src/rpc/system.rs b/src/rpc/system.rs
index 753d8c8d..0fa68218 100644
--- a/src/rpc/system.rs
+++ b/src/rpc/system.rs
@@ -54,7 +54,7 @@ pub const SYSTEM_RPC_PATH: &str = "garage_rpc/system.rs/SystemRpc";
/// RPC messages related to membership
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum SystemRpc {
- /// Response to successfull advertisements
+ /// Response to successful advertisements
Ok,
/// Request to connect to a specific node (in <pubkey>@<host>:<port> format, pubkey = full-length node ID)
Connect(String),
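For reference, the Connect argument format mentioned above could be split like this (a hypothetical parsing helper, not part of the diff):

/// Split "<pubkey>@<host>:<port>" into the full-length node ID and
/// the socket address part.
fn parse_connect(addr: &str) -> Option<(&str, &str)> {
    addr.split_once('@')
}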
@@ -172,7 +172,7 @@ pub struct ClusterHealth {
pub enum ClusterHealthStatus {
/// All nodes are available
Healthy,
- /// Some storage nodes are unavailable, but quorum is stil
+ /// Some storage nodes are unavailable, but quorum is still
/// achieved for all partitions
Degraded,
/// Quorum is not available for some partitions
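How these three states might be derived from partition availability, as a hypothetical helper (the final variant name `Unavailable` is an assumption, since the hunk cuts off before it):

/// Map partition quorum counts onto the health states above.
fn cluster_status(
    n_partitions: usize,
    partitions_with_quorum: usize,
    all_nodes_up: bool,
) -> ClusterHealthStatus {
    if partitions_with_quorum < n_partitions {
        ClusterHealthStatus::Unavailable // quorum lost somewhere
    } else if all_nodes_up {
        ClusterHealthStatus::Healthy
    } else {
        ClusterHealthStatus::Degraded // nodes down, quorum still holds
    }
}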
@@ -286,7 +286,7 @@ impl System {
let mut local_status = NodeStatus::initial(replication_factor, &layout_manager);
local_status.update_disk_usage(&config.metadata_dir, &config.data_dir);
- // ---- if enabled, set up additionnal peer discovery methods ----
+ // ---- if enabled, set up additional peer discovery methods ----
#[cfg(feature = "consul-discovery")]
let consul_discovery = match &config.consul_discovery {
Some(cfg) => Some(
@@ -337,7 +337,7 @@ impl System {
Ok(sys)
}
- /// Perform bootstraping, starting the ping loop
+ /// Perform bootstrapping, starting the ping loop
pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
join!(
self.netapp.clone().listen(