aboutsummaryrefslogtreecommitdiff
path: root/src/rpc/system.rs
diff options
context:
space:
mode:
authorAlex <alex@adnab.me>2024-03-07 16:32:52 +0000
committerAlex <alex@adnab.me>2024-03-07 16:32:52 +0000
commit20c0b4ffb2ae8e250068f8bf8001b5811a6bb6f2 (patch)
treeee7ab2f5eaf862927ef87d43e661557906742cc5 /src/rpc/system.rs
parent2fd13c7d135949a83ed52ed81672ac7e1956f134 (diff)
parentc1769bbe69f723fb3980cf4fdac7615cfb782720 (diff)
downloadgarage-20c0b4ffb2ae8e250068f8bf8001b5811a6bb6f2.tar.gz
garage-20c0b4ffb2ae8e250068f8bf8001b5811a6bb6f2.zip
Merge pull request 'ReplicationMode -> ConsistencyMode+ReplicationFactor' (#750) from yuka/garage:split-consistency-mode into next-0.10
Reviewed-on: https://git.deuxfleurs.fr/Deuxfleurs/garage/pulls/750
Diffstat (limited to 'src/rpc/system.rs')
-rw-r--r--src/rpc/system.rs22
1 file changed, 11 insertions, 11 deletions
diff --git a/src/rpc/system.rs b/src/rpc/system.rs
index 1c668306..54d589d2 100644
--- a/src/rpc/system.rs
+++ b/src/rpc/system.rs
@@ -112,8 +112,7 @@ pub struct System {
metrics: ArcSwapOption<SystemMetrics>,
- replication_mode: ReplicationMode,
- pub(crate) replication_factor: usize,
+ pub(crate) replication_factor: ReplicationFactor,
/// Path to metadata directory
pub metadata_dir: PathBuf,
@@ -243,7 +242,8 @@ impl System {
/// Create this node's membership manager
pub fn new(
network_key: NetworkKey,
- replication_mode: ReplicationMode,
+ replication_factor: ReplicationFactor,
+ consistency_mode: ConsistencyMode,
config: &Config,
) -> Result<Arc<Self>, Error> {
// ---- setup netapp RPC protocol ----
@@ -274,14 +274,13 @@ impl System {
let persist_peer_list = Persister::new(&config.metadata_dir, "peer_list");
// ---- setup cluster layout and layout manager ----
- let replication_factor = replication_mode.replication_factor();
-
let layout_manager = LayoutManager::new(
config,
netapp.id,
system_endpoint.clone(),
peering.clone(),
- replication_mode,
+ replication_factor,
+ consistency_mode,
)?;
let mut local_status = NodeStatus::initial(replication_factor, &layout_manager);
@@ -315,7 +314,6 @@ impl System {
netapp: netapp.clone(),
peering: peering.clone(),
system_endpoint,
- replication_mode,
replication_factor,
rpc_listen_addr: config.rpc_bind_addr,
rpc_public_addr,
@@ -427,7 +425,9 @@ impl System {
}
pub fn health(&self) -> ClusterHealth {
- let quorum = self.replication_mode.write_quorum();
+ let quorum = self
+ .replication_factor
+ .write_quorum(ConsistencyMode::Consistent);
// Gather information about running nodes.
// Technically, `nodes` contains currently running nodes, as well
@@ -631,7 +631,7 @@ impl System {
.count();
let not_configured = self.cluster_layout().check().is_err();
- let no_peers = n_connected < self.replication_factor;
+ let no_peers = n_connected < self.replication_factor.into();
let expected_n_nodes = self.cluster_layout().all_nodes().len();
let bad_peers = n_connected != expected_n_nodes;
@@ -774,14 +774,14 @@ impl EndpointHandler<SystemRpc> for System {
}
impl NodeStatus {
- fn initial(replication_factor: usize, layout_manager: &LayoutManager) -> Self {
+ fn initial(replication_factor: ReplicationFactor, layout_manager: &LayoutManager) -> Self {
NodeStatus {
hostname: Some(
gethostname::gethostname()
.into_string()
.unwrap_or_else(|_| "<invalid utf-8>".to_string()),
),
- replication_factor,
+ replication_factor: replication_factor.into(),
layout_digest: layout_manager.layout().digest(),
meta_disk_avail: None,
data_disk_avail: None,