Diffstat (limited to 'src/table/replication')
-rw-r--r--  src/table/replication/fullcopy.rs     | 33
-rw-r--r--  src/table/replication/parameters.rs   | 13
-rw-r--r--  src/table/replication/sharded.rs      | 20
3 files changed, 31 insertions(+), 35 deletions(-)
diff --git a/src/table/replication/fullcopy.rs b/src/table/replication/fullcopy.rs
index a5faece9..aea8c1f3 100644
--- a/src/table/replication/fullcopy.rs
+++ b/src/table/replication/fullcopy.rs
@@ -8,21 +8,10 @@ use crate::replication::*;
#[derive(Clone)]
pub struct TableFullReplication {
+ pub system: Arc<System>,
pub max_faults: usize,
}
-#[derive(Clone)]
-struct Neighbors {
- ring: Arc<Ring>,
- neighbors: Vec<UUID>,
-}
-
-impl TableFullReplication {
- pub fn new(max_faults: usize) -> Self {
- TableFullReplication { max_faults }
- }
-}
-
impl TableReplication for TableFullReplication {
// Full replication schema: all nodes store everything
// Writes are disseminated in an epidemic manner in the network
@@ -30,18 +19,23 @@ impl TableReplication for TableFullReplication {
// Advantage: do all reads locally, extremely fast
// Inconvenient: only suitable to reasonably small tables
- fn read_nodes(&self, _hash: &Hash, system: &System) -> Vec<UUID> {
- vec![system.id]
+ fn partition_of(&self, _hash: &Hash) -> u16 {
+ 0u16
+ }
+
+ fn read_nodes(&self, _hash: &Hash) -> Vec<UUID> {
+ vec![self.system.id]
}
fn read_quorum(&self) -> usize {
1
}
- fn write_nodes(&self, hash: &Hash, system: &System) -> Vec<UUID> {
- self.replication_nodes(hash, system.ring.borrow().as_ref())
+ fn write_nodes(&self, _hash: &Hash) -> Vec<UUID> {
+ let ring = self.system.ring.borrow();
+ ring.config.members.keys().cloned().collect::<Vec<_>>()
}
- fn write_quorum(&self, system: &System) -> usize {
- let nmembers = system.ring.borrow().config.members.len();
+ fn write_quorum(&self) -> usize {
+ let nmembers = self.system.ring.borrow().config.members.len();
if nmembers > self.max_faults {
nmembers - self.max_faults
} else {
@@ -52,9 +46,6 @@ impl TableReplication for TableFullReplication {
self.max_faults
}
- fn replication_nodes(&self, _hash: &Hash, ring: &Ring) -> Vec<UUID> {
- ring.config.members.keys().cloned().collect::<Vec<_>>()
- }
fn split_points(&self, _ring: &Ring) -> Vec<Hash> {
let mut ret = vec![];
ret.push([0u8; 32].into());
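With this change, TableFullReplication owns an Arc<System>, so write_quorum can consult the ring itself instead of receiving a &System on every call. The quorum rule is plain arithmetic: with nmembers cluster members and up to max_faults tolerated absences, a write must reach nmembers - max_faults nodes. The standalone sketch below models only that rule; it is not the garage code, the function name is made up for the example, and the fallback of 1 is an assumption since the else branch is truncated in the hunk above.

// Standalone model of the full-copy write quorum rule shown above.
// `full_copy_write_quorum` is a name invented for this sketch; the
// fallback value of 1 is assumed (the else branch is not visible in the diff).
fn full_copy_write_quorum(nmembers: usize, max_faults: usize) -> usize {
    if nmembers > max_faults {
        nmembers - max_faults
    } else {
        // Degenerate case: fewer members than tolerated faults (assumed fallback).
        1
    }
}

fn main() {
    // With 5 members and max_faults = 1, every write must reach 4 nodes.
    assert_eq!(full_copy_write_quorum(5, 1), 4);
    // Cluster smaller than the fault budget still needs at least one ack.
    assert_eq!(full_copy_write_quorum(2, 3), 1);
    println!("quorum(5, 1) = {}", full_copy_write_quorum(5, 1));
}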
diff --git a/src/table/replication/parameters.rs b/src/table/replication/parameters.rs
index 4607b050..ace82bd9 100644
--- a/src/table/replication/parameters.rs
+++ b/src/table/replication/parameters.rs
@@ -1,4 +1,3 @@
-use garage_rpc::membership::System;
use garage_rpc::ring::Ring;
use garage_util::data::*;
@@ -7,16 +6,18 @@ pub trait TableReplication: Send + Sync {
// See examples in table_sharded.rs and table_fullcopy.rs
// To understand various replication methods
+ // Partition number of data item (for Merkle tree)
+ fn partition_of(&self, hash: &Hash) -> u16;
+
// Which nodes to send reads from
- fn read_nodes(&self, hash: &Hash, system: &System) -> Vec<UUID>;
+ fn read_nodes(&self, hash: &Hash) -> Vec<UUID>;
fn read_quorum(&self) -> usize;
// Which nodes to send writes to
- fn write_nodes(&self, hash: &Hash, system: &System) -> Vec<UUID>;
- fn write_quorum(&self, system: &System) -> usize;
+ fn write_nodes(&self, hash: &Hash) -> Vec<UUID>;
+ fn write_quorum(&self) -> usize;
fn max_write_errors(&self) -> usize;
- // Which are the nodes that do actually replicate the data
- fn replication_nodes(&self, hash: &Hash, ring: &Ring) -> Vec<UUID>;
+ // Get partition boundaries
fn split_points(&self, ring: &Ring) -> Vec<Hash>;
}
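Seen in one place, the trait after this patch no longer threads a &System through every call: each strategy answers from state it owns (the Arc<System> added to the structs above). The sketch below is a minimal self-contained model of that shape, not the actual garage definitions: Hash and Uuid are plain aliases standing in for garage_util types, ToyReplication is invented for the example, and split_points with its &Ring parameter is omitted so the mock compiles on its own.

use std::sync::Arc;

// Stand-in aliases so the example builds without garage_util / garage_rpc.
type Hash = [u8; 32];
type Uuid = [u8; 32];

trait TableReplication: Send + Sync {
    // Partition number of a data item (used for the Merkle tree).
    fn partition_of(&self, hash: &Hash) -> u16;
    // Which nodes to read from, and how many must answer.
    fn read_nodes(&self, hash: &Hash) -> Vec<Uuid>;
    fn read_quorum(&self) -> usize;
    // Which nodes to write to, and how many acknowledgements are required.
    fn write_nodes(&self, hash: &Hash) -> Vec<Uuid>;
    fn write_quorum(&self) -> usize;
    fn max_write_errors(&self) -> usize;
}

// Toy strategy that carries its own membership list, mirroring how the
// patched structs now hold an Arc<System> instead of taking &System per call.
struct ToyReplication {
    members: Arc<Vec<Uuid>>,
}

impl TableReplication for ToyReplication {
    fn partition_of(&self, _hash: &Hash) -> u16 {
        0
    }
    fn read_nodes(&self, _hash: &Hash) -> Vec<Uuid> {
        self.members.as_ref().clone()
    }
    fn read_quorum(&self) -> usize {
        1
    }
    fn write_nodes(&self, _hash: &Hash) -> Vec<Uuid> {
        self.members.as_ref().clone()
    }
    fn write_quorum(&self) -> usize {
        self.members.len()
    }
    fn max_write_errors(&self) -> usize {
        0
    }
}

fn main() {
    let repl = ToyReplication { members: Arc::new(vec![[1u8; 32], [2u8; 32]]) };
    let hash = [0u8; 32];
    // Callers no longer pass a &System: the strategy answers from its own state.
    println!(
        "partition {}: write to {} nodes, quorum {}",
        repl.partition_of(&hash),
        repl.write_nodes(&hash).len(),
        repl.write_quorum()
    );
}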
diff --git a/src/table/replication/sharded.rs b/src/table/replication/sharded.rs
index 886c7c08..966be31a 100644
--- a/src/table/replication/sharded.rs
+++ b/src/table/replication/sharded.rs
@@ -1,3 +1,5 @@
+use std::sync::Arc;
+
use garage_rpc::membership::System;
use garage_rpc::ring::Ring;
use garage_util::data::*;
@@ -6,6 +8,7 @@ use crate::replication::*;
#[derive(Clone)]
pub struct TableShardedReplication {
+ pub system: Arc<System>,
pub replication_factor: usize,
pub read_quorum: usize,
pub write_quorum: usize,
@@ -19,28 +22,29 @@ impl TableReplication for TableShardedReplication {
// - reads are done on all of the nodes that replicate the data
// - writes as well
- fn read_nodes(&self, hash: &Hash, system: &System) -> Vec<UUID> {
- let ring = system.ring.borrow().clone();
+ fn partition_of(&self, hash: &Hash) -> u16 {
+ self.system.ring.borrow().partition_of(hash)
+ }
+
+ fn read_nodes(&self, hash: &Hash) -> Vec<UUID> {
+ let ring = self.system.ring.borrow().clone();
ring.walk_ring(&hash, self.replication_factor)
}
fn read_quorum(&self) -> usize {
self.read_quorum
}
- fn write_nodes(&self, hash: &Hash, system: &System) -> Vec<UUID> {
- let ring = system.ring.borrow().clone();
+ fn write_nodes(&self, hash: &Hash) -> Vec<UUID> {
+ let ring = self.system.ring.borrow();
ring.walk_ring(&hash, self.replication_factor)
}
- fn write_quorum(&self, _system: &System) -> usize {
+ fn write_quorum(&self) -> usize {
self.write_quorum
}
fn max_write_errors(&self) -> usize {
self.replication_factor - self.write_quorum
}
- fn replication_nodes(&self, hash: &Hash, ring: &Ring) -> Vec<UUID> {
- ring.walk_ring(&hash, self.replication_factor)
- }
fn split_points(&self, ring: &Ring) -> Vec<Hash> {
let mut ret = vec![];
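For the sharded strategy, both partition_of and the node lists come from the ring: the hash is mapped to a partition, and the replica set is obtained by walking the ring from that partition until replication_factor distinct nodes have been collected. The sketch below is only a rough conceptual model of that lookup, with a made-up ToyRing type and toy partitioning; it is not garage's actual Ring layout or walk_ring implementation.

// Rough conceptual model of the ring lookup used by the sharded strategy.
// `ToyRing` and its methods are invented for this sketch.
type Hash = [u8; 32];
type Uuid = u64;

struct ToyRing {
    // One entry per partition: the node owning that slot, in ring order.
    slots: Vec<Uuid>,
}

impl ToyRing {
    fn partition_of(&self, hash: &Hash) -> u16 {
        // Toy partitioning: first two bytes of the hash, modulo the slot count.
        let p = u16::from_be_bytes([hash[0], hash[1]]);
        p % self.slots.len() as u16
    }

    fn walk(&self, hash: &Hash, n: usize) -> Vec<Uuid> {
        // Collect up to `n` distinct nodes clockwise from the hash's partition,
        // scanning at most one full turn of the ring.
        let start = self.partition_of(hash) as usize;
        let mut out = Vec::new();
        for i in 0..self.slots.len() {
            if out.len() >= n {
                break;
            }
            let node = self.slots[(start + i) % self.slots.len()];
            if !out.contains(&node) {
                out.push(node);
            }
        }
        out
    }
}

fn main() {
    let ring = ToyRing { slots: vec![1, 2, 3, 1, 2, 3, 1, 2] };
    let hash = [0u8; 32];
    // With replication_factor = 3, three distinct nodes replicate this hash.
    println!("partition {} -> nodes {:?}", ring.partition_of(&hash), ring.walk(&hash, 3));
}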