Diffstat (limited to 'src/block')
-rw-r--r--  src/block/Cargo.toml    18
-rw-r--r--  src/block/block.rs      48
-rw-r--r--  src/block/lib.rs         2
-rw-r--r--  src/block/manager.rs   941
-rw-r--r--  src/block/metrics.rs     4
-rw-r--r--  src/block/rc.rs         49
-rw-r--r--  src/block/repair.rs    466
-rw-r--r--  src/block/resync.rs    589
8 files changed, 1450 insertions, 667 deletions
diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml
index 9cba69ee..cd409001 100644
--- a/src/block/Cargo.toml
+++ b/src/block/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "garage_block"
-version = "0.7.0"
+version = "0.8.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@@ -14,20 +14,22 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-garage_rpc = { version = "0.7.0", path = "../rpc" }
-garage_util = { version = "0.7.0", path = "../util" }
-garage_table = { version = "0.7.0", path = "../table" }
+garage_db = { version = "0.8.0", path = "../db" }
+garage_rpc = { version = "0.8.0", path = "../rpc" }
+garage_util = { version = "0.8.0", path = "../util" }
+garage_table = { version = "0.8.0", path = "../table" }
opentelemetry = "0.17"
+arc-swap = "1.5"
async-trait = "0.1.7"
bytes = "1.0"
hex = "0.4"
tracing = "0.1.30"
rand = "0.8"
-zstd = { version = "0.9", default-features = false }
-sled = "0.34"
+async-compression = { version = "0.3", features = ["tokio", "zstd"] }
+zstd = { version = "0.9", default-features = false }
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
@@ -36,3 +38,7 @@ serde_bytes = "0.11"
futures = "0.3"
futures-util = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+tokio-util = { version = "0.6", features = ["io"] }
+
+[features]
+system-libs = [ "zstd/pkg-config" ]
diff --git a/src/block/block.rs b/src/block/block.rs
index 4d3fbcb8..935aa900 100644
--- a/src/block/block.rs
+++ b/src/block/block.rs
@@ -1,16 +1,22 @@
+use bytes::Bytes;
use serde::{Deserialize, Serialize};
use zstd::stream::{decode_all as zstd_decode, Encoder};
use garage_util::data::*;
use garage_util::error::*;
+#[derive(Debug, Serialize, Deserialize, Copy, Clone)]
+pub enum DataBlockHeader {
+ Plain,
+ Compressed,
+}
+
/// A possibly compressed block of data
-#[derive(Debug, Serialize, Deserialize)]
pub enum DataBlock {
/// Uncompressed data
- Plain(#[serde(with = "serde_bytes")] Vec<u8>),
+ Plain(Bytes),
/// Data compressed with zstd
- Compressed(#[serde(with = "serde_bytes")] Vec<u8>),
+ Compressed(Bytes),
}
impl DataBlock {
@@ -30,7 +36,7 @@ impl DataBlock {
/// Get the buffer, possibly decompressing it, and verify its integrity.
/// For a Plain block, the data is compared to the hash; for a Compressed block, zstd's
/// checksumming system is used instead.
- pub fn verify_get(self, hash: Hash) -> Result<Vec<u8>, Error> {
+ pub fn verify_get(self, hash: Hash) -> Result<Bytes, Error> {
match self {
DataBlock::Plain(data) => {
if blake2sum(&data) == hash {
@@ -39,9 +45,9 @@ impl DataBlock {
Err(Error::CorruptData(hash))
}
}
- DataBlock::Compressed(data) => {
- zstd_decode(&data[..]).map_err(|_| Error::CorruptData(hash))
- }
+ DataBlock::Compressed(data) => zstd_decode(&data[..])
+ .map_err(|_| Error::CorruptData(hash))
+ .map(Bytes::from),
}
}
@@ -61,13 +67,31 @@ impl DataBlock {
}
}
- pub fn from_buffer(data: Vec<u8>, level: Option<i32>) -> DataBlock {
- if let Some(level) = level {
- if let Ok(data) = zstd_encode(&data[..], level) {
- return DataBlock::Compressed(data);
+ pub async fn from_buffer(data: Bytes, level: Option<i32>) -> DataBlock {
+ tokio::task::spawn_blocking(move || {
+ if let Some(level) = level {
+ if let Ok(data) = zstd_encode(&data[..], level) {
+ return DataBlock::Compressed(data.into());
+ }
}
+ DataBlock::Plain(data)
+ })
+ .await
+ .unwrap()
+ }
+
+ pub fn into_parts(self) -> (DataBlockHeader, Bytes) {
+ match self {
+ DataBlock::Plain(data) => (DataBlockHeader::Plain, data),
+ DataBlock::Compressed(data) => (DataBlockHeader::Compressed, data),
+ }
+ }
+
+ pub fn from_parts(h: DataBlockHeader, bytes: Bytes) -> Self {
+ match h {
+ DataBlockHeader::Plain => DataBlock::Plain(bytes),
+ DataBlockHeader::Compressed => DataBlock::Compressed(bytes),
}
- DataBlock::Plain(data)
}
}
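
A minimal usage sketch of the DataBlock API introduced above (assuming the types from this diff: DataBlock, DataBlockHeader, Bytes, and garage_util's Hash and Error). The header travels inside the RPC message while the body travels as the attached byte stream:

    use bytes::Bytes;

    async fn roundtrip(payload: Bytes, hash: Hash) -> Result<Bytes, Error> {
        // Compression now runs on a blocking thread (spawn_blocking), hence the .await;
        // Some(3) is just an arbitrary zstd level chosen for this sketch.
        let block = DataBlock::from_buffer(payload, Some(3)).await;
        // Split into a small serializable header plus the raw body.
        let (header, body) = block.into_parts();
        // The receiving side rebuilds the block and checks it against the expected hash.
        DataBlock::from_parts(header, body).verify_get(hash)
    }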
diff --git a/src/block/lib.rs b/src/block/lib.rs
index dc685657..d2814f77 100644
--- a/src/block/lib.rs
+++ b/src/block/lib.rs
@@ -2,6 +2,8 @@
extern crate tracing;
pub mod manager;
+pub mod repair;
+pub mod resync;
mod block;
mod metrics;
diff --git a/src/block/manager.rs b/src/block/manager.rs
index 1c04a335..7f439b96 100644
--- a/src/block/manager.rs
+++ b/src/block/manager.rs
@@ -1,29 +1,32 @@
-use std::convert::TryInto;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
+use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
+use bytes::Bytes;
use serde::{Deserialize, Serialize};
-use futures::future::*;
-use futures::select;
+use futures::Stream;
+use futures_util::stream::StreamExt;
use tokio::fs;
-use tokio::io::{AsyncReadExt, AsyncWriteExt};
-use tokio::sync::{watch, Mutex, Notify};
+use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
+use tokio::sync::{mpsc, Mutex, MutexGuard};
use opentelemetry::{
trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},
- Context, KeyValue,
+ Context,
};
+use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
+
+use garage_db as db;
+
use garage_util::data::*;
use garage_util::error::*;
use garage_util::metrics::RecordDuration;
-use garage_util::sled_counter::SledCountedTree;
-use garage_util::time::*;
-use garage_util::tranquilizer::Tranquilizer;
+use garage_rpc::rpc_helper::OrderTag;
use garage_rpc::system::System;
use garage_rpc::*;
@@ -32,24 +35,12 @@ use garage_table::replication::{TableReplication, TableShardedReplication};
use crate::block::*;
use crate::metrics::*;
use crate::rc::*;
+use crate::repair::*;
+use crate::resync::*;
/// Size under which data will be stored inlined in database instead of as files
pub const INLINE_THRESHOLD: usize = 3072;
-// Timeout for RPCs that read and write blocks to remote nodes
-const BLOCK_RW_TIMEOUT: Duration = Duration::from_secs(30);
-// Timeout for RPCs that ask other nodes whether they need a copy
-// of a given block before we delete it locally
-const NEED_BLOCK_QUERY_TIMEOUT: Duration = Duration::from_secs(5);
-
-// The delay between the time where a resync operation fails
-// and the time when it is retried, with exponential backoff
-// (multiplied by 2, 4, 8, 16, etc. for every consecutive failure).
-const RESYNC_RETRY_DELAY: Duration = Duration::from_secs(60);
-// The minimum retry delay is 60 seconds = 1 minute
-// The maximum retry delay is 60 seconds * 2^6 = 60 seconds << 6 = 64 minutes (~1 hour)
-const RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER: u64 = 6;
-
// The delay between the moment when the reference counter
// drops to zero, and the moment where we allow ourselves
// to delete the block locally.
@@ -60,12 +51,12 @@ pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);
pub enum BlockRpc {
Ok,
/// Message to ask for a block of data, by hash
- GetBlock(Hash),
+ GetBlock(Hash, Option<OrderTag>),
/// Message to send a block of data, either because requested, or for first delivery of new
/// block
PutBlock {
hash: Hash,
- data: DataBlock,
+ header: DataBlockHeader,
},
/// Ask other node if they should have this block, but don't actually have it
NeedBlockQuery(Hash),
@@ -85,20 +76,18 @@ pub struct BlockManager {
pub data_dir: PathBuf,
compression_level: Option<i32>,
- background_tranquility: u32,
- mutation_lock: Mutex<BlockManagerLocked>,
+ mutation_lock: [Mutex<BlockManagerLocked>; 256],
- rc: BlockRc,
+ pub(crate) rc: BlockRc,
+ pub resync: BlockResyncManager,
- resync_queue: SledCountedTree,
- resync_notify: Notify,
- resync_errors: SledCountedTree,
+ pub(crate) system: Arc<System>,
+ pub(crate) endpoint: Arc<Endpoint<BlockRpc, Self>>,
- system: Arc<System>,
- endpoint: Arc<Endpoint<BlockRpc, Self>>,
+ pub(crate) metrics: BlockManagerMetrics,
- metrics: BlockManagerMetrics,
+ tx_scrub_command: mpsc::Sender<ScrubWorkerCommand>,
}
// This custom struct contains functions that must only be run
@@ -108,10 +97,9 @@ struct BlockManagerLocked();
impl BlockManager {
pub fn new(
- db: &sled::Db,
+ db: &db::Db,
data_dir: PathBuf,
compression_level: Option<i32>,
- background_tranquility: u32,
replication: TableShardedReplication,
system: Arc<System>,
) -> Arc<Self> {
@@ -120,215 +108,323 @@ impl BlockManager {
.expect("Unable to open block_local_rc tree");
let rc = BlockRc::new(rc);
- let resync_queue = db
- .open_tree("block_local_resync_queue")
- .expect("Unable to open block_local_resync_queue tree");
- let resync_queue = SledCountedTree::new(resync_queue);
-
- let resync_errors = db
- .open_tree("block_local_resync_errors")
- .expect("Unable to open block_local_resync_errors tree");
- let resync_errors = SledCountedTree::new(resync_errors);
+ let resync = BlockResyncManager::new(db, &system);
let endpoint = system
.netapp
- .endpoint("garage_model/block.rs/Rpc".to_string());
+ .endpoint("garage_block/manager.rs/Rpc".to_string());
- let manager_locked = BlockManagerLocked();
+ let metrics = BlockManagerMetrics::new(resync.queue.clone(), resync.errors.clone());
- let metrics = BlockManagerMetrics::new(resync_queue.clone(), resync_errors.clone());
+ let (scrub_tx, scrub_rx) = mpsc::channel(1);
let block_manager = Arc::new(Self {
replication,
data_dir,
compression_level,
- background_tranquility,
- mutation_lock: Mutex::new(manager_locked),
+ mutation_lock: [(); 256].map(|_| Mutex::new(BlockManagerLocked())),
rc,
- resync_queue,
- resync_notify: Notify::new(),
- resync_errors,
+ resync,
system,
endpoint,
metrics,
+ tx_scrub_command: scrub_tx,
});
block_manager.endpoint.set_handler(block_manager.clone());
- block_manager.clone().spawn_background_worker();
+ // Spawn a bunch of resync workers
+ for index in 0..MAX_RESYNC_WORKERS {
+ let worker = ResyncWorker::new(index, block_manager.clone());
+ block_manager.system.background.spawn_worker(worker);
+ }
+
+ // Spawn scrub worker
+ let scrub_worker = ScrubWorker::new(block_manager.clone(), scrub_rx);
+ block_manager.system.background.spawn_worker(scrub_worker);
block_manager
}
/// Ask nodes that might have a (possibly compressed) block for it
- async fn rpc_get_raw_block(&self, hash: &Hash) -> Result<DataBlock, Error> {
+ /// Return it as a stream with a header
+ async fn rpc_get_raw_block_streaming(
+ &self,
+ hash: &Hash,
+ order_tag: Option<OrderTag>,
+ ) -> Result<(DataBlockHeader, ByteStream), Error> {
let who = self.replication.read_nodes(hash);
- let resps = self
- .system
- .rpc
- .try_call_many(
- &self.endpoint,
- &who[..],
- BlockRpc::GetBlock(*hash),
- RequestStrategy::with_priority(PRIO_NORMAL)
- .with_quorum(1)
- .with_timeout(BLOCK_RW_TIMEOUT)
- .interrupt_after_quorum(true),
- )
- .await?;
+ let who = self.system.rpc.request_order(&who);
+
+ for node in who.iter() {
+ let node_id = NodeID::from(*node);
+ let rpc = self.endpoint.call_streaming(
+ &node_id,
+ BlockRpc::GetBlock(*hash, order_tag),
+ PRIO_NORMAL | PRIO_SECONDARY,
+ );
+ tokio::select! {
+ res = rpc => {
+ let res = match res {
+ Ok(res) => res,
+ Err(e) => {
+ debug!("Node {:?} returned error: {}", node, e);
+ continue;
+ }
+ };
+ let (header, stream) = match res.into_parts() {
+ (Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream),
+ _ => {
+ debug!("Node {:?} returned a malformed response", node);
+ continue;
+ }
+ };
+ return Ok((header, stream));
+ }
+ _ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
+ debug!("Node {:?} didn't return block in time, trying next.", node);
+ }
+ };
+ }
- for resp in resps {
- if let BlockRpc::PutBlock { data, .. } = resp {
- return Ok(data);
- }
+ Err(Error::Message(format!(
+ "Unable to read block {:?}: no node returned a valid block",
+ hash
+ )))
+ }
+
+ /// Ask nodes that might have a (possibly compressed) block for it
+ /// Return its entire body
+ pub(crate) async fn rpc_get_raw_block(
+ &self,
+ hash: &Hash,
+ order_tag: Option<OrderTag>,
+ ) -> Result<DataBlock, Error> {
+ let who = self.replication.read_nodes(hash);
+ let who = self.system.rpc.request_order(&who);
+
+ for node in who.iter() {
+ let node_id = NodeID::from(*node);
+ let rpc = self.endpoint.call_streaming(
+ &node_id,
+ BlockRpc::GetBlock(*hash, order_tag),
+ PRIO_NORMAL | PRIO_SECONDARY,
+ );
+ tokio::select! {
+ res = rpc => {
+ let res = match res {
+ Ok(res) => res,
+ Err(e) => {
+ debug!("Node {:?} returned error: {}", node, e);
+ continue;
+ }
+ };
+ let (header, stream) = match res.into_parts() {
+ (Ok(BlockRpc::PutBlock { hash: _, header }), Some(stream)) => (header, stream),
+ _ => {
+ debug!("Node {:?} returned a malformed response", node);
+ continue;
+ }
+ };
+ match read_stream_to_end(stream).await {
+ Ok(bytes) => return Ok(DataBlock::from_parts(header, bytes)),
+ Err(e) => {
+ debug!("Error reading stream from node {:?}: {}", node, e);
+ }
+ }
+ }
+ _ = tokio::time::sleep(self.system.rpc.rpc_timeout()) => {
+ debug!("Node {:?} didn't return block in time, trying next.", node);
+ }
+ };
}
+
Err(Error::Message(format!(
- "Unable to read block {:?}: no valid blocks returned",
+ "Unable to read block {:?}: no node returned a valid block",
hash
)))
}
// ---- Public interface ----
+ /// Ask nodes that might have a block for it,
+ /// return it as a stream
+ pub async fn rpc_get_block_streaming(
+ &self,
+ hash: &Hash,
+ order_tag: Option<OrderTag>,
+ ) -> Result<
+ Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static>>,
+ Error,
+ > {
+ let (header, stream) = self.rpc_get_raw_block_streaming(hash, order_tag).await?;
+ match header {
+ DataBlockHeader::Plain => Ok(stream),
+ DataBlockHeader::Compressed => {
+ // Too many things, I hate it.
+ let reader = stream_asyncread(stream);
+ let reader = BufReader::new(reader);
+ let reader = async_compression::tokio::bufread::ZstdDecoder::new(reader);
+ Ok(Box::pin(tokio_util::io::ReaderStream::new(reader)))
+ }
+ }
+ }
+
/// Ask nodes that might have a block for it
- pub async fn rpc_get_block(&self, hash: &Hash) -> Result<Vec<u8>, Error> {
- self.rpc_get_raw_block(hash).await?.verify_get(*hash)
+ pub async fn rpc_get_block(
+ &self,
+ hash: &Hash,
+ order_tag: Option<OrderTag>,
+ ) -> Result<Bytes, Error> {
+ self.rpc_get_raw_block(hash, order_tag)
+ .await?
+ .verify_get(*hash)
}
/// Send block to nodes that should have it
- pub async fn rpc_put_block(&self, hash: Hash, data: Vec<u8>) -> Result<(), Error> {
+ pub async fn rpc_put_block(&self, hash: Hash, data: Bytes) -> Result<(), Error> {
let who = self.replication.write_nodes(&hash);
- let data = DataBlock::from_buffer(data, self.compression_level);
+
+ let (header, bytes) = DataBlock::from_buffer(data, self.compression_level)
+ .await
+ .into_parts();
+ let put_block_rpc =
+ Req::new(BlockRpc::PutBlock { hash, header })?.with_stream_from_buffer(bytes);
+
self.system
.rpc
.try_call_many(
&self.endpoint,
&who[..],
- BlockRpc::PutBlock { hash, data },
- RequestStrategy::with_priority(PRIO_NORMAL)
- .with_quorum(self.replication.write_quorum())
- .with_timeout(BLOCK_RW_TIMEOUT),
+ put_block_rpc,
+ RequestStrategy::with_priority(PRIO_NORMAL | PRIO_SECONDARY)
+ .with_quorum(self.replication.write_quorum()),
)
.await?;
- Ok(())
- }
- /// Launch the repair procedure on the data store
- ///
- /// This will list all blocks locally present, as well as those
- /// that are required because of refcount > 0, and will try
- /// to fix any mismatch between the two.
- pub async fn repair_data_store(&self, must_exit: &watch::Receiver<bool>) -> Result<(), Error> {
- // 1. Repair blocks from RC table.
- for (i, entry) in self.rc.rc.iter().enumerate() {
- let (hash, _) = entry?;
- let hash = Hash::try_from(&hash[..]).unwrap();
- self.put_to_resync(&hash, Duration::from_secs(0))?;
- if i & 0xFF == 0 && *must_exit.borrow() {
- return Ok(());
- }
- }
-
- // 2. Repair blocks actually on disk
- // Lists all blocks on disk and adds them to the resync queue.
- // This allows us to find blocks we are storing but don't actually need,
- // so that we can offload them if necessary and then delete them locally.
- self.for_each_file(
- (),
- move |_, hash| async move {
- self.put_to_resync(&hash, Duration::from_secs(0))
- .map_err(Into::into)
- },
- must_exit,
- )
- .await
- }
-
- /// Verify integrity of each block on disk. Use `speed_limit` to limit the load generated by
- /// this function.
- pub async fn scrub_data_store(
- &self,
- must_exit: &watch::Receiver<bool>,
- tranquility: u32,
- ) -> Result<(), Error> {
- let tranquilizer = Tranquilizer::new(30);
- self.for_each_file(
- tranquilizer,
- move |mut tranquilizer, hash| async move {
- let _ = self.read_block(&hash).await;
- tranquilizer.tranquilize(tranquility).await;
- Ok(tranquilizer)
- },
- must_exit,
- )
- .await
- }
-
- /// Get lenght of resync queue
- pub fn resync_queue_len(&self) -> usize {
- self.resync_queue.len()
+ Ok(())
}
- /// Get number of blocks that have an error
- pub fn resync_errors_len(&self) -> usize {
- self.resync_errors.len()
+ /// Get number of items in the refcount table
+ pub fn rc_len(&self) -> Result<usize, Error> {
+ Ok(self.rc.rc.len()?)
}
- /// Get number of items in the refcount table
- pub fn rc_len(&self) -> usize {
- self.rc.rc.len()
+ /// Send command to start/stop/manager scrub worker
+ pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) {
+ let _ = self.tx_scrub_command.send(cmd).await;
}
//// ----- Managing the reference counter ----
/// Increment the number of times a block is used, putting it to resynchronization if it is
/// required, but not known
- pub fn block_incref(&self, hash: &Hash) -> Result<(), Error> {
- if self.rc.block_incref(hash)? {
+ pub fn block_incref(
+ self: &Arc<Self>,
+ tx: &mut db::Transaction,
+ hash: Hash,
+ ) -> db::TxOpResult<()> {
+ if self.rc.block_incref(tx, &hash)? {
// When the reference counter is incremented, there is
// normally a node that is responsible for sending us the
// data of the block. However that operation may fail,
// so in all cases we add the block here to the todo list
// to check later that it arrived correctly, and if not
// we will fetch it from someone.
- self.put_to_resync(hash, 2 * BLOCK_RW_TIMEOUT)?;
+ let this = self.clone();
+ tokio::spawn(async move {
+ if let Err(e) = this
+ .resync
+ .put_to_resync(&hash, 2 * this.system.rpc.rpc_timeout())
+ {
+ error!("Block {:?} could not be put in resync queue: {}.", hash, e);
+ }
+ });
}
Ok(())
}
/// Decrement the number of times a block is used
- pub fn block_decref(&self, hash: &Hash) -> Result<(), Error> {
- if self.rc.block_decref(hash)? {
+ pub fn block_decref(
+ self: &Arc<Self>,
+ tx: &mut db::Transaction,
+ hash: Hash,
+ ) -> db::TxOpResult<()> {
+ if self.rc.block_decref(tx, &hash)? {
// When the RC is decremented, it might drop to zero,
// indicating that we don't need the block.
// There is a delay before we garbage collect it;
// make sure that it is handled in the resync loop
// after that delay has passed.
- self.put_to_resync(hash, BLOCK_GC_DELAY + Duration::from_secs(10))?;
+ let this = self.clone();
+ tokio::spawn(async move {
+ if let Err(e) = this
+ .resync
+ .put_to_resync(&hash, BLOCK_GC_DELAY + Duration::from_secs(10))
+ {
+ error!("Block {:?} could not be put in resync queue: {}.", hash, e);
+ }
+ });
}
Ok(())
}
// ---- Reading and writing blocks locally ----
+ async fn handle_put_block(
+ &self,
+ hash: Hash,
+ header: DataBlockHeader,
+ stream: Option<ByteStream>,
+ ) -> Result<(), Error> {
+ let stream = stream.ok_or_message("missing stream")?;
+ let bytes = read_stream_to_end(stream).await?;
+ let data = DataBlock::from_parts(header, bytes);
+ self.write_block(&hash, &data).await
+ }
+
/// Write a block to disk
- async fn write_block(&self, hash: &Hash, data: &DataBlock) -> Result<BlockRpc, Error> {
+ pub(crate) async fn write_block(&self, hash: &Hash, data: &DataBlock) -> Result<(), Error> {
+ let tracer = opentelemetry::global::tracer("garage");
+
let write_size = data.inner_buffer().len() as u64;
- let res = self
- .mutation_lock
- .lock()
+ self.lock_mutate(hash)
.await
.write_block(hash, data, self)
.bound_record_duration(&self.metrics.block_write_duration)
+ .with_context(Context::current_with_span(
+ tracer.start("BlockManagerLocked::write_block"),
+ ))
.await?;
self.metrics.bytes_written.add(write_size);
- Ok(res)
+ Ok(())
+ }
+
+ async fn handle_get_block(&self, hash: &Hash, order_tag: Option<OrderTag>) -> Resp<BlockRpc> {
+ let block = match self.read_block(hash).await {
+ Ok(data) => data,
+ Err(e) => return Resp::new(Err(e)),
+ };
+
+ let (header, data) = block.into_parts();
+
+ let resp = Resp::new(Ok(BlockRpc::PutBlock {
+ hash: *hash,
+ header,
+ }))
+ .with_stream_from_buffer(data);
+
+ if let Some(order_tag) = order_tag {
+ resp.with_order_tag(order_tag)
+ } else {
+ resp
+ }
}
/// Read block from disk, verifying its integrity
- async fn read_block(&self, hash: &Hash) -> Result<BlockRpc, Error> {
+ pub(crate) async fn read_block(&self, hash: &Hash) -> Result<DataBlock, Error> {
let data = self
.read_block_internal(hash)
.bound_record_duration(&self.metrics.block_read_duration)
@@ -338,7 +434,7 @@ impl BlockManager {
.bytes_read
.add(data.inner_buffer().len() as u64);
- Ok(BlockRpc::PutBlock { hash: *hash, data })
+ Ok(data)
}
async fn read_block_internal(&self, hash: &Hash) -> Result<DataBlock, Error> {
@@ -347,7 +443,8 @@ impl BlockManager {
Ok(c) => c,
Err(e) => {
// Not found but maybe we should have had it ??
- self.put_to_resync(hash, 2 * BLOCK_RW_TIMEOUT)?;
+ self.resync
+ .put_to_resync(hash, 2 * self.system.rpc.rpc_timeout())?;
return Err(Into::into(e));
}
};
@@ -361,37 +458,47 @@ impl BlockManager {
drop(f);
let data = if compressed {
- DataBlock::Compressed(data)
+ DataBlock::Compressed(data.into())
} else {
- DataBlock::Plain(data)
+ DataBlock::Plain(data.into())
};
if data.verify(*hash).is_err() {
self.metrics.corruption_counter.add(1);
- self.mutation_lock
- .lock()
+ self.lock_mutate(hash)
.await
.move_block_to_corrupted(hash, self)
.await?;
- self.put_to_resync(hash, Duration::from_millis(0))?;
+ self.resync.put_to_resync(hash, Duration::from_millis(0))?;
return Err(Error::CorruptData(*hash));
}
Ok(data)
}
- /// Check if this node should have a block, but don't actually have it
- async fn need_block(&self, hash: &Hash) -> Result<bool, Error> {
- let BlockStatus { exists, needed } = self
- .mutation_lock
- .lock()
+ /// Check if this node has a block and whether it needs it
+ pub(crate) async fn check_block_status(&self, hash: &Hash) -> Result<BlockStatus, Error> {
+ self.lock_mutate(hash)
.await
.check_block_status(hash, self)
- .await?;
+ .await
+ }
+
+ /// Check if this node should have a block, but don't actually have it
+ async fn need_block(&self, hash: &Hash) -> Result<bool, Error> {
+ let BlockStatus { exists, needed } = self.check_block_status(hash).await?;
Ok(needed.is_nonzero() && !exists)
}
+ /// Delete block if it is not needed anymore
+ pub(crate) async fn delete_if_unneeded(&self, hash: &Hash) -> Result<(), Error> {
+ self.lock_mutate(hash)
+ .await
+ .delete_if_unneeded(hash, self)
+ .await
+ }
+
/// Utility: gives the path of the directory in which a block should be found
fn block_dir(&self, hash: &Hash) -> PathBuf {
let mut path = self.data_dir.clone();
@@ -419,431 +526,38 @@ impl BlockManager {
fs::metadata(&path).await.map(|_| false).map_err(Into::into)
}
- // ---- Resync loop ----
-
- // This part manages a queue of blocks that need to be
- // "resynchronized", i.e. that need to have a check that
- // they are at present if we need them, or that they are
- // deleted once the garbage collection delay has passed.
- //
- // Here are some explanations on how the resync queue works.
- // There are two Sled trees that are used to have information
- // about the status of blocks that need to be resynchronized:
- //
- // - resync_queue: a tree that is ordered first by a timestamp
- // (in milliseconds since Unix epoch) that is the time at which
- // the resync must be done, and second by block hash.
- // The key in this tree is just:
- // concat(timestamp (8 bytes), hash (32 bytes))
- // The value is the same 32-byte hash.
- //
- // - resync_errors: a tree that indicates for each block
- // if the last resync resulted in an error, and if so,
- // the following two informations (see the ErrorCounter struct):
- // - how many consecutive resync errors for this block?
- // - when was the last try?
- // These two informations are used to implement an
- // exponential backoff retry strategy.
- // The key in this tree is the 32-byte hash of the block,
- // and the value is the encoded ErrorCounter value.
- //
- // We need to have these two trees, because the resync queue
- // is not just a queue of items to process, but a set of items
- // that are waiting a specific delay until we can process them
- // (the delay being necessary both internally for the exponential
- // backoff strategy, and exposed as a parameter when adding items
- // to the queue, e.g. to wait until the GC delay has passed).
- // This is why we need one tree ordered by time, and one
- // ordered by identifier of item to be processed (block hash).
- //
- // When the worker wants to process an item it takes from
- // resync_queue, it checks in resync_errors that if there is an
- // exponential back-off delay to await, it has passed before we
- // process the item. If not, the item in the queue is skipped
- // (but added back for later processing after the time of the
- // delay).
- //
- // An alternative that would have seemed natural is to
- // only add items to resync_queue with a processing time that is
- // after the delay, but there are several issues with this:
- // - This requires to synchronize updates to resync_queue and
- // resync_errors (with the current model, there is only one thread,
- // the worker thread, that accesses resync_errors,
- // so no need to synchronize) by putting them both in a lock.
- // This would mean that block_incref might need to take a lock
- // before doing its thing, meaning it has much more chances of
- // not completing successfully if something bad happens to Garage.
- // Currently Garage is not able to recover from block_incref that
- // doesn't complete successfully, because it is necessary to ensure
- // the consistency between the state of the block manager and
- // information in the BlockRef table.
- // - If a resync fails, we put that block in the resync_errors table,
- // and also add it back to resync_queue to be processed after
- // the exponential back-off delay,
- // but maybe the block is already scheduled to be resynced again
- // at another time that is before the exponential back-off delay,
- // and we have no way to check that easily. This means that
- // in all cases, we need to check the resync_errors table
- // in the resync loop at the time when a block is popped from
- // the resync_queue.
- // Overall, the current design is therefore simpler and more robust
- // because it tolerates inconsistencies between the resync_queue
- // and resync_errors table (items being scheduled in resync_queue
- // for times that are earlier than the exponential back-off delay
- // is a natural condition that is handled properly).
-
- fn spawn_background_worker(self: Arc<Self>) {
- // Launch a background workers for background resync loop processing
- let background = self.system.background.clone();
- tokio::spawn(async move {
- tokio::time::sleep(Duration::from_secs(10)).await;
- background.spawn_worker("block resync worker".into(), move |must_exit| {
- self.resync_loop(must_exit)
- });
- });
- }
-
- fn put_to_resync(&self, hash: &Hash, delay: Duration) -> Result<(), sled::Error> {
- let when = now_msec() + delay.as_millis() as u64;
- self.put_to_resync_at(hash, when)
- }
-
- fn put_to_resync_at(&self, hash: &Hash, when: u64) -> Result<(), sled::Error> {
- trace!("Put resync_queue: {} {:?}", when, hash);
- let mut key = u64::to_be_bytes(when).to_vec();
- key.extend(hash.as_ref());
- self.resync_queue.insert(key, hash.as_ref())?;
- self.resync_notify.notify_waiters();
- Ok(())
- }
-
- async fn resync_loop(self: Arc<Self>, mut must_exit: watch::Receiver<bool>) {
- let mut tranquilizer = Tranquilizer::new(30);
-
- while !*must_exit.borrow() {
- match self.resync_iter(&mut must_exit).await {
- Ok(true) => {
- tranquilizer.tranquilize(self.background_tranquility).await;
- }
- Ok(false) => {
- tranquilizer.reset();
- }
- Err(e) => {
- // The errors that we have here are only Sled errors
- // We don't really know how to handle them so just ¯\_(ツ)_/¯
- // (there is kind of an assumption that Sled won't error on us,
- // if it does there is not much we can do -- TODO should we just panic?)
- error!(
- "Could not do a resync iteration: {} (this is a very bad error)",
- e
- );
- tranquilizer.reset();
- }
- }
- }
- }
-
- // The result of resync_iter is:
- // - Ok(true) -> a block was processed (successfully or not)
- // - Ok(false) -> no block was processed, but we are ready for the next iteration
- // - Err(_) -> a Sled error occurred when reading/writing from resync_queue/resync_errors
- async fn resync_iter(
- &self,
- must_exit: &mut watch::Receiver<bool>,
- ) -> Result<bool, sled::Error> {
- if let Some(first_pair_res) = self.resync_queue.iter().next() {
- let (time_bytes, hash_bytes) = first_pair_res?;
-
- let time_msec = u64::from_be_bytes(time_bytes[0..8].try_into().unwrap());
- let now = now_msec();
-
- if now >= time_msec {
- let hash = Hash::try_from(&hash_bytes[..]).unwrap();
-
- if let Some(ec) = self.resync_errors.get(hash.as_slice())? {
- let ec = ErrorCounter::decode(ec);
- if now < ec.next_try() {
- // if next retry after an error is not yet,
- // don't do resync and return early, but still
- // make sure the item is still in queue at expected time
- self.put_to_resync_at(&hash, ec.next_try())?;
- // ec.next_try() > now >= time_msec, so this remove
- // is not removing the one we added just above
- // (we want to do the remove after the insert to ensure
- // that the item is not lost if we crash in-between)
- self.resync_queue.remove(time_bytes)?;
- return Ok(false);
- }
- }
-
- let tracer = opentelemetry::global::tracer("garage");
- let trace_id = gen_uuid();
- let span = tracer
- .span_builder("Resync block")
- .with_trace_id(
- opentelemetry::trace::TraceId::from_hex(&hex::encode(
- &trace_id.as_slice()[..16],
- ))
- .unwrap(),
- )
- .with_attributes(vec![KeyValue::new("block", format!("{:?}", hash))])
- .start(&tracer);
-
- let res = self
- .resync_block(&hash)
- .with_context(Context::current_with_span(span))
- .bound_record_duration(&self.metrics.resync_duration)
- .await;
-
- self.metrics.resync_counter.add(1);
-
- if let Err(e) = &res {
- self.metrics.resync_error_counter.add(1);
- warn!("Error when resyncing {:?}: {}", hash, e);
-
- let err_counter = match self.resync_errors.get(hash.as_slice())? {
- Some(ec) => ErrorCounter::decode(ec).add1(now + 1),
- None => ErrorCounter::new(now + 1),
- };
-
- self.resync_errors
- .insert(hash.as_slice(), err_counter.encode())?;
-
- self.put_to_resync_at(&hash, err_counter.next_try())?;
- // err_counter.next_try() >= now + 1 > now,
- // the entry we remove from the queue is not
- // the entry we inserted with put_to_resync_at
- self.resync_queue.remove(time_bytes)?;
- } else {
- self.resync_errors.remove(hash.as_slice())?;
- self.resync_queue.remove(time_bytes)?;
- }
-
- Ok(true)
- } else {
- let delay = tokio::time::sleep(Duration::from_millis(time_msec - now));
- select! {
- _ = delay.fuse() => {},
- _ = self.resync_notify.notified().fuse() => {},
- _ = must_exit.changed().fuse() => {},
- }
- Ok(false)
- }
- } else {
- // Here we wait either for a notification that an item has been
- // added to the queue, or for a constant delay of 10 secs to expire.
- // The delay avoids a race condition where the notification happens
- // between the time we checked the queue and the first poll
- // to resync_notify.notified(): if that happens, we'll just loop
- // back 10 seconds later, which is fine.
- let delay = tokio::time::sleep(Duration::from_secs(10));
- select! {
- _ = delay.fuse() => {},
- _ = self.resync_notify.notified().fuse() => {},
- _ = must_exit.changed().fuse() => {},
- }
- Ok(false)
- }
- }
-
- async fn resync_block(&self, hash: &Hash) -> Result<(), Error> {
- let BlockStatus { exists, needed } = self
- .mutation_lock
+ async fn lock_mutate(&self, hash: &Hash) -> MutexGuard<'_, BlockManagerLocked> {
+ let tracer = opentelemetry::global::tracer("garage");
+ self.mutation_lock[hash.as_slice()[0] as usize]
.lock()
+ .with_context(Context::current_with_span(
+ tracer.start("Acquire mutation_lock"),
+ ))
.await
- .check_block_status(hash, self)
- .await?;
-
- if exists != needed.is_needed() || exists != needed.is_nonzero() {
- debug!(
- "Resync block {:?}: exists {}, nonzero rc {}, deletable {}",
- hash,
- exists,
- needed.is_nonzero(),
- needed.is_deletable(),
- );
- }
-
- if exists && needed.is_deletable() {
- info!("Resync block {:?}: offloading and deleting", hash);
-
- let mut who = self.replication.write_nodes(hash);
- if who.len() < self.replication.write_quorum() {
- return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string()));
- }
- who.retain(|id| *id != self.system.id);
-
- let msg = Arc::new(BlockRpc::NeedBlockQuery(*hash));
- let who_needs_fut = who.iter().map(|to| {
- self.system.rpc.call_arc(
- &self.endpoint,
- *to,
- msg.clone(),
- RequestStrategy::with_priority(PRIO_BACKGROUND)
- .with_timeout(NEED_BLOCK_QUERY_TIMEOUT),
- )
- });
- let who_needs_resps = join_all(who_needs_fut).await;
-
- let mut need_nodes = vec![];
- for (node, needed) in who.iter().zip(who_needs_resps.into_iter()) {
- match needed.err_context("NeedBlockQuery RPC")? {
- BlockRpc::NeedBlockReply(needed) => {
- if needed {
- need_nodes.push(*node);
- }
- }
- m => {
- return Err(Error::unexpected_rpc_message(m));
- }
- }
- }
-
- if !need_nodes.is_empty() {
- trace!(
- "Block {:?} needed by {} nodes, sending",
- hash,
- need_nodes.len()
- );
-
- for node in need_nodes.iter() {
- self.metrics
- .resync_send_counter
- .add(1, &[KeyValue::new("to", format!("{:?}", node))]);
- }
-
- let put_block_message = self.read_block(hash).await?;
- self.system
- .rpc
- .try_call_many(
- &self.endpoint,
- &need_nodes[..],
- put_block_message,
- RequestStrategy::with_priority(PRIO_BACKGROUND)
- .with_quorum(need_nodes.len())
- .with_timeout(BLOCK_RW_TIMEOUT),
- )
- .await
- .err_context("PutBlock RPC")?;
- }
- info!(
- "Deleting unneeded block {:?}, offload finished ({} / {})",
- hash,
- need_nodes.len(),
- who.len()
- );
-
- self.mutation_lock
- .lock()
- .await
- .delete_if_unneeded(hash, self)
- .await?;
-
- self.rc.clear_deleted_block_rc(hash)?;
- }
-
- if needed.is_nonzero() && !exists {
- info!(
- "Resync block {:?}: fetching absent but needed block (refcount > 0)",
- hash
- );
-
- let block_data = self.rpc_get_raw_block(hash).await?;
-
- self.metrics.resync_recv_counter.add(1);
-
- self.write_block(hash, &block_data).await?;
- }
-
- Ok(())
- }
-
- // ---- Utility: iteration on files in the data directory ----
-
- async fn for_each_file<F, Fut, State>(
- &self,
- state: State,
- mut f: F,
- must_exit: &watch::Receiver<bool>,
- ) -> Result<(), Error>
- where
- F: FnMut(State, Hash) -> Fut + Send,
- Fut: Future<Output = Result<State, Error>> + Send,
- State: Send,
- {
- self.for_each_file_rec(&self.data_dir, state, &mut f, must_exit)
- .await
- .map(|_| ())
- }
-
- fn for_each_file_rec<'a, F, Fut, State>(
- &'a self,
- path: &'a Path,
- mut state: State,
- f: &'a mut F,
- must_exit: &'a watch::Receiver<bool>,
- ) -> BoxFuture<'a, Result<State, Error>>
- where
- F: FnMut(State, Hash) -> Fut + Send,
- Fut: Future<Output = Result<State, Error>> + Send,
- State: Send + 'a,
- {
- async move {
- let mut ls_data_dir = fs::read_dir(path).await?;
- while let Some(data_dir_ent) = ls_data_dir.next_entry().await? {
- if *must_exit.borrow() {
- break;
- }
-
- let name = data_dir_ent.file_name();
- let name = if let Ok(n) = name.into_string() {
- n
- } else {
- continue;
- };
- let ent_type = data_dir_ent.file_type().await?;
-
- let name = name.strip_suffix(".zst").unwrap_or(&name);
- if name.len() == 2 && hex::decode(&name).is_ok() && ent_type.is_dir() {
- state = self
- .for_each_file_rec(&data_dir_ent.path(), state, f, must_exit)
- .await?;
- } else if name.len() == 64 {
- let hash_bytes = if let Ok(h) = hex::decode(&name) {
- h
- } else {
- continue;
- };
- let mut hash = [0u8; 32];
- hash.copy_from_slice(&hash_bytes[..]);
- state = f(state, hash.into()).await?;
- }
- }
- Ok(state)
- }
- .boxed()
}
}
#[async_trait]
-impl EndpointHandler<BlockRpc> for BlockManager {
- async fn handle(
- self: &Arc<Self>,
- message: &BlockRpc,
- _from: NodeID,
- ) -> Result<BlockRpc, Error> {
- match message {
- BlockRpc::PutBlock { hash, data } => self.write_block(hash, data).await,
- BlockRpc::GetBlock(h) => self.read_block(h).await,
- BlockRpc::NeedBlockQuery(h) => self.need_block(h).await.map(BlockRpc::NeedBlockReply),
- m => Err(Error::unexpected_rpc_message(m)),
+impl StreamingEndpointHandler<BlockRpc> for BlockManager {
+ async fn handle(self: &Arc<Self>, mut message: Req<BlockRpc>, _from: NodeID) -> Resp<BlockRpc> {
+ match message.msg() {
+ BlockRpc::PutBlock { hash, header } => Resp::new(
+ self.handle_put_block(*hash, *header, message.take_stream())
+ .await
+ .map(|_| BlockRpc::Ok),
+ ),
+ BlockRpc::GetBlock(h, order_tag) => self.handle_get_block(h, *order_tag).await,
+ BlockRpc::NeedBlockQuery(h) => {
+ Resp::new(self.need_block(h).await.map(BlockRpc::NeedBlockReply))
+ }
+ m => Resp::new(Err(Error::unexpected_rpc_message(m))),
}
}
}
-struct BlockStatus {
- exists: bool,
- needed: RcEntry,
+pub(crate) struct BlockStatus {
+ pub(crate) exists: bool,
+ pub(crate) needed: RcEntry,
}
impl BlockManagerLocked {
@@ -863,7 +577,7 @@ impl BlockManagerLocked {
hash: &Hash,
data: &DataBlock,
mgr: &BlockManager,
- ) -> Result<BlockRpc, Error> {
+ ) -> Result<(), Error> {
let compressed = data.is_compressed();
let data = data.inner_buffer();
@@ -874,8 +588,8 @@ impl BlockManagerLocked {
fs::create_dir_all(&directory).await?;
let to_delete = match (mgr.is_block_compressed(hash).await, compressed) {
- (Ok(true), _) => return Ok(BlockRpc::Ok),
- (Ok(false), false) => return Ok(BlockRpc::Ok),
+ (Ok(true), _) => return Ok(()),
+ (Ok(false), false) => return Ok(()),
(Ok(false), true) => {
let path_to_delete = path.clone();
path.set_extension("zst");
@@ -914,7 +628,7 @@ impl BlockManagerLocked {
dir.sync_all().await?;
drop(dir);
- Ok(BlockRpc::Ok)
+ Ok(())
}
async fn move_block_to_corrupted(&self, hash: &Hash, mgr: &BlockManager) -> Result<(), Error> {
@@ -949,49 +663,16 @@ impl BlockManagerLocked {
}
}
-/// Counts the number of errors when resyncing a block,
-/// and the time of the last try.
-/// Used to implement exponential backoff.
-#[derive(Clone, Copy, Debug)]
-struct ErrorCounter {
- errors: u64,
- last_try: u64,
-}
-
-impl ErrorCounter {
- fn new(now: u64) -> Self {
- Self {
- errors: 1,
- last_try: now,
- }
+async fn read_stream_to_end(mut stream: ByteStream) -> Result<Bytes, Error> {
+ let mut parts: Vec<Bytes> = vec![];
+ while let Some(part) = stream.next().await {
+ parts.push(part.ok_or_message("error in stream")?);
}
- fn decode(data: sled::IVec) -> Self {
- Self {
- errors: u64::from_be_bytes(data[0..8].try_into().unwrap()),
- last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()),
- }
- }
- fn encode(&self) -> Vec<u8> {
- [
- u64::to_be_bytes(self.errors),
- u64::to_be_bytes(self.last_try),
- ]
+ Ok(parts
+ .iter()
+ .map(|x| &x[..])
+ .collect::<Vec<_>>()
.concat()
- }
-
- fn add1(self, now: u64) -> Self {
- Self {
- errors: self.errors + 1,
- last_try: now,
- }
- }
-
- fn delay_msec(&self) -> u64 {
- (RESYNC_RETRY_DELAY.as_millis() as u64)
- << std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER)
- }
- fn next_try(&self) -> u64 {
- self.last_try + self.delay_msec()
- }
+ .into())
}
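
A hedged usage sketch of the new streaming read path above: rpc_get_block_streaming returns a stream of Bytes chunks (already decompressed if the block was stored compressed), which a caller might forward to a file or an HTTP response. The conversion of stream errors via Error::Message is an assumption made for the sketch:

    use futures_util::StreamExt;
    use tokio::io::AsyncWriteExt;

    async fn save_block_to_file(
        manager: &BlockManager,
        hash: &Hash,
        path: &std::path::Path,
    ) -> Result<(), Error> {
        // No OrderTag here: the chunks of a single block already arrive in order.
        let mut stream = manager.rpc_get_block_streaming(hash, None).await?;
        let mut file = tokio::fs::File::create(path).await?;
        while let Some(chunk) = stream.next().await {
            let bytes = chunk.map_err(|e| Error::Message(e.to_string()))?;
            file.write_all(&bytes).await?;
        }
        Ok(())
    }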
diff --git a/src/block/metrics.rs b/src/block/metrics.rs
index f0f541a3..477add66 100644
--- a/src/block/metrics.rs
+++ b/src/block/metrics.rs
@@ -1,6 +1,6 @@
use opentelemetry::{global, metrics::*};
-use garage_util::sled_counter::SledCountedTree;
+use garage_db::counted_tree_hack::CountedTree;
/// TableMetrics reference all counter used for metrics
pub struct BlockManagerMetrics {
@@ -23,7 +23,7 @@ pub struct BlockManagerMetrics {
}
impl BlockManagerMetrics {
- pub fn new(resync_queue: SledCountedTree, resync_errors: SledCountedTree) -> Self {
+ pub fn new(resync_queue: CountedTree, resync_errors: CountedTree) -> Self {
let meter = global::meter("garage_model/block");
Self {
_resync_queue_len: meter
diff --git a/src/block/rc.rs b/src/block/rc.rs
index ec3ea44e..ce6defad 100644
--- a/src/block/rc.rs
+++ b/src/block/rc.rs
@@ -1,5 +1,7 @@
use std::convert::TryInto;
+use garage_db as db;
+
use garage_util::data::*;
use garage_util::error::*;
use garage_util::time::*;
@@ -7,31 +9,41 @@ use garage_util::time::*;
use crate::manager::BLOCK_GC_DELAY;
pub struct BlockRc {
- pub(crate) rc: sled::Tree,
+ pub(crate) rc: db::Tree,
}
impl BlockRc {
- pub(crate) fn new(rc: sled::Tree) -> Self {
+ pub(crate) fn new(rc: db::Tree) -> Self {
Self { rc }
}
/// Increment the reference counter associated to a hash.
/// Returns true if the RC goes from zero to nonzero.
- pub(crate) fn block_incref(&self, hash: &Hash) -> Result<bool, Error> {
- let old_rc = self
- .rc
- .fetch_and_update(&hash, |old| RcEntry::parse_opt(old).increment().serialize())?;
- let old_rc = RcEntry::parse_opt(old_rc);
+ pub(crate) fn block_incref(
+ &self,
+ tx: &mut db::Transaction,
+ hash: &Hash,
+ ) -> db::TxOpResult<bool> {
+ let old_rc = RcEntry::parse_opt(tx.get(&self.rc, &hash)?);
+ match old_rc.increment().serialize() {
+ Some(x) => tx.insert(&self.rc, &hash, x)?,
+ None => unreachable!(),
+ };
Ok(old_rc.is_zero())
}
/// Decrement the reference counter associated to a hash.
/// Returns true if the RC is now zero.
- pub(crate) fn block_decref(&self, hash: &Hash) -> Result<bool, Error> {
- let new_rc = self
- .rc
- .update_and_fetch(&hash, |old| RcEntry::parse_opt(old).decrement().serialize())?;
- let new_rc = RcEntry::parse_opt(new_rc);
+ pub(crate) fn block_decref(
+ &self,
+ tx: &mut db::Transaction,
+ hash: &Hash,
+ ) -> db::TxOpResult<bool> {
+ let new_rc = RcEntry::parse_opt(tx.get(&self.rc, &hash)?).decrement();
+ match new_rc.serialize() {
+ Some(x) => tx.insert(&self.rc, &hash, x)?,
+ None => tx.remove(&self.rc, &hash)?,
+ };
Ok(matches!(new_rc, RcEntry::Deletable { .. }))
}
@@ -44,12 +56,15 @@ impl BlockRc {
/// deletion time has passed
pub(crate) fn clear_deleted_block_rc(&self, hash: &Hash) -> Result<(), Error> {
let now = now_msec();
- self.rc.update_and_fetch(&hash, |rcval| {
- let updated = match RcEntry::parse_opt(rcval) {
- RcEntry::Deletable { at_time } if now > at_time => RcEntry::Absent,
- v => v,
+ self.rc.db().transaction(|mut tx| {
+ let rcval = RcEntry::parse_opt(tx.get(&self.rc, &hash)?);
+ match rcval {
+ RcEntry::Deletable { at_time } if now > at_time => {
+ tx.remove(&self.rc, &hash)?;
+ }
+ _ => (),
};
- updated.serialize()
+ tx.commit(())
})?;
Ok(())
}
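
Refcount updates are now transactional: the caller supplies the garage_db transaction (in manager.rs, block_incref/block_decref pass theirs through). A minimal sketch of driving this crate-internal API, assuming the same transaction pattern as clear_deleted_block_rc above, i.e. that db.transaction returns the value passed to tx.commit:

    use garage_db as db;

    // Sketch only: BlockRc::block_incref is pub(crate), so this would live inside garage_block.
    fn incref_standalone(rc: &BlockRc, database: &db::Db, hash: &Hash) -> Result<bool, Error> {
        // Returns true when the refcount went from zero to nonzero,
        // i.e. this node should now make sure it actually has the block.
        let went_nonzero = database.transaction(|mut tx| {
            let r = rc.block_incref(&mut tx, hash)?;
            tx.commit(r)
        })?;
        Ok(went_nonzero)
    }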
diff --git a/src/block/repair.rs b/src/block/repair.rs
new file mode 100644
index 00000000..e2884b69
--- /dev/null
+++ b/src/block/repair.rs
@@ -0,0 +1,466 @@
+use core::ops::Bound;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::Duration;
+
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
+use tokio::fs;
+use tokio::select;
+use tokio::sync::mpsc;
+use tokio::sync::watch;
+
+use garage_util::background::*;
+use garage_util::data::*;
+use garage_util::error::*;
+use garage_util::persister::Persister;
+use garage_util::time::*;
+use garage_util::tranquilizer::Tranquilizer;
+
+use crate::manager::*;
+
+// Full scrub every 30 days
+const SCRUB_INTERVAL: Duration = Duration::from_secs(3600 * 24 * 30);
+// Scrub tranquility is initially set to 4, but can be changed in the CLI
+// and the updated version is persisted over Garage restarts
+const INITIAL_SCRUB_TRANQUILITY: u32 = 4;
+
+// ---- ---- ----
+// FIRST KIND OF REPAIR: FINDING MISSING BLOCKS/USELESS BLOCKS
+// This is a one-shot repair operation that can be launched,
+// checks everything, and then exits.
+// ---- ---- ----
+
+pub struct RepairWorker {
+ manager: Arc<BlockManager>,
+ next_start: Option<Hash>,
+ block_iter: Option<BlockStoreIterator>,
+}
+
+impl RepairWorker {
+ pub fn new(manager: Arc<BlockManager>) -> Self {
+ Self {
+ manager,
+ next_start: None,
+ block_iter: None,
+ }
+ }
+}
+
+#[async_trait]
+impl Worker for RepairWorker {
+ fn name(&self) -> String {
+ "Block repair worker".into()
+ }
+
+ fn info(&self) -> Option<String> {
+ match self.block_iter.as_ref() {
+ None => {
+ let idx_bytes = self
+ .next_start
+ .as_ref()
+ .map(|x| x.as_slice())
+ .unwrap_or(&[]);
+ let idx_bytes = if idx_bytes.len() > 4 {
+ &idx_bytes[..4]
+ } else {
+ idx_bytes
+ };
+ Some(format!("Phase 1: {}", hex::encode(idx_bytes)))
+ }
+ Some(bi) => Some(format!("Phase 2: {:.2}% done", bi.progress() * 100.)),
+ }
+ }
+
+ async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+ match self.block_iter.as_mut() {
+ None => {
+ // Phase 1: Repair blocks from RC table.
+
+ // We have to do this complicated two-step process where we first read a bunch
+ // of hashes from the RC table, and then insert them in the to-resync queue,
+ // because of SQLite. Basically, as long as we have an iterator on a DB table,
+ // we can't do anything else on the DB. The naive approach (which we had previously)
+ // of just iterating on the RC table and inserting items one to one in the resync
+ // queue can't work here, it would just provoke a deadlock in the SQLite adapter code.
+ // This is mostly because the Rust bindings for SQLite assume a worst-case scenario
+ // where SQLite is not compiled in thread-safe mode, so we have to wrap everything
+ // in a mutex (see db/sqlite_adapter.rs and discussion in PR #322).
+ // TODO: maybe do this with tokio::task::spawn_blocking ?
+ let mut batch_of_hashes = vec![];
+ let start_bound = match self.next_start.as_ref() {
+ None => Bound::Unbounded,
+ Some(x) => Bound::Excluded(x.as_slice()),
+ };
+ for entry in self
+ .manager
+ .rc
+ .rc
+ .range::<&[u8], _>((start_bound, Bound::Unbounded))?
+ {
+ let (hash, _) = entry?;
+ let hash = Hash::try_from(&hash[..]).unwrap();
+ batch_of_hashes.push(hash);
+ if batch_of_hashes.len() >= 1000 {
+ break;
+ }
+ }
+ if batch_of_hashes.is_empty() {
+ // move on to phase 2
+ self.block_iter = Some(BlockStoreIterator::new(&self.manager));
+ return Ok(WorkerState::Busy);
+ }
+
+ for hash in batch_of_hashes.into_iter() {
+ self.manager
+ .resync
+ .put_to_resync(&hash, Duration::from_secs(0))?;
+ self.next_start = Some(hash)
+ }
+
+ Ok(WorkerState::Busy)
+ }
+ Some(bi) => {
+ // Phase 2: Repair blocks actually on disk
+ // Lists all blocks on disk and adds them to the resync queue.
+ // This allows us to find blocks we are storing but don't actually need,
+ // so that we can offload them if necessary and then delete them locally.
+ if let Some(hash) = bi.next().await? {
+ self.manager
+ .resync
+ .put_to_resync(&hash, Duration::from_secs(0))?;
+ Ok(WorkerState::Busy)
+ } else {
+ Ok(WorkerState::Done)
+ }
+ }
+ }
+ }
+
+ async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
+ unreachable!()
+ }
+}
+
+// ---- ---- ----
+// SECOND KIND OF REPAIR: SCRUBBING THE DATASTORE
+// This is significantly more complex than the process above,
+// as it is a continuously-running task that triggers automatically
+// every SCRUB_INTERVAL, but can also be triggered manually
+// and whose parameter (esp. speed) can be controlled at runtime.
+// ---- ---- ----
+
+pub struct ScrubWorker {
+ manager: Arc<BlockManager>,
+ rx_cmd: mpsc::Receiver<ScrubWorkerCommand>,
+
+ work: ScrubWorkerState,
+ tranquilizer: Tranquilizer,
+
+ persister: Persister<ScrubWorkerPersisted>,
+ persisted: ScrubWorkerPersisted,
+}
+
+#[derive(Serialize, Deserialize)]
+struct ScrubWorkerPersisted {
+ tranquility: u32,
+ time_last_complete_scrub: u64,
+ corruptions_detected: u64,
+}
+
+enum ScrubWorkerState {
+ Running(BlockStoreIterator),
+ Paused(BlockStoreIterator, u64), // u64 = time when to resume scrub
+ Finished,
+}
+
+impl Default for ScrubWorkerState {
+ fn default() -> Self {
+ ScrubWorkerState::Finished
+ }
+}
+
+#[derive(Debug)]
+pub enum ScrubWorkerCommand {
+ Start,
+ Pause(Duration),
+ Resume,
+ Cancel,
+ SetTranquility(u32),
+}
+
+impl ScrubWorker {
+ pub fn new(manager: Arc<BlockManager>, rx_cmd: mpsc::Receiver<ScrubWorkerCommand>) -> Self {
+ let persister = Persister::new(&manager.system.metadata_dir, "scrub_info");
+ let persisted = match persister.load() {
+ Ok(v) => v,
+ Err(_) => ScrubWorkerPersisted {
+ time_last_complete_scrub: 0,
+ tranquility: INITIAL_SCRUB_TRANQUILITY,
+ corruptions_detected: 0,
+ },
+ };
+ Self {
+ manager,
+ rx_cmd,
+ work: ScrubWorkerState::Finished,
+ tranquilizer: Tranquilizer::new(30),
+ persister,
+ persisted,
+ }
+ }
+
+ async fn handle_cmd(&mut self, cmd: ScrubWorkerCommand) {
+ match cmd {
+ ScrubWorkerCommand::Start => {
+ self.work = match std::mem::take(&mut self.work) {
+ ScrubWorkerState::Finished => {
+ let iterator = BlockStoreIterator::new(&self.manager);
+ ScrubWorkerState::Running(iterator)
+ }
+ work => {
+ error!("Cannot start scrub worker: already running!");
+ work
+ }
+ };
+ }
+ ScrubWorkerCommand::Pause(dur) => {
+ self.work = match std::mem::take(&mut self.work) {
+ ScrubWorkerState::Running(it) | ScrubWorkerState::Paused(it, _) => {
+ ScrubWorkerState::Paused(it, now_msec() + dur.as_millis() as u64)
+ }
+ work => {
+ error!("Cannot pause scrub worker: not running!");
+ work
+ }
+ };
+ }
+ ScrubWorkerCommand::Resume => {
+ self.work = match std::mem::take(&mut self.work) {
+ ScrubWorkerState::Paused(it, _) => ScrubWorkerState::Running(it),
+ work => {
+ error!("Cannot resume scrub worker: not paused!");
+ work
+ }
+ };
+ }
+ ScrubWorkerCommand::Cancel => {
+ self.work = match std::mem::take(&mut self.work) {
+ ScrubWorkerState::Running(_) | ScrubWorkerState::Paused(_, _) => {
+ ScrubWorkerState::Finished
+ }
+ work => {
+ error!("Cannot cancel scrub worker: not running!");
+ work
+ }
+ }
+ }
+ ScrubWorkerCommand::SetTranquility(t) => {
+ self.persisted.tranquility = t;
+ if let Err(e) = self.persister.save_async(&self.persisted).await {
+ error!("Could not save new tranquilitiy value: {}", e);
+ }
+ }
+ }
+ }
+}
+
+#[async_trait]
+impl Worker for ScrubWorker {
+ fn name(&self) -> String {
+ "Block scrub worker".into()
+ }
+
+ fn info(&self) -> Option<String> {
+ let s = match &self.work {
+ ScrubWorkerState::Running(bsi) => format!(
+ "{:.2}% done (tranquility = {})",
+ bsi.progress() * 100.,
+ self.persisted.tranquility
+ ),
+ ScrubWorkerState::Paused(bsi, rt) => {
+ format!(
+ "Paused, {:.2}% done, resumes at {}",
+ bsi.progress() * 100.,
+ msec_to_rfc3339(*rt)
+ )
+ }
+ ScrubWorkerState::Finished => format!(
+ "Last completed scrub: {}",
+ msec_to_rfc3339(self.persisted.time_last_complete_scrub)
+ ),
+ };
+ Some(format!(
+ "{} ; corruptions detected: {}",
+ s, self.persisted.corruptions_detected
+ ))
+ }
+
+ async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+ match self.rx_cmd.try_recv() {
+ Ok(cmd) => self.handle_cmd(cmd).await,
+ Err(mpsc::error::TryRecvError::Disconnected) => return Ok(WorkerState::Done),
+ Err(mpsc::error::TryRecvError::Empty) => (),
+ };
+
+ match &mut self.work {
+ ScrubWorkerState::Running(bsi) => {
+ self.tranquilizer.reset();
+ if let Some(hash) = bsi.next().await? {
+ match self.manager.read_block(&hash).await {
+ Err(Error::CorruptData(_)) => {
+ error!("Found corrupt data block during scrub: {:?}", hash);
+ self.persisted.corruptions_detected += 1;
+ self.persister.save_async(&self.persisted).await?;
+ }
+ Err(e) => return Err(e),
+ _ => (),
+ };
+ Ok(self
+ .tranquilizer
+ .tranquilize_worker(self.persisted.tranquility))
+ } else {
+ self.persisted.time_last_complete_scrub = now_msec();
+ self.persister.save_async(&self.persisted).await?;
+ self.work = ScrubWorkerState::Finished;
+ self.tranquilizer.clear();
+ Ok(WorkerState::Idle)
+ }
+ }
+ _ => Ok(WorkerState::Idle),
+ }
+ }
+
+ async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
+ let (wait_until, command) = match &self.work {
+ ScrubWorkerState::Running(_) => return WorkerState::Busy,
+ ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),
+ ScrubWorkerState::Finished => (
+ self.persisted.time_last_complete_scrub + SCRUB_INTERVAL.as_millis() as u64,
+ ScrubWorkerCommand::Start,
+ ),
+ };
+
+ let now = now_msec();
+ if now >= wait_until {
+ self.handle_cmd(command).await;
+ return WorkerState::Busy;
+ }
+ let delay = Duration::from_millis(wait_until - now);
+ select! {
+ _ = tokio::time::sleep(delay) => self.handle_cmd(command).await,
+ cmd = self.rx_cmd.recv() => if let Some(cmd) = cmd {
+ self.handle_cmd(cmd).await;
+ } else {
+ return WorkerState::Done;
+ }
+ }
+
+ match &self.work {
+ ScrubWorkerState::Running(_) => WorkerState::Busy,
+ _ => WorkerState::Idle,
+ }
+ }
+}
+
+// ---- ---- ----
+// UTILITY FOR ENUMERATING THE BLOCK STORE
+// ---- ---- ----
+
+struct BlockStoreIterator {
+ path: Vec<ReadingDir>,
+}
+
+enum ReadingDir {
+ Pending(PathBuf),
+ Read {
+ subpaths: Vec<fs::DirEntry>,
+ pos: usize,
+ },
+}
+
+impl BlockStoreIterator {
+ fn new(manager: &BlockManager) -> Self {
+ let root_dir = manager.data_dir.clone();
+ Self {
+ path: vec![ReadingDir::Pending(root_dir)],
+ }
+ }
+
+ /// Returns progress done, between 0 and 1
+ fn progress(&self) -> f32 {
+ if self.path.is_empty() {
+ 1.0
+ } else {
+ let mut ret = 0.0;
+ let mut next_div = 1;
+ for p in self.path.iter() {
+ match p {
+ ReadingDir::Pending(_) => break,
+ ReadingDir::Read { subpaths, pos } => {
+ next_div *= subpaths.len();
+ ret += ((*pos - 1) as f32) / (next_div as f32);
+ }
+ }
+ }
+ ret
+ }
+ }
+
+ async fn next(&mut self) -> Result<Option<Hash>, Error> {
+ loop {
+ let last_path = match self.path.last_mut() {
+ None => return Ok(None),
+ Some(lp) => lp,
+ };
+
+ if let ReadingDir::Pending(path) = last_path {
+ let mut reader = fs::read_dir(&path).await?;
+ let mut subpaths = vec![];
+ while let Some(ent) = reader.next_entry().await? {
+ subpaths.push(ent);
+ }
+ *last_path = ReadingDir::Read { subpaths, pos: 0 };
+ }
+
+ let (subpaths, pos) = match *last_path {
+ ReadingDir::Read {
+ ref subpaths,
+ ref mut pos,
+ } => (subpaths, pos),
+ ReadingDir::Pending(_) => unreachable!(),
+ };
+
+ let data_dir_ent = match subpaths.get(*pos) {
+ None => {
+ self.path.pop();
+ continue;
+ }
+ Some(ent) => {
+ *pos += 1;
+ ent
+ }
+ };
+
+ let name = data_dir_ent.file_name();
+ let name = if let Ok(n) = name.into_string() {
+ n
+ } else {
+ continue;
+ };
+ let ent_type = data_dir_ent.file_type().await?;
+
+ let name = name.strip_suffix(".zst").unwrap_or(&name);
+ if name.len() == 2 && hex::decode(&name).is_ok() && ent_type.is_dir() {
+ let path = data_dir_ent.path();
+ self.path.push(ReadingDir::Pending(path));
+ } else if name.len() == 64 {
+ if let Ok(h) = hex::decode(&name) {
+ let mut hash = [0u8; 32];
+ hash.copy_from_slice(&h);
+ return Ok(Some(hash.into()));
+ }
+ }
+ }
+ }
+}
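
The scrub worker above is controlled only through its mpsc command channel; BlockManager::send_scrub_command (see the manager.rs diff) is the public entry point. A small usage sketch, assuming a BlockManager handle:

    use std::time::Duration;

    // Pause a running scrub for one hour, and make it gentler once it resumes.
    async fn calm_scrub(manager: &BlockManager) {
        manager
            .send_scrub_command(ScrubWorkerCommand::Pause(Duration::from_secs(3600)))
            .await;
        manager
            .send_scrub_command(ScrubWorkerCommand::SetTranquility(8))
            .await;
    }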
diff --git a/src/block/resync.rs b/src/block/resync.rs
new file mode 100644
index 00000000..ada3ac54
--- /dev/null
+++ b/src/block/resync.rs
@@ -0,0 +1,589 @@
+use std::collections::HashSet;
+use std::convert::TryInto;
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use arc_swap::ArcSwap;
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
+
+use tokio::select;
+use tokio::sync::{watch, Notify};
+
+use opentelemetry::{
+ trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},
+ Context, KeyValue,
+};
+
+use garage_db as db;
+use garage_db::counted_tree_hack::CountedTree;
+
+use garage_util::background::*;
+use garage_util::data::*;
+use garage_util::error::*;
+use garage_util::metrics::RecordDuration;
+use garage_util::persister::Persister;
+use garage_util::time::*;
+use garage_util::tranquilizer::Tranquilizer;
+
+use garage_rpc::system::System;
+use garage_rpc::*;
+
+use garage_table::replication::TableReplication;
+
+use crate::manager::*;
+
+// The delay between the time where a resync operation fails
+// and the time when it is retried, with exponential backoff
+// (multiplied by 2, 4, 8, 16, etc. for every consecutive failure).
+pub(crate) const RESYNC_RETRY_DELAY: Duration = Duration::from_secs(60);
+// The minimum retry delay is 60 seconds = 1 minute
+// The maximum retry delay is 60 seconds * 2^6 = 64 minutes (~1 hour)
+pub(crate) const RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER: u64 = 6;
+
+// No more than 4 resync workers can be running in the system
+pub(crate) const MAX_RESYNC_WORKERS: usize = 4;
+// Resync tranquility is initially set to 2, but can be changed in the CLI
+// and the updated value is persisted across Garage restarts
+const INITIAL_RESYNC_TRANQUILITY: u32 = 2;
+
+pub struct BlockResyncManager {
+ pub(crate) queue: CountedTree,
+ pub(crate) notify: Notify,
+ pub(crate) errors: CountedTree,
+
+ busy_set: BusySet,
+
+ persister: Persister<ResyncPersistedConfig>,
+ persisted: ArcSwap<ResyncPersistedConfig>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy)]
+struct ResyncPersistedConfig {
+ n_workers: usize,
+ tranquility: u32,
+}
+
+enum ResyncIterResult {
+ BusyDidSomething,
+ BusyDidNothing,
+ IdleFor(Duration),
+}
+
+type BusySet = Arc<Mutex<HashSet<Vec<u8>>>>;
+
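+/// RAII guard around one entry of the resync queue: while a BusyBlock is alive,
+/// its queue key is kept in the shared busy_set so that other resync workers
+/// skip it; the key is removed from the set again when the guard is dropped
+/// (see the Drop impl below).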
+struct BusyBlock {
+ time_bytes: Vec<u8>,
+ hash_bytes: Vec<u8>,
+ busy_set: BusySet,
+}
+
+impl BlockResyncManager {
+ pub(crate) fn new(db: &db::Db, system: &System) -> Self {
+ let queue = db
+ .open_tree("block_local_resync_queue")
+ .expect("Unable to open block_local_resync_queue tree");
+ let queue = CountedTree::new(queue).expect("Could not count block_local_resync_queue");
+
+ let errors = db
+ .open_tree("block_local_resync_errors")
+ .expect("Unable to open block_local_resync_errors tree");
+ let errors = CountedTree::new(errors).expect("Could not count block_local_resync_errors");
+
+ let persister = Persister::new(&system.metadata_dir, "resync_cfg");
+ let persisted = match persister.load() {
+ Ok(v) => v,
+ Err(_) => ResyncPersistedConfig {
+ n_workers: 1,
+ tranquility: INITIAL_RESYNC_TRANQUILITY,
+ },
+ };
+
+ Self {
+ queue,
+ notify: Notify::new(),
+ errors,
+ busy_set: Arc::new(Mutex::new(HashSet::new())),
+ persister,
+ persisted: ArcSwap::new(Arc::new(persisted)),
+ }
+ }
+
+	/// Get length of the resync queue
+ pub fn queue_len(&self) -> Result<usize, Error> {
+ // This currently can't return an error because the CountedTree hack
+ // doesn't error on .len(), but this will change when we remove the hack
+ // (hopefully someday!)
+ Ok(self.queue.len())
+ }
+
+ /// Get number of blocks that have an error
+ pub fn errors_len(&self) -> Result<usize, Error> {
+ // (see queue_len comment)
+ Ok(self.errors.len())
+ }
+
+ // ---- Resync loop ----
+
+	// This part manages a queue of blocks that need to be
+	// "resynchronized", i.e. checked to ensure that they are
+	// present if we still need them, or deleted once the
+	// garbage collection delay has passed.
+ //
+ // Here are some explanations on how the resync queue works.
+	// Two trees in the metadata database are used to keep track
+	// of the status of blocks that need to be resynchronized:
+ //
+ // - resync.queue: a tree that is ordered first by a timestamp
+ // (in milliseconds since Unix epoch) that is the time at which
+ // the resync must be done, and second by block hash.
+ // The key in this tree is just:
+ // concat(timestamp (8 bytes), hash (32 bytes))
+ // The value is the same 32-byte hash.
+ //
+ // - resync.errors: a tree that indicates for each block
+ // if the last resync resulted in an error, and if so,
+	//   the following two pieces of information (see the ErrorCounter struct):
+ // - how many consecutive resync errors for this block?
+ // - when was the last try?
+	//   These two values are used to implement an
+ // exponential backoff retry strategy.
+ // The key in this tree is the 32-byte hash of the block,
+ // and the value is the encoded ErrorCounter value.
+ //
+ // We need to have these two trees, because the resync queue
+ // is not just a queue of items to process, but a set of items
+	// that are each waiting for a specific delay before we can process them
+ // (the delay being necessary both internally for the exponential
+ // backoff strategy, and exposed as a parameter when adding items
+ // to the queue, e.g. to wait until the GC delay has passed).
+ // This is why we need one tree ordered by time, and one
+ // ordered by identifier of item to be processed (block hash).
+ //
+	// When the worker takes an item from resync.queue to process it,
+	// it checks in resync.errors whether an exponential back-off delay
+	// applies and, if so, whether that delay has already passed before
+	// processing the item. If it has not, the item in the queue is
+	// skipped (but added back, to be processed again later once the
+	// delay has passed).
+ //
+ // An alternative that would have seemed natural is to
+ // only add items to resync.queue with a processing time that is
+ // after the delay, but there are several issues with this:
+	// - This would require synchronizing updates to resync.queue and
+	//   resync.errors by putting them both under a lock (with the current
+	//   model, there is only one thread, the worker thread, that accesses
+	//   resync.errors, so no synchronization is needed).
+	//   This would mean that block_incref might need to take a lock
+	//   before doing its work, giving it many more chances of not
+	//   completing successfully if something bad happens to Garage.
+	//   Currently Garage is not able to recover from a block_incref
+	//   that doesn't complete successfully, because it is necessary
+	//   to ensure consistency between the state of the block manager
+	//   and the information in the BlockRef table.
+ // - If a resync fails, we put that block in the resync.errors table,
+ // and also add it back to resync.queue to be processed after
+ // the exponential back-off delay,
+ // but maybe the block is already scheduled to be resynced again
+ // at another time that is before the exponential back-off delay,
+ // and we have no way to check that easily. This means that
+ // in all cases, we need to check the resync.errors table
+ // in the resync loop at the time when a block is popped from
+ // the resync.queue.
+ // Overall, the current design is therefore simpler and more robust
+ // because it tolerates inconsistencies between the resync.queue
+ // and resync.errors table (items being scheduled in resync.queue
+ // for times that are earlier than the exponential back-off delay
+ // is a natural condition that is handled properly).
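+	//
+	// Note on the resync.queue key layout: since the timestamp is encoded
+	// with u64::to_be_bytes before the hash is appended, the lexicographic
+	// order of the keys is also their chronological order, so iterating the
+	// tree from its first entry always yields the earliest-due items first
+	// (e.g., with illustrative values, concat(to_be_bytes(1000), hash) sorts
+	// before concat(to_be_bytes(2000), hash)).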
+
+ pub(crate) fn put_to_resync(&self, hash: &Hash, delay: Duration) -> db::Result<()> {
+ let when = now_msec() + delay.as_millis() as u64;
+ self.put_to_resync_at(hash, when)
+ }
+
+ pub(crate) fn put_to_resync_at(&self, hash: &Hash, when: u64) -> db::Result<()> {
+ trace!("Put resync_queue: {} {:?}", when, hash);
+ let mut key = u64::to_be_bytes(when).to_vec();
+ key.extend(hash.as_ref());
+ self.queue.insert(key, hash.as_ref())?;
+ self.notify.notify_waiters();
+ Ok(())
+ }
+
+ async fn resync_iter(&self, manager: &BlockManager) -> Result<ResyncIterResult, db::Error> {
+ if let Some(block) = self.get_block_to_resync()? {
+ let time_msec = u64::from_be_bytes(block.time_bytes[0..8].try_into().unwrap());
+ let now = now_msec();
+
+ if now >= time_msec {
+ let hash = Hash::try_from(&block.hash_bytes[..]).unwrap();
+
+ if let Some(ec) = self.errors.get(hash.as_slice())? {
+ let ec = ErrorCounter::decode(&ec);
+ if now < ec.next_try() {
+						// if the time for the next retry after an error has not
+						// been reached yet, don't resync now and return early, but
+						// still make sure the item stays in the queue at the expected time
+ self.put_to_resync_at(&hash, ec.next_try())?;
+ // ec.next_try() > now >= time_msec, so this remove
+ // is not removing the one we added just above
+ // (we want to do the remove after the insert to ensure
+ // that the item is not lost if we crash in-between)
+ self.queue.remove(&block.time_bytes)?;
+ return Ok(ResyncIterResult::BusyDidNothing);
+ }
+ }
+
+ let tracer = opentelemetry::global::tracer("garage");
+ let trace_id = gen_uuid();
+ let span = tracer
+ .span_builder("Resync block")
+ .with_trace_id(
+ opentelemetry::trace::TraceId::from_hex(&hex::encode(
+ &trace_id.as_slice()[..16],
+ ))
+ .unwrap(),
+ )
+ .with_attributes(vec![KeyValue::new("block", format!("{:?}", hash))])
+ .start(&tracer);
+
+ let res = self
+ .resync_block(manager, &hash)
+ .with_context(Context::current_with_span(span))
+ .bound_record_duration(&manager.metrics.resync_duration)
+ .await;
+
+ manager.metrics.resync_counter.add(1);
+
+ if let Err(e) = &res {
+ manager.metrics.resync_error_counter.add(1);
+ warn!("Error when resyncing {:?}: {}", hash, e);
+
+ let err_counter = match self.errors.get(hash.as_slice())? {
+ Some(ec) => ErrorCounter::decode(&ec).add1(now + 1),
+ None => ErrorCounter::new(now + 1),
+ };
+
+ self.errors.insert(hash.as_slice(), err_counter.encode())?;
+
+ self.put_to_resync_at(&hash, err_counter.next_try())?;
+ // err_counter.next_try() >= now + 1 > now,
+ // the entry we remove from the queue is not
+ // the entry we inserted with put_to_resync_at
+ self.queue.remove(&block.time_bytes)?;
+ } else {
+ self.errors.remove(hash.as_slice())?;
+ self.queue.remove(&block.time_bytes)?;
+ }
+
+ Ok(ResyncIterResult::BusyDidSomething)
+ } else {
+ Ok(ResyncIterResult::IdleFor(Duration::from_millis(
+ time_msec - now,
+ )))
+ }
+ } else {
+ // Here we wait either for a notification that an item has been
+ // added to the queue, or for a constant delay of 10 secs to expire.
+ // The delay avoids a race condition where the notification happens
+ // between the time we checked the queue and the first poll
+ // to resync_notify.notified(): if that happens, we'll just loop
+ // back 10 seconds later, which is fine.
+ Ok(ResyncIterResult::IdleFor(Duration::from_secs(10)))
+ }
+ }
+
+ fn get_block_to_resync(&self) -> Result<Option<BusyBlock>, db::Error> {
+ let mut busy = self.busy_set.lock().unwrap();
+ for it in self.queue.iter()? {
+ let (time_bytes, hash_bytes) = it?;
+ if !busy.contains(&time_bytes) {
+ busy.insert(time_bytes.clone());
+ return Ok(Some(BusyBlock {
+ time_bytes,
+ hash_bytes,
+ busy_set: self.busy_set.clone(),
+ }));
+ }
+ }
+ Ok(None)
+ }
+
+ async fn resync_block(&self, manager: &BlockManager, hash: &Hash) -> Result<(), Error> {
+ let BlockStatus { exists, needed } = manager.check_block_status(hash).await?;
+
+ if exists != needed.is_needed() || exists != needed.is_nonzero() {
+ debug!(
+ "Resync block {:?}: exists {}, nonzero rc {}, deletable {}",
+ hash,
+ exists,
+ needed.is_nonzero(),
+ needed.is_deletable(),
+ );
+ }
+
+ if exists && needed.is_deletable() {
+ info!("Resync block {:?}: offloading and deleting", hash);
+
+ let mut who = manager.replication.write_nodes(hash);
+ if who.len() < manager.replication.write_quorum() {
+ return Err(Error::Message("Not trying to offload block because we don't have a quorum of nodes to write to".to_string()));
+ }
+ who.retain(|id| *id != manager.system.id);
+
+ let who_needs_resps = manager
+ .system
+ .rpc
+ .call_many(
+ &manager.endpoint,
+ &who,
+ BlockRpc::NeedBlockQuery(*hash),
+ RequestStrategy::with_priority(PRIO_BACKGROUND),
+ )
+ .await?;
+
+ let mut need_nodes = vec![];
+ for (node, needed) in who_needs_resps {
+ match needed.err_context("NeedBlockQuery RPC")? {
+ BlockRpc::NeedBlockReply(needed) => {
+ if needed {
+ need_nodes.push(node);
+ }
+ }
+ m => {
+ return Err(Error::unexpected_rpc_message(m));
+ }
+ }
+ }
+
+ if !need_nodes.is_empty() {
+ trace!(
+ "Block {:?} needed by {} nodes, sending",
+ hash,
+ need_nodes.len()
+ );
+
+ for node in need_nodes.iter() {
+ manager
+ .metrics
+ .resync_send_counter
+ .add(1, &[KeyValue::new("to", format!("{:?}", node))]);
+ }
+
+ let block = manager.read_block(hash).await?;
+ let (header, bytes) = block.into_parts();
+ let put_block_message = Req::new(BlockRpc::PutBlock {
+ hash: *hash,
+ header,
+ })?
+ .with_stream_from_buffer(bytes);
+ manager
+ .system
+ .rpc
+ .try_call_many(
+ &manager.endpoint,
+ &need_nodes[..],
+ put_block_message,
+ RequestStrategy::with_priority(PRIO_BACKGROUND)
+ .with_quorum(need_nodes.len()),
+ )
+ .await
+ .err_context("PutBlock RPC")?;
+ }
+ info!(
+ "Deleting unneeded block {:?}, offload finished ({} / {})",
+ hash,
+ need_nodes.len(),
+ who.len()
+ );
+
+ manager.delete_if_unneeded(hash).await?;
+
+ manager.rc.clear_deleted_block_rc(hash)?;
+ }
+
+ if needed.is_nonzero() && !exists {
+ info!(
+ "Resync block {:?}: fetching absent but needed block (refcount > 0)",
+ hash
+ );
+
+ let block_data = manager.rpc_get_raw_block(hash, None).await?;
+
+ manager.metrics.resync_recv_counter.add(1);
+
+ manager.write_block(hash, &block_data).await?;
+ }
+
+ Ok(())
+ }
+
+ async fn update_persisted(
+ &self,
+ update: impl Fn(&mut ResyncPersistedConfig),
+ ) -> Result<(), Error> {
+ let mut cfg: ResyncPersistedConfig = *self.persisted.load().as_ref();
+ update(&mut cfg);
+ self.persister.save_async(&cfg).await?;
+ self.persisted.store(Arc::new(cfg));
+ self.notify.notify_waiters();
+ Ok(())
+ }
+
+ pub async fn set_n_workers(&self, n_workers: usize) -> Result<(), Error> {
+ if !(1..=MAX_RESYNC_WORKERS).contains(&n_workers) {
+ return Err(Error::Message(format!(
+ "Invalid number of resync workers, must be between 1 and {}",
+ MAX_RESYNC_WORKERS
+ )));
+ }
+ self.update_persisted(|cfg| cfg.n_workers = n_workers).await
+ }
+
+ pub async fn set_tranquility(&self, tranquility: u32) -> Result<(), Error> {
+ self.update_persisted(|cfg| cfg.tranquility = tranquility)
+ .await
+ }
+}
+
+impl Drop for BusyBlock {
+ fn drop(&mut self) {
+ let mut busy = self.busy_set.lock().unwrap();
+ busy.remove(&self.time_bytes);
+ }
+}
+
+pub(crate) struct ResyncWorker {
+ index: usize,
+ manager: Arc<BlockManager>,
+ tranquilizer: Tranquilizer,
+ next_delay: Duration,
+}
+
+impl ResyncWorker {
+ pub(crate) fn new(index: usize, manager: Arc<BlockManager>) -> Self {
+ Self {
+ index,
+ manager,
+ tranquilizer: Tranquilizer::new(30),
+ next_delay: Duration::from_secs(10),
+ }
+ }
+}
+
+#[async_trait]
+impl Worker for ResyncWorker {
+ fn name(&self) -> String {
+ format!("Block resync worker #{}", self.index + 1)
+ }
+
+ fn info(&self) -> Option<String> {
+ let persisted = self.manager.resync.persisted.load();
+
+ if self.index >= persisted.n_workers {
+ return Some("(unused)".into());
+ }
+
+ let mut ret = vec![];
+ ret.push(format!("tranquility = {}", persisted.tranquility));
+
+ let qlen = self.manager.resync.queue_len().unwrap_or(0);
+ if qlen > 0 {
+ ret.push(format!("{} blocks in queue", qlen));
+ }
+
+ let elen = self.manager.resync.errors_len().unwrap_or(0);
+ if elen > 0 {
+ ret.push(format!("{} blocks in error state", elen));
+ }
+
+ Some(ret.join(", "))
+ }
+
+ async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
+ if self.index >= self.manager.resync.persisted.load().n_workers {
+ return Ok(WorkerState::Idle);
+ }
+
+ self.tranquilizer.reset();
+ match self.manager.resync.resync_iter(&self.manager).await {
+ Ok(ResyncIterResult::BusyDidSomething) => Ok(self
+ .tranquilizer
+ .tranquilize_worker(self.manager.resync.persisted.load().tranquility)),
+ Ok(ResyncIterResult::BusyDidNothing) => Ok(WorkerState::Busy),
+ Ok(ResyncIterResult::IdleFor(delay)) => {
+ self.next_delay = delay;
+ Ok(WorkerState::Idle)
+ }
+ Err(e) => {
+				// The errors that we get here are only metadata DB errors
+				// We don't really know how to handle them so just ¯\_(ツ)_/¯
+				// (there is kind of an assumption that the DB won't error on us,
+				// if it does there is not much we can do -- TODO should we just panic?)
+ // Here we just give the error to the worker manager,
+ // it will print it to the logs and increment a counter
+ Err(e.into())
+ }
+ }
+ }
+
+ async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
+ while self.index >= self.manager.resync.persisted.load().n_workers {
+ self.manager.resync.notify.notified().await
+ }
+
+ select! {
+ _ = tokio::time::sleep(self.next_delay) => (),
+ _ = self.manager.resync.notify.notified() => (),
+ };
+
+ WorkerState::Busy
+ }
+}
+
+/// Counts the number of errors when resyncing a block,
+/// and the time of the last try.
+/// Used to implement exponential backoff.
+#[derive(Clone, Copy, Debug)]
+struct ErrorCounter {
+ errors: u64,
+ last_try: u64,
+}
+
+impl ErrorCounter {
+ fn new(now: u64) -> Self {
+ Self {
+ errors: 1,
+ last_try: now,
+ }
+ }
+
+ fn decode(data: &[u8]) -> Self {
+ Self {
+ errors: u64::from_be_bytes(data[0..8].try_into().unwrap()),
+ last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()),
+ }
+ }
+ fn encode(&self) -> Vec<u8> {
+ [
+ u64::to_be_bytes(self.errors),
+ u64::to_be_bytes(self.last_try),
+ ]
+ .concat()
+ }
+
+ fn add1(self, now: u64) -> Self {
+ Self {
+ errors: self.errors + 1,
+ last_try: now,
+ }
+ }
+
+ fn delay_msec(&self) -> u64 {
+ (RESYNC_RETRY_DELAY.as_millis() as u64)
+ << std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER)
+ }
+ fn next_try(&self) -> u64 {
+ self.last_try + self.delay_msec()
+ }
+}
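To make the retry schedule concrete, here is a small standalone sketch of the backoff computed by ErrorCounter::delay_msec with the constants defined above (RESYNC_RETRY_DELAY = 60 s, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER = 6); the snippet is illustrative only and not part of this patch:

    // Backoff after N >= 1 consecutive errors: 1, 2, 4, 8, 16, 32, 64, 64, ... minutes.
    fn retry_delay_minutes(consecutive_errors: u64) -> u64 {
        let delay_msec = 60_000u64 << std::cmp::min(consecutive_errors - 1, 6);
        delay_msec / 60_000
    }
    // retry_delay_minutes(1) == 1, retry_delay_minutes(3) == 4,
    // retry_delay_minutes(7) == 64, retry_delay_minutes(10) == 64

The next retry time is then simply last_try + delay, as implemented in ErrorCounter::next_try above.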