 doc/book/reference-manual/monitoring.md | 106
 src/garage/server.rs                    |   1
 src/rpc/system.rs                       |  91
 src/rpc/system_metrics.rs               | 318
 4 files changed, 412 insertions(+), 104 deletions(-)
diff --git a/doc/book/reference-manual/monitoring.md b/doc/book/reference-manual/monitoring.md
index 97c533d3..f392c133 100644
--- a/doc/book/reference-manual/monitoring.md
+++ b/doc/book/reference-manual/monitoring.md
@@ -27,6 +27,112 @@ Exposes the Garage replication factor configured on the node
garage_replication_factor 3
```
+#### `garage_local_disk_avail` and `garage_local_disk_total` (gauge)
+
+Reports the available and total disk space on each node, for data and metadata separately.
+
+```
+garage_local_disk_avail{volume="data"} 540341960704
+garage_local_disk_avail{volume="metadata"} 540341960704
+garage_local_disk_total{volume="data"} 763063566336
+garage_local_disk_total{volume="metadata"} 763063566336
+```
+
+### Cluster health status metrics
+
+#### `cluster_healthy` (gauge)
+
+Whether all storage nodes are connected (0 or 1)
+
+```
+cluster_healthy 0
+```
+
+#### `cluster_available` (gauge)
+
+Whether all requests can be served, even if some storage nodes are disconnected
+
+```
+cluster_available 1
+```
+
+#### `cluster_connected_nodes` (gauge)
+
+Number of nodes currently connected
+
+```
+cluster_connected_nodes 3
+```
+
+#### `cluster_known_nodes` (gauge)
+
+Number of nodes already seen once in the cluster
+
+```
+cluster_known_nodes 3
+```
+
+#### `cluster_layout_node_connected` (gauge)
+
+Connection status for individual nodes of the cluster layout
+
+```
+cluster_layout_node_connected{id="62b218d848e86a64",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 1
+cluster_layout_node_connected{id="a11c7cf18af29737",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
+cluster_layout_node_connected{id="a235ac7695e0c54d",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 1
+cluster_layout_node_connected{id="b10c110e4e854e5a",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 1
+```
+
+#### `cluster_layout_node_disconnected_time` (gauge)
+
+Time (in seconds) since last connection to individual nodes of the cluster layout
+
+```
+cluster_layout_node_disconnected_time{id="62b218d848e86a64",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
+cluster_layout_node_disconnected_time{id="a235ac7695e0c54d",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
+cluster_layout_node_disconnected_time{id="b10c110e4e854e5a",role_capacity="1000000000",role_gateway="0",role_zone="dc1"} 0
+```
+
+#### `cluster_storage_nodes` (gauge)
+
+Number of storage nodes declared in the current layout
+
+```
+cluster_storage_nodes 4
+```
+
+#### `cluster_storage_nodes_ok` (gauge)
+
+Number of storage nodes currently connected
+
+```
+cluster_storage_nodes_ok 3
+```
+
+#### `cluster_partitions` (gauge)
+
+Number of partitions in the layout (this is always 256)
+
+```
+cluster_partitions 256
+```
+
+#### `cluster_partitions_all_ok` (gauge)
+
+Number of partitions for which all storage nodes are connected
+
+```
+cluster_partitions_all_ok 64
+```
+
+#### `cluster_partitions_quorum` (gauge)
+
+Number of partitions for which we have a quorum of connected nodes and all requests can be served
+
+```
+cluster_partitions_quorum 256
+```
+
### Metrics of the API endpoints
#### `api_admin_request_counter` (counter)
diff --git a/src/garage/server.rs b/src/garage/server.rs
index 51b06b8e..6323f957 100644
--- a/src/garage/server.rs
+++ b/src/garage/server.rs
@@ -162,6 +162,7 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er
info!("Netapp exited");
// Drop all references so that stuff can terminate properly
+ garage.system.cleanup();
drop(garage);
// Await for all background tasks to end
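The `garage.system.cleanup()` call added here is needed because, later in this patch, `System` gains an owned `SystemMetrics` whose observer callbacks capture an `Arc<System>`, forming a reference cycle that would otherwise keep both structs alive forever. A minimal standalone sketch of that cycle and of how storing `None` breaks it (illustrative types, not Garage's actual definitions):

```rust
// Sketch of the System <-> SystemMetrics reference cycle (assumed shape,
// simplified from this patch) and the cleanup() that breaks it.
use std::sync::Arc;

use arc_swap::ArcSwapOption;

struct Metrics {
    // In Garage, an Arc like this is captured by each OpenTelemetry
    // observer closure so it can read live system state.
    _system: Arc<System>,
}

struct System {
    metrics: ArcSwapOption<Metrics>,
}

impl System {
    fn new() -> Arc<Self> {
        // Two-phase init, as in System::new() in this patch: the metrics
        // need an Arc to the fully constructed system.
        let sys = Arc::new(System {
            metrics: ArcSwapOption::new(None),
        });
        sys.metrics.store(Some(Arc::new(Metrics {
            _system: sys.clone(),
        })));
        sys
    }

    fn cleanup(&self) {
        // Drop the Metrics, and with it its Arc<System>: the cycle is
        // broken and the last external Arc<System> can now free everything.
        self.metrics.store(None);
    }
}

fn main() {
    let sys = System::new();
    sys.cleanup();
    drop(sys); // without cleanup(), this drop would leak both structs
}
```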
diff --git a/src/rpc/system.rs b/src/rpc/system.rs
index 998517de..9e475717 100644
--- a/src/rpc/system.rs
+++ b/src/rpc/system.rs
@@ -3,11 +3,10 @@ use std::collections::HashMap;
use std::io::{Read, Write};
use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
-use std::sync::atomic::Ordering;
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
-use arc_swap::ArcSwap;
+use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use futures::join;
use serde::{Deserialize, Serialize};
@@ -88,7 +87,7 @@ pub struct System {
persist_cluster_layout: Persister<ClusterLayout>,
persist_peer_list: Persister<PeerList>,
- local_status: ArcSwap<NodeStatus>,
+ pub(crate) local_status: RwLock<NodeStatus>,
node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,
pub netapp: Arc<NetApp>,
@@ -106,10 +105,10 @@ pub struct System {
#[cfg(feature = "kubernetes-discovery")]
kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
- metrics: SystemMetrics,
+ metrics: ArcSwapOption<SystemMetrics>,
replication_mode: ReplicationMode,
- replication_factor: usize,
+ pub(crate) replication_factor: usize,
/// The ring
pub ring: watch::Receiver<Arc<Ring>>,
@@ -170,7 +169,7 @@ pub struct ClusterHealth {
pub partitions_all_ok: usize,
}
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ClusterHealthStatus {
/// All nodes are available
Healthy,
@@ -280,10 +279,8 @@ impl System {
}
};
- let metrics = SystemMetrics::new(replication_factor);
-
let mut local_status = NodeStatus::initial(replication_factor, &cluster_layout);
- local_status.update_disk_usage(&config.metadata_dir, &config.data_dir, &metrics);
+ local_status.update_disk_usage(&config.metadata_dir, &config.data_dir);
let ring = Ring::new(cluster_layout, replication_factor);
let (update_ring, ring) = watch::channel(Arc::new(ring));
@@ -357,7 +354,7 @@ impl System {
id: netapp.id.into(),
persist_cluster_layout,
persist_peer_list,
- local_status: ArcSwap::new(Arc::new(local_status)),
+ local_status: RwLock::new(local_status),
node_status: RwLock::new(HashMap::new()),
netapp: netapp.clone(),
peering: peering.clone(),
@@ -377,14 +374,19 @@ impl System {
consul_discovery,
#[cfg(feature = "kubernetes-discovery")]
kubernetes_discovery: config.kubernetes_discovery.clone(),
- metrics,
+ metrics: ArcSwapOption::new(None),
ring,
update_ring: Mutex::new(update_ring),
metadata_dir: config.metadata_dir.clone(),
data_dir: config.data_dir.clone(),
});
+
sys.system_endpoint.set_handler(sys.clone());
+
+ let metrics = SystemMetrics::new(sys.clone());
+ sys.metrics.store(Some(Arc::new(metrics)));
+
Ok(sys)
}
@@ -402,6 +404,11 @@ impl System {
);
}
+ pub fn cleanup(&self) {
+ // Break reference cycle
+ self.metrics.store(None);
+ }
+
// ---- Administrative operations (directly available and
// also available through RPC) ----
@@ -546,12 +553,9 @@ impl System {
}
};
+ let hostname = self.local_status.read().unwrap().hostname.clone();
if let Err(e) = c
- .publish_consul_service(
- self.netapp.id,
- &self.local_status.load_full().hostname,
- rpc_public_addr,
- )
+ .publish_consul_service(self.netapp.id, &hostname, rpc_public_addr)
.await
{
error!("Error while publishing Consul service: {}", e);
@@ -573,13 +577,8 @@ impl System {
}
};
- if let Err(e) = publish_kubernetes_node(
- k,
- self.netapp.id,
- &self.local_status.load_full().hostname,
- rpc_public_addr,
- )
- .await
+ let hostname = self.local_status.read().unwrap().hostname.clone();
+ if let Err(e) = publish_kubernetes_node(k, self.netapp.id, &hostname, rpc_public_addr).await
{
error!("Error while publishing node to Kubernetes: {}", e);
}
@@ -596,15 +595,13 @@ impl System {
}
fn update_local_status(&self) {
- let mut new_si: NodeStatus = self.local_status.load().as_ref().clone();
+ let mut local_status = self.local_status.write().unwrap();
let ring = self.ring.borrow();
- new_si.cluster_layout_version = ring.layout.version;
- new_si.cluster_layout_staging_hash = ring.layout.staging_hash;
-
- new_si.update_disk_usage(&self.metadata_dir, &self.data_dir, &self.metrics);
+ local_status.cluster_layout_version = ring.layout.version;
+ local_status.cluster_layout_staging_hash = ring.layout.staging_hash;
- self.local_status.swap(Arc::new(new_si));
+ local_status.update_disk_usage(&self.metadata_dir, &self.data_dir);
}
// --- RPC HANDLERS ---
@@ -629,7 +626,7 @@ impl System {
from: Uuid,
info: &NodeStatus,
) -> Result<SystemRpc, Error> {
- let local_info = self.local_status.load();
+ let local_info = self.local_status.read().unwrap();
if local_info.replication_factor < info.replication_factor {
error!("Some node have a higher replication factor ({}) than this one ({}). This is not supported and will lead to data corruption. Shutting down for safety.",
@@ -644,6 +641,8 @@ impl System {
tokio::spawn(self.clone().pull_cluster_layout(from));
}
+ drop(local_info);
+
self.node_status
.write()
.unwrap()
@@ -707,8 +706,10 @@ impl System {
while !*stop_signal.borrow() {
let restart_at = Instant::now() + STATUS_EXCHANGE_INTERVAL;
+ // Update the local node status that is exchanged with other nodes.
self.update_local_status();
- let local_status: NodeStatus = self.local_status.load().as_ref().clone();
+
+ let local_status: NodeStatus = self.local_status.read().unwrap().clone();
let _ = self
.rpc
.broadcast(
@@ -904,12 +905,7 @@ impl NodeStatus {
}
}
- fn update_disk_usage(
- &mut self,
- meta_dir: &Path,
- data_dir: &DataDirEnum,
- metrics: &SystemMetrics,
- ) {
+ fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &DataDirEnum) {
use nix::sys::statvfs::statvfs;
let mount_avail = |path: &Path| match statvfs(path) {
Ok(x) => {
@@ -945,27 +941,6 @@ impl NodeStatus {
)
})(),
};
-
- if let Some((avail, total)) = self.meta_disk_avail {
- metrics
- .values
- .meta_disk_avail
- .store(avail, Ordering::Relaxed);
- metrics
- .values
- .meta_disk_total
- .store(total, Ordering::Relaxed);
- }
- if let Some((avail, total)) = self.data_disk_avail {
- metrics
- .values
- .data_disk_avail
- .store(avail, Ordering::Relaxed);
- metrics
- .values
- .data_disk_total
- .store(total, Ordering::Relaxed);
- }
}
}
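The `mount_avail` closure whose context lines are elided above computes both numbers from `statvfs(2)`. A minimal sketch of that computation, assuming the same `nix` crate API that `update_disk_usage()` already imports (the exact arithmetic is an assumption for illustration, not a verbatim copy of the elided lines):

```rust
// Sketch: derive (available, total) bytes for a mount point via statvfs(2).
// Both block counts are expressed in units of the fragment size.
use std::path::Path;

use nix::sys::statvfs::statvfs;

fn mount_avail(path: &Path) -> Option<(u64, u64)> {
    match statvfs(path) {
        Ok(st) => {
            let avail = st.blocks_available() as u64 * st.fragment_size() as u64;
            let total = st.blocks() as u64 * st.fragment_size() as u64;
            Some((avail, total))
        }
        // As in update_disk_usage(), an unreadable mount simply reports
        // no data rather than failing the status update.
        Err(_) => None,
    }
}

fn main() {
    if let Some((avail, total)) = mount_avail(Path::new("/")) {
        println!("/ : {} bytes available of {}", avail, total);
    }
}
```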
diff --git a/src/rpc/system_metrics.rs b/src/rpc/system_metrics.rs
index af81b71f..ffbef6df 100644
--- a/src/rpc/system_metrics.rs
+++ b/src/rpc/system_metrics.rs
@@ -1,32 +1,58 @@
-use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
+use std::time::{Duration, Instant};
use opentelemetry::{global, metrics::*, KeyValue};
+use crate::ring::Ring;
+use crate::system::{ClusterHealthStatus, System};
+
/// SystemMetrics references all counters used for metrics
pub struct SystemMetrics {
+ // Static values
pub(crate) _garage_build_info: ValueObserver<u64>,
pub(crate) _replication_factor: ValueObserver<u64>,
+
+ // Disk space values from System::local_status
pub(crate) _disk_avail: ValueObserver<u64>,
pub(crate) _disk_total: ValueObserver<u64>,
- pub(crate) values: Arc<SystemMetricsValues>,
-}
-#[derive(Default)]
-pub struct SystemMetricsValues {
- pub(crate) data_disk_total: AtomicU64,
- pub(crate) data_disk_avail: AtomicU64,
- pub(crate) meta_disk_total: AtomicU64,
- pub(crate) meta_disk_avail: AtomicU64,
+ // Health report from System::health()
+ pub(crate) _cluster_healthy: ValueObserver<u64>,
+ pub(crate) _cluster_available: ValueObserver<u64>,
+ pub(crate) _known_nodes: ValueObserver<u64>,
+ pub(crate) _connected_nodes: ValueObserver<u64>,
+ pub(crate) _storage_nodes: ValueObserver<u64>,
+ pub(crate) _storage_nodes_ok: ValueObserver<u64>,
+ pub(crate) _partitions: ValueObserver<u64>,
+ pub(crate) _partitions_quorum: ValueObserver<u64>,
+ pub(crate) _partitions_all_ok: ValueObserver<u64>,
+
+ // Status report for individual cluster nodes
+ pub(crate) _layout_node_connected: ValueObserver<u64>,
+ pub(crate) _layout_node_disconnected_time: ValueObserver<u64>,
}
impl SystemMetrics {
- pub fn new(replication_factor: usize) -> Self {
+ pub fn new(system: Arc<System>) -> Self {
let meter = global::meter("garage_system");
- let values = Arc::new(SystemMetricsValues::default());
- let values1 = values.clone();
- let values2 = values.clone();
+
+ let health_cache = RwLock::new((Instant::now(), system.health()));
+ let system2 = system.clone();
+ let get_health = Arc::new(move || {
+ {
+ let cache = health_cache.read().unwrap();
+ if cache.0 > Instant::now() - Duration::from_secs(1) {
+ return cache.1;
+ }
+ }
+
+ let health = system2.health();
+ *health_cache.write().unwrap() = (Instant::now(), health);
+ health
+ });
+
Self {
+ // Static values
_garage_build_info: meter
.u64_value_observer("garage_build_info", move |observer| {
observer.observe(
@@ -39,39 +65,239 @@ impl SystemMetrics {
})
.with_description("Garage build info")
.init(),
- _replication_factor: meter
- .u64_value_observer("garage_replication_factor", move |observer| {
- observer.observe(replication_factor as u64, &[])
- })
- .with_description("Garage replication factor setting")
- .init(),
- _disk_avail: meter
- .u64_value_observer("garage_local_disk_avail", move |observer| {
- match values1.data_disk_avail.load(Ordering::Relaxed) {
- 0 => (),
- x => observer.observe(x, &[KeyValue::new("volume", "data")]),
- };
- match values1.meta_disk_avail.load(Ordering::Relaxed) {
- 0 => (),
- x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
- };
- })
- .with_description("Garage available disk space on each node")
- .init(),
- _disk_total: meter
- .u64_value_observer("garage_local_disk_total", move |observer| {
- match values2.data_disk_total.load(Ordering::Relaxed) {
- 0 => (),
- x => observer.observe(x, &[KeyValue::new("volume", "data")]),
- };
- match values2.meta_disk_total.load(Ordering::Relaxed) {
- 0 => (),
- x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
- };
+ _replication_factor: {
+ let replication_factor = system.replication_factor;
+ meter
+ .u64_value_observer("garage_replication_factor", move |observer| {
+ observer.observe(replication_factor as u64, &[])
+ })
+ .with_description("Garage replication factor setting")
+ .init()
+ },
+
+ // Disk space values from System::local_status
+ _disk_avail: {
+ let system = system.clone();
+ meter
+ .u64_value_observer("garage_local_disk_avail", move |observer| {
+ let st = system.local_status.read().unwrap();
+ if let Some((avail, _total)) = st.data_disk_avail {
+ observer.observe(avail, &[KeyValue::new("volume", "data")]);
+ }
+ if let Some((avail, _total)) = st.meta_disk_avail {
+ observer.observe(avail, &[KeyValue::new("volume", "metadata")]);
+ }
+ })
+ .with_description("Garage available disk space on each node")
+ .init()
+ },
+ _disk_total: {
+ let system = system.clone();
+ meter
+ .u64_value_observer("garage_local_disk_total", move |observer| {
+ let st = system.local_status.read().unwrap();
+ if let Some((_avail, total)) = st.data_disk_avail {
+ observer.observe(total, &[KeyValue::new("volume", "data")]);
+ }
+ if let Some((_avail, total)) = st.meta_disk_avail {
+ observer.observe(total, &[KeyValue::new("volume", "metadata")]);
+ }
+ })
+ .with_description("Garage total disk space on each node")
+ .init()
+ },
+
+ // Health report from System::health()
+ _cluster_healthy: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_healthy", move |observer| {
+ let h = get_health();
+ if h.status == ClusterHealthStatus::Healthy {
+ observer.observe(1, &[]);
+ } else {
+ observer.observe(0, &[]);
+ }
+ })
+ .with_description("Whether all storage nodes are connected")
+ .init()
+ },
+ _cluster_available: {
+ let get_health = get_health.clone();
+ meter.u64_value_observer("cluster_available", move |observer| {
+ let h = get_health();
+ if h.status != ClusterHealthStatus::Unavailable {
+ observer.observe(1, &[]);
+ } else {
+ observer.observe(0, &[]);
+ }
})
- .with_description("Garage total disk space on each node")
- .init(),
- values,
+ .with_description("Whether all requests can be served, even if some storage nodes are disconnected")
+ .init()
+ },
+ _known_nodes: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_known_nodes", move |observer| {
+ let h = get_health();
+ observer.observe(h.known_nodes as u64, &[]);
+ })
+ .with_description("Number of nodes already seen once in the cluster")
+ .init()
+ },
+ _connected_nodes: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_connected_nodes", move |observer| {
+ let h = get_health();
+ observer.observe(h.connected_nodes as u64, &[]);
+ })
+ .with_description("Number of nodes currently connected")
+ .init()
+ },
+ _storage_nodes: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_storage_nodes", move |observer| {
+ let h = get_health();
+ observer.observe(h.storage_nodes as u64, &[]);
+ })
+ .with_description("Number of storage nodes declared in the current layout")
+ .init()
+ },
+ _storage_nodes_ok: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_storage_nodes_ok", move |observer| {
+ let h = get_health();
+ observer.observe(h.storage_nodes_ok as u64, &[]);
+ })
+ .with_description("Number of storage nodes currently connected")
+ .init()
+ },
+ _partitions: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_partitions", move |observer| {
+ let h = get_health();
+ observer.observe(h.partitions as u64, &[]);
+ })
+ .with_description("Number of partitions in the layout")
+ .init()
+ },
+ _partitions_quorum: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_partitions_quorum", move |observer| {
+ let h = get_health();
+ observer.observe(h.partitions_quorum as u64, &[]);
+ })
+ .with_description(
+ "Number of partitions for which we have a quorum of connected nodes",
+ )
+ .init()
+ },
+ _partitions_all_ok: {
+ let get_health = get_health.clone();
+ meter
+ .u64_value_observer("cluster_partitions_all_ok", move |observer| {
+ let h = get_health();
+ observer.observe(h.partitions_all_ok as u64, &[]);
+ })
+ .with_description(
+ "Number of partitions for which all storage nodes are connected",
+ )
+ .init()
+ },
+
+ // Status report for individual cluster nodes
+ _layout_node_connected: {
+ let system = system.clone();
+ meter
+ .u64_value_observer("cluster_layout_node_connected", move |observer| {
+ let ring: Arc<Ring> = system.ring.borrow().clone();
+ let nodes = system.get_known_nodes();
+ for (id, _, config) in ring.layout.roles.items().iter() {
+ if let Some(role) = &config.0 {
+ let mut kv = vec![
+ KeyValue::new("id", format!("{:?}", id)),
+ KeyValue::new("role_zone", role.zone.clone()),
+ ];
+ match role.capacity {
+ Some(cap) => {
+ kv.push(KeyValue::new("role_capacity", cap as i64));
+ kv.push(KeyValue::new("role_gateway", 0));
+ }
+ None => {
+ kv.push(KeyValue::new("role_gateway", 1));
+ }
+ }
+
+ let value;
+ if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+ value = if node.is_up { 1 } else { 0 };
+ // TODO: if we add address and hostname, and those change, we
+ // get duplicate metrics, due to bad otel aggregation :(
+ // Can probably be fixed when we upgrade opentelemetry
+ // kv.push(KeyValue::new("address", node.addr.to_string()));
+ // kv.push(KeyValue::new(
+ // "hostname",
+ // node.status.hostname.clone(),
+ // ));
+ } else {
+ value = 0;
+ }
+
+ observer.observe(value, &kv);
+ }
+ }
+ })
+ .with_description("Connection status for nodes in the cluster layout")
+ .init()
+ },
+ _layout_node_disconnected_time: {
+ let system = system.clone();
+ meter
+ .u64_value_observer("cluster_layout_node_disconnected_time", move |observer| {
+ let ring: Arc<Ring> = system.ring.borrow().clone();
+ let nodes = system.get_known_nodes();
+ for (id, _, config) in ring.layout.roles.items().iter() {
+ if let Some(role) = &config.0 {
+ let mut kv = vec![
+ KeyValue::new("id", format!("{:?}", id)),
+ KeyValue::new("role_zone", role.zone.clone()),
+ ];
+ match role.capacity {
+ Some(cap) => {
+ kv.push(KeyValue::new("role_capacity", cap as i64));
+ kv.push(KeyValue::new("role_gateway", 0));
+ }
+ None => {
+ kv.push(KeyValue::new("role_gateway", 1));
+ }
+ }
+
+ if let Some(node) = nodes.iter().find(|n| n.id == *id) {
+ // TODO: see comment above
+ // kv.push(KeyValue::new("address", node.addr.to_string()));
+ // kv.push(KeyValue::new(
+ // "hostname",
+ // node.status.hostname.clone(),
+ // ));
+ if node.is_up {
+ observer.observe(0, &kv);
+ } else if let Some(secs) = node.last_seen_secs_ago {
+ observer.observe(secs, &kv);
+ }
+ }
+ }
+ }
+ })
+ .with_description(
+ "Time (in seconds) since last connection to nodes in the cluster layout",
+ )
+ .init()
+ },
}
}
}
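The `get_health` closure near the top of `SystemMetrics::new()` exists because a dozen observers now call `System::health()` on every scrape; the closure caches the result for one second so the computation runs at most once per scrape cycle. A standalone sketch of the same pattern (the `Health` type and the check function are stand-ins, not Garage's types):

```rust
// Sketch of the 1-second result cache behind `get_health`: many observer
// callbacks share one closure that recomputes an expensive value at most
// once per second. `Health` and `expensive_health_check` are stand-ins.
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};

#[derive(Clone, Copy)]
struct Health {
    connected_nodes: usize,
}

fn expensive_health_check() -> Health {
    // Stand-in for System::health(), which walks the layout and peer list.
    Health { connected_nodes: 3 }
}

fn cached_health() -> Arc<dyn Fn() -> Health + Send + Sync> {
    let cache = RwLock::new((Instant::now(), expensive_health_check()));
    Arc::new(move || {
        {
            // Fast path: reuse the cached value while it is under 1s old.
            let c = cache.read().unwrap();
            if c.0.elapsed() < Duration::from_secs(1) {
                return c.1;
            }
        }
        // Slow path: recompute, refresh the cache, return the fresh value.
        let h = expensive_health_check();
        *cache.write().unwrap() = (Instant::now(), h);
        h
    })
}

fn main() {
    let get_health = cached_health();
    println!("connected nodes: {}", get_health().connected_nodes);
}
```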