Diffstat (limited to 'src')
-rw-r--r--  src/api/Cargo.toml  12
-rw-r--r--  src/api/admin/api_server.rs  48
-rw-r--r--  src/api/admin/router.rs  3
-rw-r--r--  src/api/k2v/batch.rs  11
-rw-r--r--  src/api/k2v/item.rs  3
-rw-r--r--  src/api/s3/list.rs  11
-rw-r--r--  src/api/s3/post_object.rs  5
-rw-r--r--  src/api/s3/put.rs  3
-rw-r--r--  src/block/Cargo.toml  4
-rw-r--r--  src/db/Cargo.toml  8
-rw-r--r--  src/garage/Cargo.toml  8
-rw-r--r--  src/garage/admin.rs  125
-rw-r--r--  src/garage/cli/cmd.rs  15
-rw-r--r--  src/garage/cli/structs.rs  5
-rw-r--r--  src/garage/tests/common/garage.rs  2
-rw-r--r--  src/garage/tests/k2v/batch.rs  103
-rw-r--r--  src/garage/tests/k2v/item.rs  37
-rw-r--r--  src/garage/tests/s3/website.rs  136
-rw-r--r--  src/k2v-client/Cargo.toml  14
-rw-r--r--  src/model/Cargo.toml  8
-rw-r--r--  src/model/k2v/causality.rs  5
-rw-r--r--  src/model/k2v/item_table.rs  4
-rw-r--r--  src/model/k2v/seen.rs  5
-rw-r--r--  src/rpc/Cargo.toml  3
-rw-r--r--  src/rpc/lib.rs  6
-rw-r--r--  src/rpc/system.rs  102
-rw-r--r--  src/rpc/system_metrics.rs  44
-rw-r--r--  src/table/Cargo.toml  2
-rw-r--r--  src/util/Cargo.toml  8
-rw-r--r--  src/util/data.rs  4
-rw-r--r--  src/util/time.rs  2
-rw-r--r--  src/web/Cargo.toml  2
32 files changed, 590 insertions, 158 deletions
diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml
index dba0bbef..24c48604 100644
--- a/src/api/Cargo.toml
+++ b/src/api/Cargo.toml
@@ -21,28 +21,28 @@ garage_util = { version = "0.8.1", path = "../util" }
garage_rpc = { version = "0.8.1", path = "../rpc" }
async-trait = "0.1.7"
-base64 = "0.13"
+base64 = "0.21"
bytes = "1.0"
chrono = "0.4"
crypto-common = "0.1"
err-derive = "0.3"
hex = "0.4"
hmac = "0.12"
-idna = "0.2"
-tracing = "0.1.30"
+idna = "0.3"
+tracing = "0.1"
md-5 = "0.10"
nom = "7.1"
sha2 = "0.10"
futures = "0.3"
futures-util = "0.3"
-pin-project = "1.0.11"
+pin-project = "1.0.12"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
tokio-stream = "0.1"
form_urlencoded = "1.0.0"
http = "0.2"
-httpdate = "0.3"
+httpdate = "1.0"
http-range = "0.1"
hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
multer = "2.0"
@@ -52,7 +52,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_bytes = "0.11"
serde_json = "1.0"
quick-xml = { version = "0.21", features = [ "serialize" ] }
-url = "2.1"
+url = "2.3"
opentelemetry = "0.17"
opentelemetry-prometheus = { version = "0.10", optional = true }
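
The base64 0.13 -> 0.21 bump seen here (and in the other Cargo.toml hunks below) replaces the crate's free functions with engine values. A minimal sketch of the mapping, assuming only what the patch itself shows (`base64::prelude::*` exporting `BASE64_STANDARD`):

    use base64::prelude::*;

    fn main() {
        // 0.13 style (removed): base64::encode(b"hello")
        let encoded = BASE64_STANDARD.encode(b"hello");
        assert_eq!(encoded, "aGVsbG8=");

        // decode() on the engine returns a Result, which is what the
        // .ok_or_bad_request(..) error handling in the API hunks consumes.
        let decoded = BASE64_STANDARD.decode(&encoded).unwrap();
        assert_eq!(decoded, b"hello");
    }
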
diff --git a/src/api/admin/api_server.rs b/src/api/admin/api_server.rs
index 2d325fb1..7a534f32 100644
--- a/src/api/admin/api_server.rs
+++ b/src/api/admin/api_server.rs
@@ -77,6 +77,53 @@ impl AdminApiServer {
.body(Body::empty())?)
}
+ async fn handle_check_website_enabled(
+ &self,
+ req: Request<Body>,
+ ) -> Result<Response<Body>, Error> {
+ let has_domain_header = req.headers().contains_key("domain");
+
+ if !has_domain_header {
+ return Err(Error::bad_request("No domain header found"));
+ }
+
+ let domain = &req
+ .headers()
+ .get("domain")
+ .ok_or_internal_error("Could not parse domain header")?;
+
+ let domain_string = String::from(
+ domain
+ .to_str()
+ .ok_or_bad_request("Invalid characters found in domain header")?,
+ );
+
+ let bucket_id = self
+ .garage
+ .bucket_helper()
+ .resolve_global_bucket_name(&domain_string)
+ .await?
+ .ok_or_else(|| HelperError::NoSuchBucket(domain_string))?;
+
+ let bucket = self
+ .garage
+ .bucket_helper()
+ .get_existing_bucket(bucket_id)
+ .await?;
+
+ let bucket_state = bucket.state.as_option().unwrap();
+ let bucket_website_config = bucket_state.website_config.get();
+
+ match bucket_website_config {
+ Some(_v) => Ok(Response::builder()
+ .status(StatusCode::OK)
+ .body(Body::from("Bucket authorized for website hosting"))?),
+ None => Err(Error::bad_request(
+ "Bucket is not authorized for website hosting",
+ )),
+ }
+ }
+
fn handle_health(&self) -> Result<Response<Body>, Error> {
let health = self.garage.system.health();
@@ -174,6 +221,7 @@ impl ApiHandler for AdminApiServer {
match endpoint {
Endpoint::Options => self.handle_options(&req),
+ Endpoint::CheckWebsiteEnabled => self.handle_check_website_enabled(req).await,
Endpoint::Health => self.handle_health(),
Endpoint::Metrics => self.handle_metrics(),
Endpoint::GetClusterStatus => handle_get_cluster_status(&self.garage).await,
diff --git a/src/api/admin/router.rs b/src/api/admin/router.rs
index 62e6abc3..0dcb1546 100644
--- a/src/api/admin/router.rs
+++ b/src/api/admin/router.rs
@@ -17,6 +17,7 @@ router_match! {@func
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Endpoint {
Options,
+ CheckWebsiteEnabled,
Health,
Metrics,
GetClusterStatus,
@@ -91,6 +92,7 @@ impl Endpoint {
let res = router_match!(@gen_path_parser (req.method(), path, query) [
OPTIONS _ => Options,
+ GET "/check" => CheckWebsiteEnabled,
GET "/health" => Health,
GET "/metrics" => Metrics,
GET "/v0/status" => GetClusterStatus,
@@ -136,6 +138,7 @@ impl Endpoint {
pub fn authorization_type(&self) -> Authorization {
match self {
Self::Health => Authorization::None,
+ Self::CheckWebsiteEnabled => Authorization::None,
Self::Metrics => Authorization::MetricsToken,
_ => Authorization::AdminToken,
}
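
The two hunks above wire a new unauthenticated GET /check route into the admin API. A hedged sketch of how a client might probe it, assuming the admin API listens on its usual 127.0.0.1:3903 and using a hypothetical bucket name:

    use hyper::{Body, Client, Request, StatusCode};

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        let req = Request::builder()
            .method("GET")
            .uri("http://127.0.0.1:3903/check")
            .header("domain", "my-website-bucket") // hypothetical domain
            .body(Body::empty())?;
        let resp = Client::new().request(req).await?;
        // 200 => bucket resolved and website hosting enabled;
        // 400 => missing/invalid header or website hosting disabled.
        println!("website enabled: {}", resp.status() == StatusCode::OK);
        Ok(())
    }
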
diff --git a/src/api/k2v/batch.rs b/src/api/k2v/batch.rs
index abc9403c..26d678da 100644
--- a/src/api/k2v/batch.rs
+++ b/src/api/k2v/batch.rs
@@ -1,5 +1,6 @@
use std::sync::Arc;
+use base64::prelude::*;
use hyper::{Body, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
@@ -26,9 +27,11 @@ pub async fn handle_insert_batch(
for it in items {
let ct = it.ct.map(|s| CausalContext::parse_helper(&s)).transpose()?;
let v = match it.v {
- Some(vs) => {
- DvvsValue::Value(base64::decode(vs).ok_or_bad_request("Invalid base64 value")?)
- }
+ Some(vs) => DvvsValue::Value(
+ BASE64_STANDARD
+ .decode(vs)
+ .ok_or_bad_request("Invalid base64 value")?,
+ ),
None => DvvsValue::Deleted,
};
items2.push((it.pk, it.sk, ct, v));
@@ -358,7 +361,7 @@ impl ReadBatchResponseItem {
.values()
.iter()
.map(|v| match v {
- DvvsValue::Value(x) => Some(base64::encode(x)),
+ DvvsValue::Value(x) => Some(BASE64_STANDARD.encode(x)),
DvvsValue::Deleted => None,
})
.collect::<Vec<_>>();
diff --git a/src/api/k2v/item.rs b/src/api/k2v/item.rs
index 787a7df3..e13a0f30 100644
--- a/src/api/k2v/item.rs
+++ b/src/api/k2v/item.rs
@@ -1,5 +1,6 @@
use std::sync::Arc;
+use base64::prelude::*;
use http::header;
use hyper::{Body, Request, Response, StatusCode};
@@ -81,7 +82,7 @@ impl ReturnFormat {
.iter()
.map(|v| match v {
DvvsValue::Deleted => serde_json::Value::Null,
- DvvsValue::Value(v) => serde_json::Value::String(base64::encode(v)),
+ DvvsValue::Value(v) => serde_json::Value::String(BASE64_STANDARD.encode(v)),
})
.collect::<Vec<_>>();
let json_body =
diff --git a/src/api/s3/list.rs b/src/api/s3/list.rs
index e5f486c8..5cb0d65a 100644
--- a/src/api/s3/list.rs
+++ b/src/api/s3/list.rs
@@ -3,6 +3,7 @@ use std::collections::{BTreeMap, BTreeSet};
use std::iter::{Iterator, Peekable};
use std::sync::Arc;
+use base64::prelude::*;
use hyper::{Body, Response};
use garage_util::data::*;
@@ -129,11 +130,11 @@ pub async fn handle_list(
next_continuation_token: match (query.is_v2, &pagination) {
(true, Some(RangeBegin::AfterKey { key })) => Some(s3_xml::Value(format!(
"]{}",
- base64::encode(key.as_bytes())
+ BASE64_STANDARD.encode(key.as_bytes())
))),
(true, Some(RangeBegin::IncludingKey { key, .. })) => Some(s3_xml::Value(format!(
"[{}",
- base64::encode(key.as_bytes())
+ BASE64_STANDARD.encode(key.as_bytes())
))),
_ => None,
},
@@ -583,14 +584,16 @@ impl ListObjectsQuery {
(Some(token), _) => match &token[..1] {
"[" => Ok(RangeBegin::IncludingKey {
key: String::from_utf8(
- base64::decode(token[1..].as_bytes())
+ BASE64_STANDARD
+ .decode(token[1..].as_bytes())
.ok_or_bad_request("Invalid continuation token")?,
)?,
fallback_key: None,
}),
"]" => Ok(RangeBegin::AfterKey {
key: String::from_utf8(
- base64::decode(token[1..].as_bytes())
+ BASE64_STANDARD
+ .decode(token[1..].as_bytes())
.ok_or_bad_request("Invalid continuation token")?,
)?,
}),
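
As the hunks above show, v2 continuation tokens are a one-character prefix ('[' to resume at the key, ']' to resume after it) followed by the base64 of the key. A small round-trip sketch of that scheme:

    use base64::prelude::*;

    fn encode_token(after_key: bool, key: &str) -> String {
        let prefix = if after_key { ']' } else { '[' };
        format!("{}{}", prefix, BASE64_STANDARD.encode(key.as_bytes()))
    }

    fn decode_token(token: &str) -> Option<(bool, String)> {
        let after_key = match &token[..1] {
            "[" => false,
            "]" => true,
            _ => return None,
        };
        let key = BASE64_STANDARD.decode(token[1..].as_bytes()).ok()?;
        Some((after_key, String::from_utf8(key).ok()?))
    }

    fn main() {
        let t = encode_token(true, "photos/2023/img.jpg");
        assert_eq!(decode_token(&t), Some((true, "photos/2023/img.jpg".into())));
    }
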
diff --git a/src/api/s3/post_object.rs b/src/api/s3/post_object.rs
index d063faa4..da542526 100644
--- a/src/api/s3/post_object.rs
+++ b/src/api/s3/post_object.rs
@@ -4,6 +4,7 @@ use std::ops::RangeInclusive;
use std::sync::Arc;
use std::task::{Context, Poll};
+use base64::prelude::*;
use bytes::Bytes;
use chrono::{DateTime, Duration, Utc};
use futures::{Stream, StreamExt};
@@ -138,7 +139,9 @@ pub async fn handle_post_object(
.get_existing_bucket(bucket_id)
.await?;
- let decoded_policy = base64::decode(&policy).ok_or_bad_request("Invalid policy")?;
+ let decoded_policy = BASE64_STANDARD
+ .decode(&policy)
+ .ok_or_bad_request("Invalid policy")?;
let decoded_policy: Policy =
serde_json::from_slice(&decoded_policy).ok_or_bad_request("Invalid policy")?;
diff --git a/src/api/s3/put.rs b/src/api/s3/put.rs
index c08fe40a..350ab884 100644
--- a/src/api/s3/put.rs
+++ b/src/api/s3/put.rs
@@ -1,6 +1,7 @@
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::sync::Arc;
+use base64::prelude::*;
use futures::prelude::*;
use hyper::body::{Body, Bytes};
use hyper::header::{HeaderMap, HeaderValue};
@@ -207,7 +208,7 @@ fn ensure_checksum_matches(
}
}
if let Some(expected_md5) = content_md5 {
- if expected_md5.trim_matches('"') != base64::encode(data_md5sum) {
+ if expected_md5.trim_matches('"') != BASE64_STANDARD.encode(data_md5sum) {
return Err(Error::bad_request("Unable to validate content-md5"));
} else {
trace!("Successfully validated content-md5");
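
For context on the hunk above: the Content-MD5 header carries the base64 of the body's MD5 digest, so the check is digest-then-encode. A self-contained sketch using the md-5 crate already in this workspace:

    use base64::prelude::*;
    use md5::{Digest, Md5};

    fn content_md5_matches(header_value: &str, body: &[u8]) -> bool {
        let digest = Md5::digest(body);
        // Quotes are stripped because some clients send the header quoted.
        header_value.trim_matches('"') == BASE64_STANDARD.encode(digest)
    }

    fn main() {
        assert!(content_md5_matches("XrY7u+Ae7tCTyyK7j1rNww==", b"hello world"));
    }
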
diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml
index 1e4eb64e..a7e8bc2c 100644
--- a/src/block/Cargo.toml
+++ b/src/block/Cargo.toml
@@ -25,11 +25,11 @@ arc-swap = "1.5"
async-trait = "0.1.7"
bytes = "1.0"
hex = "0.4"
-tracing = "0.1.30"
+tracing = "0.1"
rand = "0.8"
async-compression = { version = "0.3", features = ["tokio", "zstd"] }
-zstd = { version = "0.9", default-features = false }
+zstd = { version = "0.12", default-features = false }
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"
diff --git a/src/db/Cargo.toml b/src/db/Cargo.toml
index c479d9d1..95bde6d5 100644
--- a/src/db/Cargo.toml
+++ b/src/db/Cargo.toml
@@ -19,18 +19,18 @@ required-features = ["cli"]
[dependencies]
err-derive = "0.3"
hexdump = "0.1"
-tracing = "0.1.30"
+tracing = "0.1"
heed = { version = "0.11", default-features = false, features = ["lmdb"], optional = true }
-rusqlite = { version = "0.27", optional = true }
+rusqlite = { version = "0.28", optional = true }
sled = { version = "0.34", optional = true }
# cli deps
-clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
+clap = { version = "4.1", optional = true, features = ["derive", "env"] }
pretty_env_logger = { version = "0.4", optional = true }
[dev-dependencies]
-mktemp = "0.4"
+mktemp = "0.5"
[features]
default = [ "sled" ]
diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml
index b43b0242..f938f356 100644
--- a/src/garage/Cargo.toml
+++ b/src/garage/Cargo.toml
@@ -33,10 +33,10 @@ garage_web = { version = "0.8.1", path = "../web" }
backtrace = "0.3"
bytes = "1.0"
bytesize = "1.1"
-timeago = "0.3"
+timeago = "0.4"
parse_duration = "2.1"
hex = "0.4"
-tracing = { version = "0.1.30" }
+tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
rand = "0.8"
async-trait = "0.1.7"
@@ -45,7 +45,7 @@ sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"
structopt = { version = "0.3", default-features = false }
-toml = "0.5"
+toml = "0.6"
futures = "0.3"
futures-util = "0.3"
@@ -69,7 +69,7 @@ sha2 = "0.10"
static_init = "1.0"
assert-json-diff = "2.0"
serde_json = "1.0"
-base64 = "0.13"
+base64 = "0.21"
[features]
diff --git a/src/garage/admin.rs b/src/garage/admin.rs
index 305c5c65..4eabebca 100644
--- a/src/garage/admin.rs
+++ b/src/garage/admin.rs
@@ -15,6 +15,7 @@ use garage_util::time::*;
use garage_table::replication::*;
use garage_table::*;
+use garage_rpc::ring::PARTITION_BITS;
use garage_rpc::*;
use garage_block::manager::BlockResyncErrorInfo;
@@ -783,6 +784,7 @@ impl AdminRpcHandler {
for node in ring.layout.node_ids().iter() {
let mut opt = opt.clone();
opt.all_nodes = false;
+ opt.skip_global = true;
writeln!(&mut ret, "\n======================").unwrap();
writeln!(&mut ret, "Stats for node {:?}:", node).unwrap();
@@ -799,6 +801,15 @@ impl AdminRpcHandler {
Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
}
}
+
+ writeln!(&mut ret, "\n======================").unwrap();
+ write!(
+ &mut ret,
+ "Cluster statistics:\n\n{}",
+ self.gather_cluster_stats()
+ )
+ .unwrap();
+
Ok(AdminRpc::Ok(ret))
} else {
Ok(AdminRpc::Ok(self.gather_stats_local(opt)?))
@@ -819,22 +830,6 @@ impl AdminRpcHandler {
writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
- // Gather ring statistics
- let ring = self.garage.system.ring.borrow().clone();
- let mut ring_nodes = HashMap::new();
- for (_i, loc) in ring.partitions().iter() {
- for n in ring.get_nodes(loc, ring.replication_factor).iter() {
- if !ring_nodes.contains_key(n) {
- ring_nodes.insert(*n, 0usize);
- }
- *ring_nodes.get_mut(n).unwrap() += 1;
- }
- }
- writeln!(&mut ret, "\nRing nodes & partition count:").unwrap();
- for (n, c) in ring_nodes.iter() {
- writeln!(&mut ret, " {:?} {}", n, c).unwrap();
- }
-
// Gather table statistics
let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
@@ -881,12 +876,108 @@ impl AdminRpcHandler {
.unwrap();
if !opt.detailed {
- writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap();
+ writeln!(&mut ret, "\nIf values are missing above (marked as NC), consider adding the --detailed flag (this will be slow).").unwrap();
+ }
+
+ if !opt.skip_global {
+ write!(&mut ret, "\n{}", self.gather_cluster_stats()).unwrap();
}
Ok(ret)
}
+ fn gather_cluster_stats(&self) -> String {
+ let mut ret = String::new();
+
+ // Gather storage node and free space statistics
+ let layout = &self.garage.system.ring.borrow().layout;
+ let mut node_partition_count = HashMap::<Uuid, u64>::new();
+ for short_id in layout.ring_assignation_data.iter() {
+ let id = layout.node_id_vec[*short_id as usize];
+ *node_partition_count.entry(id).or_default() += 1;
+ }
+ let node_info = self
+ .garage
+ .system
+ .get_known_nodes()
+ .into_iter()
+ .map(|n| (n.id, n))
+ .collect::<HashMap<_, _>>();
+
+ let mut table = vec![" ID\tHostname\tZone\tCapacity\tPart.\tDataAvail\tMetaAvail".into()];
+ for (id, parts) in node_partition_count.iter() {
+ let info = node_info.get(id);
+ let status = info.map(|x| &x.status);
+ let role = layout.roles.get(id).and_then(|x| x.0.as_ref());
+ let hostname = status.map(|x| x.hostname.as_str()).unwrap_or("?");
+ let zone = role.map(|x| x.zone.as_str()).unwrap_or("?");
+ let capacity = role.map(|x| x.capacity_string()).unwrap_or("?".into());
+ let avail_str = |x| match x {
+ Some((avail, total)) => {
+ let pct = (avail as f64) / (total as f64) * 100.;
+ let avail = bytesize::ByteSize::b(avail);
+ let total = bytesize::ByteSize::b(total);
+ format!("{}/{} ({:.1}%)", avail, total, pct)
+ }
+ None => "?".into(),
+ };
+ let data_avail = avail_str(status.and_then(|x| x.data_disk_avail));
+ let meta_avail = avail_str(status.and_then(|x| x.meta_disk_avail));
+ table.push(format!(
+ " {:?}\t{}\t{}\t{}\t{}\t{}\t{}",
+ id, hostname, zone, capacity, parts, data_avail, meta_avail
+ ));
+ }
+ write!(
+ &mut ret,
+ "Storage nodes:\n{}",
+ format_table_to_string(table)
+ )
+ .unwrap();
+
+ let meta_part_avail = node_partition_count
+ .iter()
+ .filter_map(|(id, parts)| {
+ node_info
+ .get(id)
+ .and_then(|x| x.status.meta_disk_avail)
+ .map(|c| c.0 / *parts)
+ })
+ .collect::<Vec<_>>();
+ let data_part_avail = node_partition_count
+ .iter()
+ .filter_map(|(id, parts)| {
+ node_info
+ .get(id)
+ .and_then(|x| x.status.data_disk_avail)
+ .map(|c| c.0 / *parts)
+ })
+ .collect::<Vec<_>>();
+ if !meta_part_avail.is_empty() && !data_part_avail.is_empty() {
+ let meta_avail =
+ bytesize::ByteSize(meta_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
+ let data_avail =
+ bytesize::ByteSize(data_part_avail.iter().min().unwrap() * (1 << PARTITION_BITS));
+ writeln!(
+ &mut ret,
+ "\nEstimated available storage space cluster-wide (might be lower in practice):"
+ )
+ .unwrap();
+ if meta_part_avail.len() < node_partition_count.len()
+ || data_part_avail.len() < node_partition_count.len()
+ {
+ writeln!(&mut ret, " data: < {}", data_avail).unwrap();
+ writeln!(&mut ret, " metadata: < {}", meta_avail).unwrap();
+ writeln!(&mut ret, "A precise estimate could not be given as information is missing for some storage nodes.").unwrap();
+ } else {
+ writeln!(&mut ret, " data: {}", data_avail).unwrap();
+ writeln!(&mut ret, " metadata: {}", meta_avail).unwrap();
+ }
+ }
+
+ ret
+ }
+
fn gather_table_stats<F, R>(
&self,
t: &Arc<Table<F, R>>,
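
The estimate printed by gather_cluster_stats follows a simple rule: divide each node's available bytes by the number of partition replicas it holds, take the worst per-partition figure, and multiply by the total partition count (1 << PARTITION_BITS). A worked sketch, assuming Garage's 8 partition bits (256 partitions):

    const PARTITION_BITS: u64 = 8; // assumed value of the ring constant

    /// nodes: (available bytes, partition replicas held); parts is never 0.
    fn estimated_avail(nodes: &[(u64, u64)]) -> Option<u64> {
        nodes
            .iter()
            .map(|(avail, parts)| avail / parts)
            .min()
            .map(|per_part| per_part * (1 << PARTITION_BITS))
    }

    fn main() {
        // Two nodes each holding 128 partition replicas,
        // with 100 GB and 60 GB free:
        let nodes = [(100_000_000_000, 128), (60_000_000_000, 128)];
        // min(781.25 MB, 468.75 MB) per partition * 256 = 120 GB.
        assert_eq!(estimated_avail(&nodes), Some(120_000_000_000));
    }
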
diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs
index 46e9113c..af7f1aa1 100644
--- a/src/garage/cli/cmd.rs
+++ b/src/garage/cli/cmd.rs
@@ -59,18 +59,29 @@ pub async fn cmd_status(rpc_cli: &Endpoint<SystemRpc, ()>, rpc_host: NodeID) ->
let layout = fetch_layout(rpc_cli, rpc_host).await?;
println!("==== HEALTHY NODES ====");
- let mut healthy_nodes = vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity".to_string()];
+ let mut healthy_nodes =
+ vec!["ID\tHostname\tAddress\tTags\tZone\tCapacity\tDataAvail\tMetaAvail".to_string()];
for adv in status.iter().filter(|adv| adv.is_up) {
match layout.roles.get(&adv.id) {
Some(NodeRoleV(Some(cfg))) => {
+ let data_avail = match &adv.status.data_disk_avail {
+ _ if cfg.capacity.is_none() => "N/A".into(),
+ Some((avail, total)) => {
+ let pct = (*avail as f64) / (*total as f64) * 100.;
+ let avail = bytesize::ByteSize::b(*avail);
+ format!("{} ({:.1}%)", avail, pct)
+ }
+ None => "?".into(),
+ };
healthy_nodes.push(format!(
- "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}",
+ "{id:?}\t{host}\t{addr}\t[{tags}]\t{zone}\t{capacity}\t{data_avail}",
id = adv.id,
host = adv.status.hostname,
addr = adv.addr,
tags = cfg.tags.join(","),
zone = cfg.zone,
capacity = cfg.capacity_string(),
+ data_avail = data_avail,
));
}
_ => {
diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs
index 661a71f0..01ae92da 100644
--- a/src/garage/cli/structs.rs
+++ b/src/garage/cli/structs.rs
@@ -504,6 +504,11 @@ pub struct StatsOpt {
/// Gather detailed statistics (this can be long)
#[structopt(short = "d", long = "detailed")]
pub detailed: bool,
+
+ /// Don't show global cluster stats (internal use in RPC)
+ #[structopt(skip)]
+ #[serde(default)]
+ pub skip_global: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
diff --git a/src/garage/tests/common/garage.rs b/src/garage/tests/common/garage.rs
index 44d727f9..8f994f49 100644
--- a/src/garage/tests/common/garage.rs
+++ b/src/garage/tests/common/garage.rs
@@ -25,6 +25,7 @@ pub struct Instance {
pub s3_port: u16,
pub k2v_port: u16,
pub web_port: u16,
+ pub admin_port: u16,
}
impl Instance {
@@ -105,6 +106,7 @@ api_bind_addr = "127.0.0.1:{admin_port}"
s3_port: port,
k2v_port: port + 1,
web_port: port + 3,
+ admin_port: port + 4,
}
}
diff --git a/src/garage/tests/k2v/batch.rs b/src/garage/tests/k2v/batch.rs
index 6abba1c5..595d0ba8 100644
--- a/src/garage/tests/k2v/batch.rs
+++ b/src/garage/tests/k2v/batch.rs
@@ -3,6 +3,7 @@ use std::collections::HashMap;
use crate::common;
use assert_json_diff::assert_json_eq;
+use base64::prelude::*;
use serde_json::json;
use super::json_body;
@@ -36,12 +37,12 @@ async fn test_batch() {
{{"pk": "root", "sk": "d.2", "ct": null, "v": "{}"}},
{{"pk": "root", "sk": "e", "ct": null, "v": "{}"}}
]"#,
- base64::encode(values.get(&"a").unwrap()),
- base64::encode(values.get(&"b").unwrap()),
- base64::encode(values.get(&"c").unwrap()),
- base64::encode(values.get(&"d.1").unwrap()),
- base64::encode(values.get(&"d.2").unwrap()),
- base64::encode(values.get(&"e").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"a").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"b").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"c").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"d.1").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"d.2").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"e").unwrap()),
)
.into_bytes(),
)
@@ -120,12 +121,12 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]},
- {"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]},
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]},
- {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
+ {"sk": "a", "ct": ct.get("a").unwrap(), "v": [BASE64_STANDARD.encode(values.get("a").unwrap())]},
+ {"sk": "b", "ct": ct.get("b").unwrap(), "v": [BASE64_STANDARD.encode(values.get("b").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap())]},
+ {"sk": "e", "ct": ct.get("e").unwrap(), "v": [BASE64_STANDARD.encode(values.get("e").unwrap())]}
],
"more": false,
"nextStart": null,
@@ -141,10 +142,10 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]},
- {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap())]},
+ {"sk": "e", "ct": ct.get("e").unwrap(), "v": [BASE64_STANDARD.encode(values.get("e").unwrap())]}
],
"more": false,
"nextStart": null,
@@ -160,9 +161,9 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -178,8 +179,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
- {"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap())]},
+ {"sk": "b", "ct": ct.get("b").unwrap(), "v": [BASE64_STANDARD.encode(values.get("b").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -195,8 +196,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap())]},
- {"sk": "b", "ct": ct.get("b").unwrap(), "v": [base64::encode(values.get("b").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap())]},
+ {"sk": "b", "ct": ct.get("b").unwrap(), "v": [BASE64_STANDARD.encode(values.get("b").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -212,7 +213,7 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]}
+ {"sk": "a", "ct": ct.get("a").unwrap(), "v": [BASE64_STANDARD.encode(values.get("a").unwrap())]}
],
"more": true,
"nextStart": "b",
@@ -228,8 +229,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap())]}
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap())]}
],
"more": false,
"nextStart": null,
@@ -255,10 +256,10 @@ async fn test_batch() {
{{"pk": "root", "sk": "d.2", "ct": null, "v": "{}"}}
]"#,
ct.get(&"b").unwrap(),
- base64::encode(values.get(&"c'").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"c'").unwrap()),
ct.get(&"d.1").unwrap(),
- base64::encode(values.get(&"d.1'").unwrap()),
- base64::encode(values.get(&"d.2'").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"d.1'").unwrap()),
+ BASE64_STANDARD.encode(values.get(&"d.2'").unwrap()),
)
.into_bytes(),
)
@@ -333,11 +334,11 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "a", "ct": ct.get("a").unwrap(), "v": [base64::encode(values.get("a").unwrap())]},
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
- {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
+ {"sk": "a", "ct": ct.get("a").unwrap(), "v": [BASE64_STANDARD.encode(values.get("a").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap()), BASE64_STANDARD.encode(values.get("c'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap()), BASE64_STANDARD.encode(values.get("d.2'").unwrap())]},
+ {"sk": "e", "ct": ct.get("e").unwrap(), "v": [BASE64_STANDARD.encode(values.get("e").unwrap())]}
],
"more": false,
"nextStart": null,
@@ -353,8 +354,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap()), BASE64_STANDARD.encode(values.get("d.2'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -370,7 +371,7 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -386,7 +387,7 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
],
"more": true,
"nextStart": "d.2",
@@ -402,7 +403,7 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap()), BASE64_STANDARD.encode(values.get("d.2'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -418,8 +419,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap()), BASE64_STANDARD.encode(values.get("d.2'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -435,8 +436,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap()), BASE64_STANDARD.encode(values.get("d.2'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -452,8 +453,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [base64::encode(values.get("d.1'").unwrap())]},
- {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [base64::encode(values.get("d.2").unwrap()), base64::encode(values.get("d.2'").unwrap())]},
+ {"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.1'").unwrap())]},
+ {"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [BASE64_STANDARD.encode(values.get("d.2").unwrap()), BASE64_STANDARD.encode(values.get("d.2'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -563,8 +564,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
- {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]}
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap()), BASE64_STANDARD.encode(values.get("c'").unwrap())]},
+ {"sk": "e", "ct": ct.get("e").unwrap(), "v": [BASE64_STANDARD.encode(values.get("e").unwrap())]}
],
"more": false,
"nextStart": null,
@@ -580,8 +581,8 @@ async fn test_batch() {
"tombstones": false,
"singleItem": false,
"items": [
- {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]},
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
+ {"sk": "e", "ct": ct.get("e").unwrap(), "v": [BASE64_STANDARD.encode(values.get("e").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap()), BASE64_STANDARD.encode(values.get("c'").unwrap())]},
],
"more": false,
"nextStart": null,
@@ -599,10 +600,10 @@ async fn test_batch() {
"items": [
{"sk": "a", "ct": ct.get("a").unwrap(), "v": [null]},
{"sk": "b", "ct": ct.get("b").unwrap(), "v": [null]},
- {"sk": "c", "ct": ct.get("c").unwrap(), "v": [base64::encode(values.get("c").unwrap()), base64::encode(values.get("c'").unwrap())]},
+ {"sk": "c", "ct": ct.get("c").unwrap(), "v": [BASE64_STANDARD.encode(values.get("c").unwrap()), BASE64_STANDARD.encode(values.get("c'").unwrap())]},
{"sk": "d.1", "ct": ct.get("d.1").unwrap(), "v": [null]},
{"sk": "d.2", "ct": ct.get("d.2").unwrap(), "v": [null]},
- {"sk": "e", "ct": ct.get("e").unwrap(), "v": [base64::encode(values.get("e").unwrap())]},
+ {"sk": "e", "ct": ct.get("e").unwrap(), "v": [BASE64_STANDARD.encode(values.get("e").unwrap())]},
],
"more": false,
"nextStart": null,
diff --git a/src/garage/tests/k2v/item.rs b/src/garage/tests/k2v/item.rs
index 2641386f..588836c7 100644
--- a/src/garage/tests/k2v/item.rs
+++ b/src/garage/tests/k2v/item.rs
@@ -3,6 +3,7 @@ use std::time::Duration;
use crate::common;
use assert_json_diff::assert_json_eq;
+use base64::prelude::*;
use serde_json::json;
use super::json_body;
@@ -222,7 +223,10 @@ async fn test_items_and_indices() {
let res_json = json_body(res).await;
assert_json_eq!(
res_json,
- [base64::encode(&content2), base64::encode(&content3)]
+ [
+ BASE64_STANDARD.encode(&content2),
+ BASE64_STANDARD.encode(&content3)
+ ]
);
// ReadIndex -- now there should be some stuff
@@ -411,7 +415,7 @@ async fn test_item_return_format() {
"application/json"
);
let res_body = json_body(res).await;
- assert_json_eq!(res_body, json!([base64::encode(&single_value)]));
+ assert_json_eq!(res_body, json!([BASE64_STANDARD.encode(&single_value)]));
// f2: binary
let res = ctx
@@ -452,7 +456,7 @@ async fn test_item_return_format() {
"application/json"
);
let res_body = json_body(res).await;
- assert_json_eq!(res_body, json!([base64::encode(&single_value)]));
+ assert_json_eq!(res_body, json!([BASE64_STANDARD.encode(&single_value)]));
// -- Test with a second, concurrent value --
let res = ctx
@@ -488,8 +492,8 @@ async fn test_item_return_format() {
assert_json_eq!(
res_body,
json!([
- base64::encode(&single_value),
- base64::encode(&concurrent_value)
+ BASE64_STANDARD.encode(&single_value),
+ BASE64_STANDARD.encode(&concurrent_value)
])
);
@@ -512,8 +516,8 @@ async fn test_item_return_format() {
assert_json_eq!(
res_body,
json!([
- base64::encode(&single_value),
- base64::encode(&concurrent_value)
+ BASE64_STANDARD.encode(&single_value),
+ BASE64_STANDARD.encode(&concurrent_value)
])
);
@@ -550,8 +554,8 @@ async fn test_item_return_format() {
assert_json_eq!(
res_body,
json!([
- base64::encode(&single_value),
- base64::encode(&concurrent_value)
+ BASE64_STANDARD.encode(&single_value),
+ BASE64_STANDARD.encode(&concurrent_value)
])
);
@@ -587,7 +591,10 @@ async fn test_item_return_format() {
"application/json"
);
let res_body = json_body(res).await;
- assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null]));
+ assert_json_eq!(
+ res_body,
+ json!([BASE64_STANDARD.encode(&concurrent_value), null])
+ );
// f1: not specified
let res = ctx
@@ -612,7 +619,10 @@ async fn test_item_return_format() {
.unwrap()
.to_string();
let res_body = json_body(res).await;
- assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null]));
+ assert_json_eq!(
+ res_body,
+ json!([BASE64_STANDARD.encode(&concurrent_value), null])
+ );
// f2: binary
let res = ctx
@@ -644,7 +654,10 @@ async fn test_item_return_format() {
"application/json"
);
let res_body = json_body(res).await;
- assert_json_eq!(res_body, json!([base64::encode(&concurrent_value), null]));
+ assert_json_eq!(
+ res_body,
+ json!([BASE64_STANDARD.encode(&concurrent_value), null])
+ );
// -- Delete everything --
let res = ctx
diff --git a/src/garage/tests/s3/website.rs b/src/garage/tests/s3/website.rs
index 244a2fa0..f57e31ee 100644
--- a/src/garage/tests/s3/website.rs
+++ b/src/garage/tests/s3/website.rs
@@ -1,5 +1,8 @@
use crate::common;
use crate::common::ext::*;
+use crate::k2v::json_body;
+
+use assert_json_diff::assert_json_eq;
use aws_sdk_s3::{
model::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration},
types::ByteStream,
@@ -9,6 +12,7 @@ use hyper::{
body::{to_bytes, Body},
Client,
};
+use serde_json::json;
const BODY: &[u8; 16] = b"<h1>bonjour</h1>";
const BODY_ERR: &[u8; 6] = b"erreur";
@@ -49,6 +53,28 @@ async fn test_website() {
BODY.as_ref()
); /* check that we do not leak body */
+ let admin_req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
+ .header("domain", format!("{}", BCKT_NAME))
+ .body(Body::empty())
+ .unwrap()
+ };
+
+ let admin_resp = client.request(admin_req()).await.unwrap();
+ assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST);
+ let res_body = json_body(admin_resp).await;
+ assert_json_eq!(
+ res_body,
+ json!({
+ "code": "InvalidRequest",
+ "message": "Bad request: Bucket is not authorized for website hosting",
+ "region": "garage-integ-test",
+ "path": "/check",
+ })
+ );
+
ctx.garage
.command()
.args(["bucket", "website", "--allow", BCKT_NAME])
@@ -62,6 +88,22 @@ async fn test_website() {
BODY.as_ref()
);
+ let admin_req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
+ .header("domain", format!("{}", BCKT_NAME))
+ .body(Body::empty())
+ .unwrap()
+ };
+
+ let mut admin_resp = client.request(admin_req()).await.unwrap();
+ assert_eq!(admin_resp.status(), StatusCode::OK);
+ assert_eq!(
+ to_bytes(admin_resp.body_mut()).await.unwrap().as_ref(),
+ b"Bucket authorized for website hosting"
+ );
+
ctx.garage
.command()
.args(["bucket", "website", "--deny", BCKT_NAME])
@@ -74,6 +116,28 @@ async fn test_website() {
to_bytes(resp.body_mut()).await.unwrap().as_ref(),
BODY.as_ref()
); /* check that we do not leak body */
+
+ let admin_req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
+ .header("domain", format!("{}", BCKT_NAME))
+ .body(Body::empty())
+ .unwrap()
+ };
+
+ let admin_resp = client.request(admin_req()).await.unwrap();
+ assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST);
+ let res_body = json_body(admin_resp).await;
+ assert_json_eq!(
+ res_body,
+ json!({
+ "code": "InvalidRequest",
+ "message": "Bad request: Bucket is not authorized for website hosting",
+ "region": "garage-integ-test",
+ "path": "/check",
+ })
+ );
}
#[tokio::test]
@@ -322,3 +386,75 @@ async fn test_website_s3_api() {
);
}
}
+
+#[tokio::test]
+async fn test_website_check_website_enabled() {
+ let ctx = common::context();
+
+ let client = Client::new();
+
+ let admin_req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
+ .body(Body::empty())
+ .unwrap()
+ };
+
+ let admin_resp = client.request(admin_req()).await.unwrap();
+ assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST);
+ let res_body = json_body(admin_resp).await;
+ assert_json_eq!(
+ res_body,
+ json!({
+ "code": "InvalidRequest",
+ "message": "Bad request: No domain header found",
+ "region": "garage-integ-test",
+ "path": "/check",
+ })
+ );
+
+ let admin_req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
+ .header("domain", "foobar")
+ .body(Body::empty())
+ .unwrap()
+ };
+
+ let admin_resp = client.request(admin_req()).await.unwrap();
+ assert_eq!(admin_resp.status(), StatusCode::NOT_FOUND);
+ let res_body = json_body(admin_resp).await;
+ assert_json_eq!(
+ res_body,
+ json!({
+ "code": "NoSuchBucket",
+ "message": "Bucket not found: foobar",
+ "region": "garage-integ-test",
+ "path": "/check",
+ })
+ );
+
+ let admin_req = || {
+ Request::builder()
+ .method("GET")
+ .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
+ .header("domain", "☹")
+ .body(Body::empty())
+ .unwrap()
+ };
+
+ let admin_resp = client.request(admin_req()).await.unwrap();
+ assert_eq!(admin_resp.status(), StatusCode::BAD_REQUEST);
+ let res_body = json_body(admin_resp).await;
+ assert_json_eq!(
+ res_body,
+ json!({
+ "code": "InvalidRequest",
+ "message": "Bad request: Invalid characters found in domain header: failed to convert header to a str",
+ "region": "garage-integ-test",
+ "path": "/check",
+ })
+ );
+}
diff --git a/src/k2v-client/Cargo.toml b/src/k2v-client/Cargo.toml
index 7ab38e02..88d52747 100644
--- a/src/k2v-client/Cargo.toml
+++ b/src/k2v-client/Cargo.toml
@@ -9,20 +9,20 @@ repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
readme = "../../README.md"
[dependencies]
-base64 = "0.13.0"
-http = "0.2.6"
+base64 = "0.21"
+http = "0.2"
log = "0.4"
rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] }
rusoto_credential = "0.48.0"
rusoto_signature = "0.48.0"
hyper-rustls = { version = "0.23", default-features = false, features = [ "http1", "http2", "tls12" ] }
-serde = "1.0.137"
-serde_json = "1.0.81"
-thiserror = "1.0.31"
-tokio = "1.17.0"
+serde = "1.0"
+serde_json = "1.0"
+thiserror = "1.0"
+tokio = "1.24"
# cli deps
-clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
+clap = { version = "4.1", optional = true, features = ["derive", "env"] }
garage_util = { version = "0.8.1", path = "../util", optional = true }
diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml
index 323c2d64..d1c7cd29 100644
--- a/src/model/Cargo.toml
+++ b/src/model/Cargo.toml
@@ -22,13 +22,13 @@ garage_util = { version = "0.8.1", path = "../util" }
async-trait = "0.1.7"
arc-swap = "1.0"
-blake2 = "0.9"
+blake2 = "0.10"
err-derive = "0.3"
hex = "0.4"
-base64 = "0.13"
-tracing = "0.1.30"
+base64 = "0.21"
+tracing = "0.1"
rand = "0.8"
-zstd = { version = "0.9", default-features = false }
+zstd = { version = "0.12", default-features = false }
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"
diff --git a/src/model/k2v/causality.rs b/src/model/k2v/causality.rs
index b1ec8035..c80ebd39 100644
--- a/src/model/k2v/causality.rs
+++ b/src/model/k2v/causality.rs
@@ -7,6 +7,7 @@
//! "causality token", is used in the API and must be sent along with
//! each write or delete operation to indicate the previously seen
//! versions that we want to overwrite or delete.
+use base64::prelude::*;
use std::collections::BTreeMap;
use std::convert::TryInto;
@@ -67,13 +68,13 @@ impl CausalContext {
bytes.extend(u64::to_be_bytes(i));
}
- base64::encode_config(bytes, base64::URL_SAFE_NO_PAD)
+ BASE64_URL_SAFE_NO_PAD.encode(bytes)
}
/// Parse from base64-encoded binary representation.
/// Returns None on error.
pub fn parse(s: &str) -> Option<Self> {
- let bytes = base64::decode_config(s, base64::URL_SAFE_NO_PAD).ok()?;
+ let bytes = BASE64_URL_SAFE_NO_PAD.decode(s).ok()?;
if bytes.len() % 16 != 8 || bytes.len() < 8 {
return None;
}
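
For the causality-token change above: the 0.13 call `encode_config(bytes, base64::URL_SAFE_NO_PAD)` maps to a dedicated prelude engine in 0.21. A round-trip sketch:

    use base64::prelude::*;

    fn main() {
        let bytes = 42u64.to_be_bytes();
        // URL-safe alphabet ('-' and '_' instead of '+' and '/') with no
        // '=' padding, so the token is safe in headers and query strings.
        let token = BASE64_URL_SAFE_NO_PAD.encode(bytes);
        assert_eq!(BASE64_URL_SAFE_NO_PAD.decode(&token).unwrap(), bytes);
    }
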
diff --git a/src/model/k2v/item_table.rs b/src/model/k2v/item_table.rs
index bc2b1aef..28646f37 100644
--- a/src/model/k2v/item_table.rs
+++ b/src/model/k2v/item_table.rs
@@ -176,9 +176,9 @@ impl Crdt for DvvsEntry {
impl PartitionKey for K2VItemPartition {
fn hash(&self) -> Hash {
- use blake2::{Blake2b, Digest};
+ use blake2::{Blake2b512, Digest};
- let mut hasher = Blake2b::new();
+ let mut hasher = Blake2b512::new();
hasher.update(self.bucket_id.as_slice());
hasher.update(self.partition_key.as_bytes());
let mut hash = [0u8; 32];
diff --git a/src/model/k2v/seen.rs b/src/model/k2v/seen.rs
index 314d0f9e..51098710 100644
--- a/src/model/k2v/seen.rs
+++ b/src/model/k2v/seen.rs
@@ -9,6 +9,7 @@
use std::collections::BTreeMap;
+use base64::prelude::*;
use serde::{Deserialize, Serialize};
use garage_util::data::Uuid;
@@ -78,12 +79,12 @@ impl RangeSeenMarker {
let bytes = nonversioned_encode(&self)?;
let bytes = zstd::stream::encode_all(&mut &bytes[..], zstd::DEFAULT_COMPRESSION_LEVEL)?;
- Ok(base64::encode(&bytes))
+ Ok(BASE64_STANDARD.encode(&bytes))
}
/// Decode from msgpack+zstd+b64 representation, returns None on error.
pub fn decode(s: &str) -> Option<Self> {
- let bytes = base64::decode(&s).ok()?;
+ let bytes = BASE64_STANDARD.decode(&s).ok()?;
let bytes = zstd::stream::decode_all(&mut &bytes[..]).ok()?;
nonversioned_decode(&bytes).ok()
}
diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml
index e9a0929a..87ae15ac 100644
--- a/src/rpc/Cargo.toml
+++ b/src/rpc/Cargo.toml
@@ -20,9 +20,10 @@ arc-swap = "1.0"
bytes = "1.0"
gethostname = "0.2"
hex = "0.4"
-tracing = "0.1.30"
+tracing = "0.1"
rand = "0.8"
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
+systemstat = "0.2.3"
async-trait = "0.1.7"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
diff --git a/src/rpc/lib.rs b/src/rpc/lib.rs
index a8cc0030..5aec92c0 100644
--- a/src/rpc/lib.rs
+++ b/src/rpc/lib.rs
@@ -3,6 +3,9 @@
#[macro_use]
extern crate tracing;
+mod metrics;
+mod system_metrics;
+
#[cfg(feature = "consul-discovery")]
mod consul;
#[cfg(feature = "kubernetes-discovery")]
@@ -13,9 +16,6 @@ pub mod replication_mode;
pub mod ring;
pub mod system;
-mod metrics;
pub mod rpc_helper;
pub use rpc_helper::*;
-
-pub mod system_metrics;
diff --git a/src/rpc/system.rs b/src/rpc/system.rs
index 90f6a4c2..e0ced8cc 100644
--- a/src/rpc/system.rs
+++ b/src/rpc/system.rs
@@ -3,6 +3,7 @@ use std::collections::HashMap;
use std::io::{Read, Write};
use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
+use std::sync::atomic::Ordering;
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
@@ -38,7 +39,6 @@ use crate::replication_mode::*;
use crate::ring::*;
use crate::rpc_helper::*;
-#[cfg(feature = "metrics")]
use crate::system_metrics::*;
const DISCOVERY_INTERVAL: Duration = Duration::from_secs(60);
@@ -106,7 +106,7 @@ pub struct System {
consul_discovery: Option<ConsulDiscovery>,
#[cfg(feature = "kubernetes-discovery")]
kubernetes_discovery: Option<KubernetesDiscoveryConfig>,
- #[cfg(feature = "metrics")]
+
metrics: SystemMetrics,
replication_mode: ReplicationMode,
@@ -118,18 +118,28 @@ pub struct System {
/// Path to metadata directory
pub metadata_dir: PathBuf,
+ /// Path to data directory
+ pub data_dir: PathBuf,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeStatus {
/// Hostname of the node
pub hostname: String,
+
/// Replication factor configured on the node
pub replication_factor: usize,
/// Cluster layout version
pub cluster_layout_version: u64,
/// Hash of cluster layout staging data
pub cluster_layout_staging_hash: Hash,
+
+ /// Disk usage on partition containing metadata directory (tuple: `(avail, total)`)
+ #[serde(default)]
+ pub meta_disk_avail: Option<(u64, u64)>,
+ /// Disk usage on partition containing data directory (tuple: `(avail, total)`)
+ #[serde(default)]
+ pub data_disk_avail: Option<(u64, u64)>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -271,18 +281,11 @@ impl System {
}
};
- let local_status = NodeStatus {
- hostname: gethostname::gethostname()
- .into_string()
- .unwrap_or_else(|_| "<invalid utf-8>".to_string()),
- replication_factor,
- cluster_layout_version: cluster_layout.version,
- cluster_layout_staging_hash: cluster_layout.staging_hash,
- };
-
- #[cfg(feature = "metrics")]
let metrics = SystemMetrics::new(replication_factor);
+ let mut local_status = NodeStatus::initial(replication_factor, &cluster_layout);
+ local_status.update_disk_usage(&config.metadata_dir, &config.data_dir, &metrics);
+
let ring = Ring::new(cluster_layout, replication_factor);
let (update_ring, ring) = watch::channel(Arc::new(ring));
@@ -373,12 +376,12 @@ impl System {
consul_discovery,
#[cfg(feature = "kubernetes-discovery")]
kubernetes_discovery: config.kubernetes_discovery.clone(),
- #[cfg(feature = "metrics")]
metrics,
ring,
update_ring: Mutex::new(update_ring),
metadata_dir: config.metadata_dir.clone(),
+ data_dir: config.data_dir.clone(),
});
sys.system_endpoint.set_handler(sys.clone());
Ok(sys)
@@ -416,12 +419,7 @@ impl System {
.get(&n.id.into())
.cloned()
.map(|(_, st)| st)
- .unwrap_or(NodeStatus {
- hostname: "?".to_string(),
- replication_factor: 0,
- cluster_layout_version: 0,
- cluster_layout_staging_hash: Hash::from([0u8; 32]),
- }),
+ .unwrap_or(NodeStatus::unknown()),
})
.collect::<Vec<_>>();
known_nodes
@@ -600,6 +598,9 @@ impl System {
let ring = self.ring.borrow();
new_si.cluster_layout_version = ring.layout.version;
new_si.cluster_layout_staging_hash = ring.layout.staging_hash;
+
+ new_si.update_disk_usage(&self.metadata_dir, &self.data_dir, &self.metrics);
+
self.local_status.swap(Arc::new(new_si));
}
@@ -864,6 +865,69 @@ impl EndpointHandler<SystemRpc> for System {
}
}
+impl NodeStatus {
+ fn initial(replication_factor: usize, layout: &ClusterLayout) -> Self {
+ NodeStatus {
+ hostname: gethostname::gethostname()
+ .into_string()
+ .unwrap_or_else(|_| "<invalid utf-8>".to_string()),
+ replication_factor,
+ cluster_layout_version: layout.version,
+ cluster_layout_staging_hash: layout.staging_hash,
+ meta_disk_avail: None,
+ data_disk_avail: None,
+ }
+ }
+
+ fn unknown() -> Self {
+ NodeStatus {
+ hostname: "?".to_string(),
+ replication_factor: 0,
+ cluster_layout_version: 0,
+ cluster_layout_staging_hash: Hash::from([0u8; 32]),
+ meta_disk_avail: None,
+ data_disk_avail: None,
+ }
+ }
+
+ fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &Path, metrics: &SystemMetrics) {
+ use systemstat::{Platform, System};
+ let mounts = System::new().mounts().unwrap_or_default();
+
+ let mount_avail = |path: &Path| {
+ mounts
+ .iter()
+ .filter(|x| path.starts_with(&x.fs_mounted_on))
+ .max_by_key(|x| x.fs_mounted_on.len())
+ .map(|x| (x.avail.as_u64(), x.total.as_u64()))
+ };
+
+ self.meta_disk_avail = mount_avail(meta_dir);
+ self.data_disk_avail = mount_avail(data_dir);
+
+ if let Some((avail, total)) = self.meta_disk_avail {
+ metrics
+ .values
+ .meta_disk_avail
+ .store(avail, Ordering::Relaxed);
+ metrics
+ .values
+ .meta_disk_total
+ .store(total, Ordering::Relaxed);
+ }
+ if let Some((avail, total)) = self.data_disk_avail {
+ metrics
+ .values
+ .data_disk_avail
+ .store(avail, Ordering::Relaxed);
+ metrics
+ .values
+ .data_disk_total
+ .store(total, Ordering::Relaxed);
+ }
+ }
+}
+
fn get_default_ip() -> Option<IpAddr> {
pnet_datalink::interfaces()
.iter()
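
The mount_avail closure above deserves a note: systemstat lists all mounted filesystems, and the most specific mount point that prefixes the directory (the longest match) is taken as the filesystem hosting it. A standalone sketch of the same logic:

    use std::path::Path;
    use systemstat::{Platform, System};

    fn mount_avail(path: &Path) -> Option<(u64, u64)> {
        let mounts = System::new().mounts().unwrap_or_default();
        mounts
            .iter()
            // Keep mounts whose mount point is a prefix of `path`...
            .filter(|m| path.starts_with(&m.fs_mounted_on))
            // ...and pick the deepest one (e.g. "/var/lib" over "/").
            .max_by_key(|m| m.fs_mounted_on.len())
            .map(|m| (m.avail.as_u64(), m.total.as_u64()))
    }

    fn main() {
        // Hypothetical directory, for illustration only.
        println!("{:?}", mount_avail(Path::new("/var/lib/garage/data")));
    }
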
diff --git a/src/rpc/system_metrics.rs b/src/rpc/system_metrics.rs
index d96b67e4..83f5fa97 100644
--- a/src/rpc/system_metrics.rs
+++ b/src/rpc/system_metrics.rs
@@ -1,14 +1,31 @@
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
+
use opentelemetry::{global, metrics::*, KeyValue};
/// TableMetrics reference all counter used for metrics
pub struct SystemMetrics {
pub(crate) _garage_build_info: ValueObserver<u64>,
pub(crate) _replication_factor: ValueObserver<u64>,
+ pub(crate) _disk_avail: ValueObserver<u64>,
+ pub(crate) _disk_total: ValueObserver<u64>,
+ pub(crate) values: Arc<SystemMetricsValues>,
+}
+
+#[derive(Default)]
+pub struct SystemMetricsValues {
+ pub(crate) data_disk_total: AtomicU64,
+ pub(crate) data_disk_avail: AtomicU64,
+ pub(crate) meta_disk_total: AtomicU64,
+ pub(crate) meta_disk_avail: AtomicU64,
}
impl SystemMetrics {
pub fn new(replication_factor: usize) -> Self {
let meter = global::meter("garage_system");
+ let values = Arc::new(SystemMetricsValues::default());
+ let values1 = values.clone();
+ let values2 = values.clone();
Self {
_garage_build_info: meter
.u64_value_observer("garage_build_info", move |observer| {
@@ -28,6 +45,33 @@ impl SystemMetrics {
})
.with_description("Garage replication factor setting")
.init(),
+ _disk_avail: meter
+ .u64_value_observer("garage_local_disk_avail", move |observer| {
+ match values1.data_disk_avail.load(Ordering::Relaxed) {
+ 0 => (),
+ x => observer.observe(x, &[KeyValue::new("volume", "data")]),
+ };
+ match values1.meta_disk_avail.load(Ordering::Relaxed) {
+ 0 => (),
+ x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
+ };
+ })
+ .with_description("Garage available disk space on each node")
+ .init(),
+ _disk_total: meter
+ .u64_value_observer("garage_local_disk_total", move |observer| {
+ match values2.data_disk_total.load(Ordering::Relaxed) {
+ 0 => (),
+ x => observer.observe(x, &[KeyValue::new("volume", "data")]),
+ };
+ match values2.meta_disk_total.load(Ordering::Relaxed) {
+ 0 => (),
+ x => observer.observe(x, &[KeyValue::new("volume", "metadata")]),
+ };
+ })
+ .with_description("Garage total disk space on each node")
+ .init(),
+ values,
}
}
}
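
The observers above follow a write/read split: the status-update path stores fresh numbers into shared atomics, and the OpenTelemetry callbacks read them on each scrape, treating zero as "not yet measured". A minimal sketch of the pattern with a single gauge:

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Arc;

    use opentelemetry::{global, KeyValue};

    fn main() {
        let avail = Arc::new(AtomicU64::new(0));
        let avail_reader = avail.clone();

        let meter = global::meter("example");
        let _gauge = meter
            .u64_value_observer("example_disk_avail", move |observer| {
                match avail_reader.load(Ordering::Relaxed) {
                    0 => (), // zero means "unknown": report nothing
                    x => observer.observe(x, &[KeyValue::new("volume", "data")]),
                }
            })
            .with_description("Example: available disk space")
            .init();

        // Elsewhere (here: the disk-usage updater), the value is refreshed:
        avail.store(42_000_000_000, Ordering::Relaxed);
    }
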
diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml
index 3911c945..a8127f50 100644
--- a/src/table/Cargo.toml
+++ b/src/table/Cargo.toml
@@ -25,7 +25,7 @@ arc-swap = "1.0"
bytes = "1.0"
hex = "0.4"
hexdump = "0.1"
-tracing = "0.1.30"
+tracing = "0.1"
rand = "0.8"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml
index 1017b1ce..abeccbbd 100644
--- a/src/util/Cargo.toml
+++ b/src/util/Cargo.toml
@@ -18,7 +18,7 @@ garage_db = { version = "0.8.1", path = "../db" }
arc-swap = "1.0"
async-trait = "0.1"
-blake2 = "0.9"
+blake2 = "0.10"
bytes = "1.0"
digest = "0.10"
err-derive = "0.3"
@@ -27,7 +27,7 @@ hexdump = "0.1"
xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
hex = "0.4"
lazy_static = "1.4"
-tracing = "0.1.30"
+tracing = "0.1"
rand = "0.8"
sha2 = "0.10"
@@ -35,7 +35,7 @@ chrono = "0.4"
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_json = "1.0"
-toml = "0.5"
+toml = "0.6"
futures = "0.3"
tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
@@ -48,7 +48,7 @@ hyper = "0.14"
opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] }
[dev-dependencies]
-mktemp = "0.4"
+mktemp = "0.5"
[features]
k2v = []
diff --git a/src/util/data.rs b/src/util/data.rs
index 3f61e301..bdd8daee 100644
--- a/src/util/data.rs
+++ b/src/util/data.rs
@@ -115,9 +115,9 @@ pub fn sha256sum(data: &[u8]) -> Hash {
/// Compute the blake2 of a slice
pub fn blake2sum(data: &[u8]) -> Hash {
- use blake2::{Blake2b, Digest};
+ use blake2::{Blake2b512, Digest};
- let mut hasher = Blake2b::new();
+ let mut hasher = Blake2b512::new();
hasher.update(data);
let mut hash = [0u8; 32];
hash.copy_from_slice(&hasher.finalize()[..32]);
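
The Blake2b -> Blake2b512 rename comes from blake2 0.10, where the output size is fixed in the type; the code keeps truncating the 64-byte digest to Garage's 32-byte Hash. A self-contained sketch of the hashing after the change:

    use blake2::{Blake2b512, Digest};

    fn blake2_32(data: &[u8]) -> [u8; 32] {
        let mut hasher = Blake2b512::new();
        hasher.update(data);
        // Blake2b512 yields 64 bytes; keep the first 32, as before.
        let mut hash = [0u8; 32];
        hash.copy_from_slice(&hasher.finalize()[..32]);
        hash
    }

    fn main() {
        assert_eq!(blake2_32(b"a"), blake2_32(b"a"));
    }
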
diff --git a/src/util/time.rs b/src/util/time.rs
index 257b4d2a..42f41a44 100644
--- a/src/util/time.rs
+++ b/src/util/time.rs
@@ -25,6 +25,6 @@ pub fn increment_logical_clock_2(prev: u64, prev2: u64) -> u64 {
pub fn msec_to_rfc3339(msecs: u64) -> String {
let secs = msecs as i64 / 1000;
let nanos = (msecs as i64 % 1000) as u32 * 1_000_000;
- let timestamp = Utc.timestamp(secs, nanos);
+ let timestamp = Utc.timestamp_opt(secs, nanos).unwrap();
timestamp.to_rfc3339_opts(SecondsFormat::Millis, true)
}
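
The time.rs hunk tracks chrono's deprecation of the panicking TimeZone::timestamp in favor of timestamp_opt, which returns a LocalResult so out-of-range values must be handled explicitly; unwrap() preserves the old panic-on-invalid behavior. A sketch of the updated helper:

    use chrono::{SecondsFormat, TimeZone, Utc};

    fn msec_to_rfc3339(msecs: u64) -> String {
        let secs = msecs as i64 / 1000;
        let nanos = (msecs as i64 % 1000) as u32 * 1_000_000;
        // timestamp_opt returns LocalResult; unwrap() panics on
        // out-of-range input, matching the previous semantics.
        let timestamp = Utc.timestamp_opt(secs, nanos).unwrap();
        timestamp.to_rfc3339_opts(SecondsFormat::Millis, true)
    }

    fn main() {
        assert_eq!(msec_to_rfc3339(0), "1970-01-01T00:00:00.000Z");
    }
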
diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml
index dbc5e5fb..19eaed17 100644
--- a/src/web/Cargo.toml
+++ b/src/web/Cargo.toml
@@ -20,7 +20,7 @@ garage_util = { version = "0.8.1", path = "../util" }
garage_table = { version = "0.8.1", path = "../table" }
err-derive = "0.3"
-tracing = "0.1.30"
+tracing = "0.1"
percent-encoding = "2.1.0"
futures = "0.3"