aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlex Auvolat <alex@adnab.me>2021-12-17 11:53:13 +0100
committerAlex Auvolat <alex@adnab.me>2022-01-04 12:46:41 +0100
commitb1cfd16913e6957739958ef729b87c1bf3674a5d (patch)
tree781245de967e0c7a14cbdc0f2a610787f3b852d4
parent5db600e2316b80102e3fd4df9e8974c9586aec9c (diff)
downloadgarage-b1cfd16913e6957739958ef729b87c1bf3674a5d.tar.gz
garage-b1cfd16913e6957739958ef729b87c1bf3674a5d.zip
New buckets for 0.6.0: small fixes, including:
- ensure bucket names are correct aws s3 names - when making aliases, ensure timestamps of links in both ways are the same - fix small remarks by trinity - don't have a separate website_access field
-rw-r--r--Cargo.lock1
-rw-r--r--src/api/api_server.rs8
-rw-r--r--src/api/s3_bucket.rs2
-rw-r--r--src/api/s3_website.rs2
-rw-r--r--src/garage/Cargo.toml1
-rw-r--r--src/garage/admin.rs155
-rw-r--r--src/garage/cli/cmd.rs2
-rw-r--r--src/garage/cli/util.rs2
-rw-r--r--src/model/bucket_alias_table.rs43
-rw-r--r--src/model/bucket_helper.rs12
-rw-r--r--src/model/bucket_table.rs9
-rw-r--r--src/model/key_table.rs2
-rw-r--r--src/model/migrate.rs32
-rw-r--r--src/model/permission.rs3
-rw-r--r--src/util/crdt/lww.rs7
-rw-r--r--src/util/crdt/lww_map.rs62
-rw-r--r--src/util/time.rs5
-rw-r--r--src/web/web_server.rs2
18 files changed, 243 insertions, 107 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 8630855f..40d2a29f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -403,6 +403,7 @@ dependencies = [
"rand",
"rmp-serde 0.15.5",
"serde",
+ "serde_bytes",
"sled",
"structopt",
"tokio",
diff --git a/src/api/api_server.rs b/src/api/api_server.rs
index cd866c9f..42987e78 100644
--- a/src/api/api_server.rs
+++ b/src/api/api_server.rs
@@ -109,11 +109,9 @@ async fn handler_inner(garage: Arc<Garage>, req: Request<Body>) -> Result<Respon
let endpoint = Endpoint::from_request(&req, bucket.map(ToOwned::to_owned))?;
- let bucket_name = match endpoint.authorization_type() {
- Authorization::None => {
- return handle_request_without_bucket(garage, req, api_key, endpoint).await
- }
- Authorization::Read(bucket) | Authorization::Write(bucket) => bucket.to_string(),
+ let bucket_name = match endpoint.get_bucket() {
+ None => return handle_request_without_bucket(garage, req, api_key, endpoint).await,
+ Some(bucket) => bucket.to_string(),
};
let bucket_id = resolve_bucket(&garage, &bucket_name, &api_key).await?;
diff --git a/src/api/s3_bucket.rs b/src/api/s3_bucket.rs
index dc131a31..4a497c67 100644
--- a/src/api/s3_bucket.rs
+++ b/src/api/s3_bucket.rs
@@ -67,7 +67,7 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Respo
if let Some(alias_ent) = alias_ent {
if let Some(alias_p) = alias_ent.state.get().as_option() {
if alias_p.bucket_id == *bucket_id {
- aliases.insert(alias_ent.name.clone(), *bucket_id);
+ aliases.insert(alias_ent.name().to_string(), *bucket_id);
}
}
}
diff --git a/src/api/s3_website.rs b/src/api/s3_website.rs
index 8a93e8c5..e76afbf4 100644
--- a/src/api/s3_website.rs
+++ b/src/api/s3_website.rs
@@ -25,7 +25,6 @@ pub async fn handle_delete_website(
.ok_or(Error::NotFound)?;
if let crdt::Deletable::Present(param) = &mut bucket.state {
- param.website_access.update(false);
param.website_config.update(None);
garage.bucket_table.insert(&bucket).await?;
} else {
@@ -57,7 +56,6 @@ pub async fn handle_put_website(
conf.validate()?;
if let crdt::Deletable::Present(param) = &mut bucket.state {
- param.website_access.update(true);
param
.website_config
.update(Some(ByteBuf::from(body.to_vec())));
diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml
index 44cacde3..cd6564ce 100644
--- a/src/garage/Cargo.toml
+++ b/src/garage/Cargo.toml
@@ -35,6 +35,7 @@ sled = "0.34"
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
+serde_bytes = "0.11"
structopt = { version = "0.3", default-features = false }
toml = "0.5"
diff --git a/src/garage/admin.rs b/src/garage/admin.rs
index 2eb0f187..9ea5c19e 100644
--- a/src/garage/admin.rs
+++ b/src/garage/admin.rs
@@ -4,6 +4,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
+use serde_bytes::ByteBuf;
use garage_util::crdt::*;
use garage_util::data::*;
@@ -27,6 +28,8 @@ use crate::repair::Repair;
pub const ADMIN_RPC_PATH: &str = "garage/admin_rpc.rs/Rpc";
+macro_rules! INVALID_BUCKET_NAME_MESSAGE { () => { "Invalid bucket name: {}. See AWS documentation for constraints on S3 bucket names:\nhttps://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html" }; }
+
#[derive(Debug, Serialize, Deserialize)]
pub enum AdminRpc {
BucketOperation(BucketOperation),
@@ -142,14 +145,14 @@ impl AdminRpcHandler {
}));
alias
}
- None => BucketAlias::new(name.clone(), bucket.id),
+ None => BucketAlias::new(name.clone(), bucket.id)
+ .ok_or_message(format!(INVALID_BUCKET_NAME_MESSAGE!(), name))?,
};
- bucket
- .state
- .as_option_mut()
- .unwrap()
- .aliases
- .update_in_place(name.clone(), true);
+ bucket.state.as_option_mut().unwrap().aliases.merge_raw(
+ name,
+ alias.state.timestamp(),
+ &true,
+ );
self.garage.bucket_table.insert(&bucket).await?;
self.garage.bucket_alias_table.insert(&alias).await?;
Ok(AdminRpc::Ok(format!("Bucket {} was created.", name)))
@@ -222,7 +225,7 @@ impl AdminRpcHandler {
// 2. delete bucket alias
bucket_alias.state.update(Deletable::Deleted);
self.garage.bucket_alias_table.insert(&bucket_alias).await?;
- // 3. delete bucket alias
+ // 3. delete bucket
bucket.state = Deletable::delete();
self.garage.bucket_table.insert(&bucket).await?;
@@ -259,15 +262,36 @@ impl AdminRpcHandler {
}
}
- key_param.local_aliases = key_param
- .local_aliases
- .update_mutator(query.new_name.clone(), Deletable::present(bucket_id));
- self.garage.key_table.insert(&key).await?;
+ if !is_valid_bucket_name(&query.new_name) {
+ return Err(Error::Message(format!(
+ INVALID_BUCKET_NAME_MESSAGE!(),
+ query.new_name
+ )));
+ }
+ // Checks ok, add alias
let mut bucket_p = bucket.state.as_option_mut().unwrap();
- bucket_p.local_aliases = bucket_p
- .local_aliases
- .update_mutator((key.key_id.clone(), query.new_name.clone()), true);
+ let bucket_p_local_alias_key = (key.key_id.clone(), query.new_name.clone());
+
+ // Calculate the timestamp to assign to this aliasing in the two local_aliases maps
+	// (the one from key to bucket, and the reverse one stored in the bucket itself)
+ // so that merges on both maps in case of a concurrent operation resolve
+ // to the same alias being set
+ let alias_ts = increment_logical_clock_2(
+ key_param.local_aliases.get_timestamp(&query.new_name),
+ bucket_p
+ .local_aliases
+ .get_timestamp(&bucket_p_local_alias_key),
+ );
+
+ key_param.local_aliases = LwwMap::raw_item(
+ query.new_name.clone(),
+ alias_ts,
+ Deletable::present(bucket_id),
+ );
+ self.garage.key_table.insert(&key).await?;
+
+ bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, true);
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!(
@@ -275,40 +299,47 @@ impl AdminRpcHandler {
query.new_name, bucket_id, key.key_id
)))
} else {
- let mut alias = self
+ let alias = self
.garage
.bucket_alias_table
.get(&EmptyKey, &query.new_name)
- .await?
- .unwrap_or(BucketAlias {
- name: query.new_name.clone(),
- state: Lww::new(Deletable::delete()),
- });
+ .await?;
- if let Some(existing_alias) = alias.state.get().as_option() {
- if existing_alias.bucket_id == bucket_id {
- return Ok(AdminRpc::Ok(format!(
- "Alias {} already points to bucket {:?}",
- query.new_name, bucket_id
- )));
- } else {
- return Err(Error::Message(format!(
- "Alias {} already exists and points to different bucket: {:?}",
- query.new_name, existing_alias.bucket_id
- )));
+ if let Some(existing_alias) = alias.as_ref() {
+ if let Some(p) = existing_alias.state.get().as_option() {
+ if p.bucket_id == bucket_id {
+ return Ok(AdminRpc::Ok(format!(
+ "Alias {} already points to bucket {:?}",
+ query.new_name, bucket_id
+ )));
+ } else {
+ return Err(Error::Message(format!(
+ "Alias {} already exists and points to different bucket: {:?}",
+ query.new_name, p.bucket_id
+ )));
+ }
}
}
// Checks ok, add alias
- alias
- .state
- .update(Deletable::present(AliasParams { bucket_id }));
+ let mut bucket_p = bucket.state.as_option_mut().unwrap();
+
+ let alias_ts = increment_logical_clock_2(
+ bucket_p.aliases.get_timestamp(&query.new_name),
+ alias.as_ref().map(|a| a.state.timestamp()).unwrap_or(0),
+ );
+
+ let alias = match alias {
+ None => BucketAlias::new(query.new_name.clone(), bucket_id)
+ .ok_or_message(format!(INVALID_BUCKET_NAME_MESSAGE!(), query.new_name))?,
+ Some(mut a) => {
+ a.state = Lww::raw(alias_ts, Deletable::present(AliasParams { bucket_id }));
+ a
+ }
+ };
self.garage.bucket_alias_table.insert(&alias).await?;
- let mut bucket_p = bucket.state.as_option_mut().unwrap();
- bucket_p.aliases = bucket_p
- .aliases
- .update_mutator(query.new_name.clone(), true);
+ bucket_p.aliases = LwwMap::raw_item(query.new_name.clone(), alias_ts, true);
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!(
@@ -336,14 +367,14 @@ impl AdminRpcHandler {
.bucket_helper()
.get_existing_bucket(bucket_id)
.await?;
- let mut bucket_state = bucket.state.as_option_mut().unwrap();
+ let mut bucket_p = bucket.state.as_option_mut().unwrap();
- let has_other_aliases = bucket_state
+ let has_other_aliases = bucket_p
.aliases
.items()
.iter()
.any(|(_, _, active)| *active)
- || bucket_state
+ || bucket_p
.local_aliases
.items()
.iter()
@@ -352,15 +383,22 @@ impl AdminRpcHandler {
return Err(Error::Message(format!("Bucket {} doesn't have other aliases, please delete it instead of just unaliasing.", query.name)));
}
+ // Checks ok, remove alias
let mut key_param = key.state.as_option_mut().unwrap();
- key_param.local_aliases = key_param
- .local_aliases
- .update_mutator(query.name.clone(), Deletable::delete());
+ let bucket_p_local_alias_key = (key.key_id.clone(), query.name.clone());
+
+ let alias_ts = increment_logical_clock_2(
+ key_param.local_aliases.get_timestamp(&query.name),
+ bucket_p
+ .local_aliases
+ .get_timestamp(&bucket_p_local_alias_key),
+ );
+
+ key_param.local_aliases =
+ LwwMap::raw_item(query.name.clone(), alias_ts, Deletable::delete());
self.garage.key_table.insert(&key).await?;
- bucket_state.local_aliases = bucket_state
- .local_aliases
- .update_mutator((key.key_id.clone(), query.name.clone()), false);
+ bucket_p.local_aliases = LwwMap::raw_item(bucket_p_local_alias_key, alias_ts, false);
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!(
@@ -401,12 +439,17 @@ impl AdminRpcHandler {
.get(&EmptyKey, &query.name)
.await?
.ok_or_message("Internal error: alias not found")?;
- alias.state.update(Deletable::delete());
+
+ // Checks ok, remove alias
+ let alias_ts = increment_logical_clock_2(
+ alias.state.timestamp(),
+ bucket_state.aliases.get_timestamp(&query.name),
+ );
+
+ alias.state = Lww::raw(alias_ts, Deletable::delete());
self.garage.bucket_alias_table.insert(&alias).await?;
- bucket_state.aliases = bucket_state
- .aliases
- .update_mutator(query.name.clone(), false);
+ bucket_state.aliases = LwwMap::raw_item(query.name.clone(), alias_ts, false);
self.garage.bucket_table.insert(&bucket).await?;
Ok(AdminRpc::Ok(format!("Bucket alias {} deleted", query.name)))
@@ -494,7 +537,13 @@ impl AdminRpcHandler {
));
}
- bucket_state.website_access.update(query.allow);
+ let website = if query.allow {
+ Some(ByteBuf::from(DEFAULT_WEBSITE_CONFIGURATION.to_vec()))
+ } else {
+ None
+ };
+
+ bucket_state.website_config.update(website);
self.garage.bucket_table.insert(&bucket).await?;
let msg = if query.allow {
diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs
index 1c64f9b5..834261e4 100644
--- a/src/garage/cli/cmd.rs
+++ b/src/garage/cli/cmd.rs
@@ -167,7 +167,7 @@ pub async fn cmd_admin(
let mut table = vec![];
for alias in bl {
if let Some(p) = alias.state.get().as_option() {
- table.push(format!("\t{}\t{:?}", alias.name, p.bucket_id));
+ table.push(format!("\t{}\t{:?}", alias.name(), p.bucket_id));
}
}
format_table(table);
diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs
index ad48c301..b4ea14d1 100644
--- a/src/garage/cli/util.rs
+++ b/src/garage/cli/util.rs
@@ -82,7 +82,7 @@ pub fn print_bucket_info(bucket: &Bucket, relevant_keys: &HashMap<String, Key>)
match &bucket.state {
Deletable::Deleted => println!("Bucket is deleted."),
Deletable::Present(p) => {
- println!("Website access: {}", p.website_access.get());
+ println!("Website access: {}", p.website_config.get().is_some());
println!("\nGlobal aliases:");
for (alias, _, active) in p.aliases.items().iter() {
diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs
index 52484c5b..904a5255 100644
--- a/src/model/bucket_alias_table.rs
+++ b/src/model/bucket_alias_table.rs
@@ -8,7 +8,7 @@ use garage_util::data::*;
/// in the global namespace.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketAlias {
- pub name: String,
+ name: String,
pub state: crdt::Lww<crdt::Deletable<AliasParams>>,
}
@@ -22,15 +22,22 @@ impl AutoCrdt for AliasParams {
}
impl BucketAlias {
- pub fn new(name: String, bucket_id: Uuid) -> Self {
- BucketAlias {
- name,
- state: crdt::Lww::new(crdt::Deletable::present(AliasParams { bucket_id })),
+ pub fn new(name: String, bucket_id: Uuid) -> Option<Self> {
+ if !is_valid_bucket_name(&name) {
+ None
+ } else {
+ Some(BucketAlias {
+ name,
+ state: crdt::Lww::new(crdt::Deletable::present(AliasParams { bucket_id })),
+ })
}
}
pub fn is_deleted(&self) -> bool {
self.state.get().is_deleted()
}
+ pub fn name(&self) -> &str {
+ &self.name
+ }
}
impl Crdt for BucketAlias {
@@ -62,3 +69,29 @@ impl TableSchema for BucketAliasTable {
filter.apply(entry.is_deleted())
}
}
+
+/// Check if a bucket name is valid.
+///
+/// The requirements are listed here:
+///
+/// <https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html>
+///
+/// In the case of Garage, a bucket name must not be a hex-encoded
+/// 32-byte string, which is excluded thanks to the
+/// maximum length of 63 bytes given in the spec.
+pub fn is_valid_bucket_name(n: &str) -> bool {
+ // Bucket names must be between 3 and 63 characters
+ n.len() >= 3 && n.len() <= 63
+ // Bucket names must be composed of lowercase letters, numbers,
+ // dashes and dots
+ && n.chars().all(|c| matches!(c, '.' | '-' | 'a'..='z' | '0'..='9'))
+ // Bucket names must start and end with a letter or a number
+ && !n.starts_with(&['-', '.'][..])
+ && !n.ends_with(&['-', '.'][..])
+	// Bucket names must not be formatted as an IP address
+ && n.parse::<std::net::IpAddr>().is_err()
+	// Bucket names must not start with "xn--"
+ && !n.starts_with("xn--")
+ // Bucket names must not end with "-s3alias"
+ && !n.ends_with("-s3alias")
+}
diff --git a/src/model/bucket_helper.rs b/src/model/bucket_helper.rs
index c1280afa..b55ebc4b 100644
--- a/src/model/bucket_helper.rs
+++ b/src/model/bucket_helper.rs
@@ -8,12 +8,21 @@ use crate::garage::Garage;
pub struct BucketHelper<'a>(pub(crate) &'a Garage);
-#[allow(clippy::ptr_arg)]
impl<'a> BucketHelper<'a> {
+ #[allow(clippy::ptr_arg)]
pub async fn resolve_global_bucket_name(
&self,
bucket_name: &String,
) -> Result<Option<Uuid>, Error> {
+ // Bucket names in Garage are aliases, true bucket identifiers
+ // are 32-byte UUIDs. This function resolves bucket names into
+ // their full identifier by looking up in the bucket_alias_table.
+ // This function also allows buckets to be identified by their
+ // full UUID (hex-encoded). Here, if the name to be resolved is a
+ // hex string of the correct length, it is directly parsed as a bucket
+ // identifier which is returned. There is no risk of this conflicting
+ // with an actual bucket name: bucket names are max 63 chars long by
+ // the AWS spec, and hex-encoded UUIDs are 64 chars long.
let hexbucket = hex::decode(bucket_name.as_str())
.ok()
.map(|by| Uuid::try_from(&by))
@@ -37,7 +46,6 @@ impl<'a> BucketHelper<'a> {
}
}
- #[allow(clippy::ptr_arg)]
pub async fn get_existing_bucket(&self, bucket_id: Uuid) -> Result<Bucket, Error> {
self.0
.bucket_table
diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs
index 6ae719ae..00e03899 100644
--- a/src/model/bucket_table.rs
+++ b/src/model/bucket_table.rs
@@ -8,6 +8,8 @@ use garage_util::time::*;
use crate::permission::BucketKeyPerm;
+pub const DEFAULT_WEBSITE_CONFIGURATION: &[u8] = b""; // TODO (an XML WebsiteConfiguration document per the AWS spec)
+
/// A bucket is a collection of objects
///
/// Its parameters are not directly accessible as:
@@ -29,9 +31,8 @@ pub struct BucketParams {
/// Map of key with access to the bucket, and what kind of access they give
pub authorized_keys: crdt::Map<String, BucketKeyPerm>,
/// Whether this bucket is allowed for website access
- /// (under all of its global alias names)
- pub website_access: crdt::Lww<bool>,
- /// The website configuration XML document
+ /// (under all of its global alias names),
+ /// and if so, the website configuration XML document
pub website_config: crdt::Lww<Option<ByteBuf>>,
/// Map of aliases that are or have been given to this bucket
/// in the global namespace
@@ -50,7 +51,6 @@ impl BucketParams {
BucketParams {
creation_date: now_msec(),
authorized_keys: crdt::Map::new(),
- website_access: crdt::Lww::new(false),
website_config: crdt::Lww::new(None),
aliases: crdt::LwwMap::new(),
local_aliases: crdt::LwwMap::new(),
@@ -61,7 +61,6 @@ impl BucketParams {
impl Crdt for BucketParams {
fn merge(&mut self, o: &Self) {
self.authorized_keys.merge(&o.authorized_keys);
- self.website_access.merge(&o.website_access);
self.website_config.merge(&o.website_config);
self.aliases.merge(&o.aliases);
self.local_aliases.merge(&o.local_aliases);
diff --git a/src/model/key_table.rs b/src/model/key_table.rs
index 526ed496..3285e355 100644
--- a/src/model/key_table.rs
+++ b/src/model/key_table.rs
@@ -190,7 +190,7 @@ impl TableSchema for KeyTable {
local_aliases: crdt::LwwMap::new(),
})
};
- let name = crdt::Lww::migrate_from_raw(old_k.name.timestamp(), old_k.name.get().clone());
+ let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
Some(Key {
key_id: old_k.key_id,
secret_key: old_k.secret_key,
diff --git a/src/model/migrate.rs b/src/model/migrate.rs
index 35ff1807..e4469e64 100644
--- a/src/model/migrate.rs
+++ b/src/model/migrate.rs
@@ -1,5 +1,7 @@
use std::sync::Arc;
+use serde_bytes::ByteBuf;
+
use garage_table::util::EmptyKey;
use garage_util::crdt::*;
use garage_util::data::*;
@@ -38,6 +40,16 @@ impl Migrate {
old_bucket: &old_bucket::Bucket,
old_bucket_p: &old_bucket::BucketParams,
) -> Result<(), Error> {
+ let bucket_id = blake2sum(old_bucket.name.as_bytes());
+
+ let new_name = if is_valid_bucket_name(&old_bucket.name) {
+ old_bucket.name.clone()
+ } else {
+ // if old bucket name was not valid, replace it by
+ // a hex-encoded name derived from its identifier
+ hex::encode(&bucket_id.as_slice()[..16])
+ };
+
let mut new_ak = Map::new();
for (k, ts, perm) in old_bucket_p.authorized_keys.items().iter() {
new_ak.put(
@@ -52,27 +64,27 @@ impl Migrate {
}
let mut aliases = LwwMap::new();
- aliases.update_in_place(old_bucket.name.clone(), true);
+ aliases.update_in_place(new_name.clone(), true);
+
+ let website = if *old_bucket_p.website.get() {
+ Some(ByteBuf::from(DEFAULT_WEBSITE_CONFIGURATION.to_vec()))
+ } else {
+ None
+ };
let new_bucket = Bucket {
- id: blake2sum(old_bucket.name.as_bytes()),
+ id: bucket_id,
state: Deletable::Present(BucketParams {
creation_date: now_msec(),
authorized_keys: new_ak.clone(),
- website_access: Lww::new(*old_bucket_p.website.get()),
- website_config: Lww::new(None),
+ website_config: Lww::new(website),
aliases,
local_aliases: LwwMap::new(),
}),
};
self.garage.bucket_table.insert(&new_bucket).await?;
- let new_alias = BucketAlias {
- name: old_bucket.name.clone(),
- state: Lww::new(Deletable::Present(AliasParams {
- bucket_id: new_bucket.id,
- })),
- };
+ let new_alias = BucketAlias::new(new_name.clone(), new_bucket.id).unwrap();
self.garage.bucket_alias_table.insert(&new_alias).await?;
for (k, perm) in new_ak.items().iter() {
diff --git a/src/model/permission.rs b/src/model/permission.rs
index 04bb2bc5..ebb24a32 100644
--- a/src/model/permission.rs
+++ b/src/model/permission.rs
@@ -34,6 +34,9 @@ impl Crdt for BucketKeyPerm {
if !other.allow_write {
self.allow_write = false;
}
+ if !other.allow_owner {
+ self.allow_owner = false;
+ }
}
_ => (),
}
diff --git a/src/util/crdt/lww.rs b/src/util/crdt/lww.rs
index bc686e05..99bd8e7c 100644
--- a/src/util/crdt/lww.rs
+++ b/src/util/crdt/lww.rs
@@ -61,7 +61,7 @@ where
///
/// Compared to new, the CRDT's timestamp is not set to now
/// but must be set to the previous, non-compatible, CRDT's timestamp.
- pub fn migrate_from_raw(ts: u64, value: T) -> Self {
+ pub fn raw(ts: u64, value: T) -> Self {
Self { ts, v: value }
}
@@ -77,6 +77,11 @@ where
self.v = new_value;
}
+ /// Get the timestamp currently associated with the value
+ pub fn timestamp(&self) -> u64 {
+ self.ts
+ }
+
/// Get the CRDT value
pub fn get(&self) -> &T {
&self.v
diff --git a/src/util/crdt/lww_map.rs b/src/util/crdt/lww_map.rs
index 21cb6e12..f3a90591 100644
--- a/src/util/crdt/lww_map.rs
+++ b/src/util/crdt/lww_map.rs
@@ -37,11 +37,12 @@ where
pub fn new() -> Self {
Self { vals: vec![] }
}
+
/// Used to migrate from a map defined in an incompatible format. This produces
/// a map that contains a single item with the specified timestamp (copied from
/// the incompatible format). Do this as many times as you have items to migrate,
/// and put them all together using the CRDT merge operator.
- pub fn migrate_from_raw_item(k: K, ts: u64, v: V) -> Self {
+ pub fn raw_item(k: K, ts: u64, v: V) -> Self {
Self {
vals: vec![(k, ts, v)],
}
@@ -74,9 +75,37 @@ where
Self { vals: new_vals }
}
+ /// Updates a value in place in the map (this generates
+ /// a new timestamp)
pub fn update_in_place(&mut self, k: K, new_v: V) {
self.merge(&self.update_mutator(k, new_v));
}
+
+ /// Updates a value in place in the map, from a
+ /// (key, timestamp, value) triple, only if the given
+ /// timestamp is larger than the timestamp currently
+ /// in the map
+ pub fn merge_raw(&mut self, k: &K, ts2: u64, v2: &V) {
+ match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
+ Ok(i) => {
+ let (_, ts1, _v1) = &self.vals[i];
+ match ts2.cmp(ts1) {
+ Ordering::Greater => {
+ self.vals[i].1 = ts2;
+ self.vals[i].2 = v2.clone();
+ }
+ Ordering::Equal => {
+ self.vals[i].2.merge(v2);
+ }
+ Ordering::Less => (),
+ }
+ }
+ Err(i) => {
+ self.vals.insert(i, (k.clone(), ts2, v2.clone()));
+ }
+ }
+ }
+
/// Takes all of the values of the map and returns them. The current map is reset to the
/// empty map. This is very usefull to produce in-place a new map that contains only a delta
/// that modifies a certain value:
@@ -103,10 +132,12 @@ where
let vals = std::mem::take(&mut self.vals);
Self { vals }
}
+
/// Removes all values from the map
pub fn clear(&mut self) {
self.vals.clear();
}
+
/// Get a reference to the value assigned to a key
pub fn get(&self, k: &K) -> Option<&V> {
match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
@@ -114,6 +145,16 @@ where
Err(_) => None,
}
}
+
+ /// Get the timestamp of the value assigned to a key, or 0 if
+ /// no value is assigned
+ pub fn get_timestamp(&self, k: &K) -> u64 {
+ match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
+ Ok(i) => self.vals[i].1,
+ Err(_) => 0,
+ }
+ }
+
/// Gets a reference to all of the items, as a slice. Usefull to iterate on all map values.
/// In most case you will want to ignore the timestamp (second item of the tuple).
pub fn items(&self) -> &[(K, u64, V)] {
@@ -138,24 +179,7 @@ where
{
fn merge(&mut self, other: &Self) {
for (k, ts2, v2) in other.vals.iter() {
- match self.vals.binary_search_by(|(k2, _, _)| k2.cmp(k)) {
- Ok(i) => {
- let (_, ts1, _v1) = &self.vals[i];
- match ts2.cmp(ts1) {
- Ordering::Greater => {
- self.vals[i].1 = *ts2;
- self.vals[i].2 = v2.clone();
- }
- Ordering::Equal => {
- self.vals[i].2.merge(v2);
- }
- Ordering::Less => (),
- }
- }
- Err(i) => {
- self.vals.insert(i, (k.clone(), *ts2, v2.clone()));
- }
- }
+ self.merge_raw(k, *ts2, v2);
}
}
}
diff --git a/src/util/time.rs b/src/util/time.rs
index d9192443..257b4d2a 100644
--- a/src/util/time.rs
+++ b/src/util/time.rs
@@ -15,6 +15,11 @@ pub fn increment_logical_clock(prev: u64) -> u64 {
std::cmp::max(prev + 1, now_msec())
}
+/// Increment two logical clocks
+pub fn increment_logical_clock_2(prev: u64, prev2: u64) -> u64 {
+ std::cmp::max(prev2 + 1, std::cmp::max(prev + 1, now_msec()))
+}
+
/// Convert a timestamp represented as milliseconds since UNIX Epoch to
/// its RFC3339 representation, such as "2021-01-01T12:30:00Z"
pub fn msec_to_rfc3339(msecs: u64) -> String {
diff --git a/src/web/web_server.rs b/src/web/web_server.rs
index 6152f282..cc6eed57 100644
--- a/src/web/web_server.rs
+++ b/src/web/web_server.rs
@@ -99,7 +99,7 @@ async fn serve_file(garage: Arc<Garage>, req: Request<Body>) -> Result<Response<
.filter(|b| {
b.state
.as_option()
- .map(|x| *x.website_access.get())
+ .map(|x| x.website_config.get().is_some())
.unwrap_or(false)
})
.ok_or(Error::NotFound)?;