-rw-r--r-- | README.md | 27
-rwxr-xr-x | script/dev-bucket.sh | 16
-rwxr-xr-x | script/dev-cluster.sh (renamed from example/dev-cluster.sh) | 7
-rwxr-xr-x | script/dev-configure.sh | 15
-rwxr-xr-x | script/dev-env.sh | 18
-rw-r--r-- | src/api/s3_list.rs | 4
-rw-r--r-- | src/garage/admin_rpc.rs | 6
-rw-r--r-- | src/model/block.rs | 4
-rw-r--r-- | src/model/block_ref_table.rs | 6
-rw-r--r-- | src/model/bucket_table.rs | 9
-rw-r--r-- | src/model/key_table.rs | 15
-rw-r--r-- | src/model/object_table.rs | 7
-rw-r--r-- | src/model/version_table.rs | 6
-rw-r--r-- | src/table/lib.rs | 3
-rw-r--r-- | src/table/schema.rs | 51
-rw-r--r-- | src/table/util.rs | 35
-rwxr-xr-x | test_delete.sh | 6
-rwxr-xr-x | test_read.sh | 13
-rwxr-xr-x | test_write.sh | 7
19 files changed, 152 insertions, 103 deletions
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -20,27 +20,14 @@ Our main use case is to provide a distributed storage layer for small-scale self
 
 We propose the following quickstart to setup a full dev. environment as quickly as possible:
 
-1. Setup a rust/cargo environment
+1. Setup a rust/cargo environment and install s3cmd. eg. `dnf install rust cargo s3cmd`
 2. Run `cargo build` to build the project
-3. Run `./example/dev-cluster.sh` to launch a test cluster (feel free to read the script)
-4. Set a convenient alias `alias grg=./target/debug/garage`
-5. Get your node IDs with `grg status`
-6. Configure them, eg. `grg node configure -d dc1 -n 10 dd79867e0f5a9e08`
-7. Create a bucket, eg. `grg bucket create éprouvette`
-8. Create a key, eg. `grg key new --name opérateur`
-9. Bind the key with the bucket, eg. `grg bucket allow éprouvette --read --write --key GK108acc0d179b13826e54442b`
-10. Install s3cmd, eg. `dnf install s3cmd`
-11. s3cmd example command:
-
-```bash
-s3cmd \
-	--host 127.0.0.1:3900 \
-	--access_key=GK108acc0d179b13826e54442b \
-	--secret_key=f52aac5722c48f038ddf8612d1e91e8d0a9535048f1f1cd402cd0416f9f8807f \
-	--region=garage \
-	--no-ssl \
-	ls s3://éprouvette
-```
+3. Run `./script/dev-cluster.sh` to launch a test cluster (feel free to read the script)
+4. Run `./script/dev-configure.sh` to configure your test cluster with default values (same datacenter, 100 tokens)
+5. Run `./script/dev-bucket.sh` to create a bucket named `éprouvette` and an API key that will be stored in `/tmp/garage.s3`
+6. Run `source ./script/dev-env.sh` to configure your CLI environment
+7. You can use `garage` to manage the cluster. Try `garage --help`.
+8. You can use `s3grg` to add, remove, and delete files. Try `s3grg --help`, `s3grg put /proc/cpuinfo s3://éprouvette/cpuinfo.txt`, `s3grg ls s3://éprouvette`. `s3grg` is a wrapper on `s3cmd` configured with the previously generated API key (the one in `/tmp/garage.s3`).
 
 Now you should be ready to start hacking on garage!
 
diff --git a/script/dev-bucket.sh b/script/dev-bucket.sh
new file mode 100755
index 00000000..f07263f5
--- /dev/null
+++ b/script/dev-bucket.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+SCRIPT_FOLDER="`dirname \"$0\"`"
+REPO_FOLDER="${SCRIPT_FOLDER}/../"
+GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
+GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
+PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"
+
+garage bucket create éprouvette
+KEY_INFO=`garage key new --name opérateur`
+ACCESS_KEY=`echo $KEY_INFO|grep -Po 'GK[a-f0-9]+'`
+SECRET_KEY=`echo $KEY_INFO|grep -Po 'secret_key: "[a-f0-9]+'|grep -Po '[a-f0-9]+$'`
+garage bucket allow éprouvette --read --write --key $ACCESS_KEY
+echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
+
+echo "Bucket s3://éprouvette created. Credentials stored in /tmp/garage.s3."
diff --git a/example/dev-cluster.sh b/script/dev-cluster.sh
index d0d4326e..9ad4b6b4 100755
--- a/example/dev-cluster.sh
+++ b/script/dev-cluster.sh
@@ -13,6 +13,9 @@ export RUST_BACKTRACE=1
 export RUST_LOG=garage=info
 MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
 
+WHICH_GARAGE=$(which garage || exit 1)
+echo -en "${MAIN_LABEL} Found garage at: ${WHICH_GARAGE}\n"
+
 for count in $(seq 1 3); do
 	CONF_PATH="/tmp/config.$count.toml"
 	LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"
@@ -24,8 +27,8 @@ data_dir = "/tmp/garage-data-$count"
 rpc_bind_addr = "127.0.0.$count:3901" # the port other Garage nodes will use to talk to this node
 bootstrap_peers = [
 	"127.0.0.1:3901",
-	"127.0.0.2:3901",
-	"127.0.0.3:3901"
+	"127.0.0.2:3901",
+	"127.0.0.3:3901"
 ]
 max_concurrent_rpc_requests = 12
 data_replication_factor = 3
diff --git a/script/dev-configure.sh b/script/dev-configure.sh
new file mode 100755
index 00000000..8b7392c6
--- /dev/null
+++ b/script/dev-configure.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+SCRIPT_FOLDER="`dirname \"$0\"`"
+REPO_FOLDER="${SCRIPT_FOLDER}/../"
+GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
+GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
+PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"
+
+garage status \
+	| grep UNCONFIGURED \
+	| grep -Po '^[0-9a-f]+' \
+	| while read id; do
+		garage node configure -d dc1 -n 100 $id
+	done
+
diff --git a/script/dev-env.sh b/script/dev-env.sh
new file mode 100755
index 00000000..7e8ffc50
--- /dev/null
+++ b/script/dev-env.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+SCRIPT_FOLDER="`dirname \"${BASH_SOURCE[0]}\"`"
+REPO_FOLDER="${SCRIPT_FOLDER}/../"
+GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
+GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
+PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"
+
+ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f1`
+SECRET_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
+
+alias s3grg="s3cmd \
+	--host 127.0.0.1:3900 \
+	--access_key=$ACCESS_KEY \
+	--secret_key=$SECRET_KEY \
+	--region=garage \
+	--no-ssl"
+
diff --git a/src/api/s3_list.rs b/src/api/s3_list.rs
index f2b49a1d..3b739a8a 100644
--- a/src/api/s3_list.rs
+++ b/src/api/s3_list.rs
@@ -10,6 +10,8 @@ use garage_util::error::Error;
 use garage_model::garage::Garage;
 use garage_model::object_table::*;
 
+use garage_table::DeletedFilter;
+
 use crate::encoding::*;
 
 #[derive(Debug)]
@@ -41,7 +43,7 @@ pub async fn handle_list(
 		.get_range(
 			&bucket.to_string(),
 			Some(next_chunk_start.clone()),
-			Some(()),
+			Some(DeletedFilter::NotDeleted),
 			max_keys + 1,
 		)
 		.await?;
diff --git a/src/garage/admin_rpc.rs b/src/garage/admin_rpc.rs
index b29f2f77..778e4a1d 100644
--- a/src/garage/admin_rpc.rs
+++ b/src/garage/admin_rpc.rs
@@ -67,7 +67,7 @@ impl AdminRpcHandler {
 				let bucket_names = self
 					.garage
 					.bucket_table
-					.get_range(&EmptyKey, None, Some(()), 10000)
+					.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
 					.await?
 					.iter()
 					.map(|b| b.name.to_string())
@@ -101,7 +101,7 @@ impl AdminRpcHandler {
 				let objects = self
 					.garage
 					.object_table
-					.get_range(&query.name, None, Some(()), 10)
+					.get_range(&query.name, None, Some(DeletedFilter::NotDeleted), 10)
 					.await?;
 				if !objects.is_empty() {
 					return Err(Error::BadRPC(format!("Bucket {} is not empty", query.name)));
@@ -170,7 +170,7 @@ impl AdminRpcHandler {
 				let key_ids = self
 					.garage
 					.key_table
-					.get_range(&EmptyKey, None, Some(()), 10000)
+					.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
 					.await?
 					.iter()
 					.map(|k| (k.key_id.to_string(), k.name.to_string()))
diff --git a/src/model/block.rs b/src/model/block.rs
index 4e8bb7d9..6a5d9c5b 100644
--- a/src/model/block.rs
+++ b/src/model/block.rs
@@ -20,7 +20,7 @@ use garage_rpc::rpc_client::*;
 use garage_rpc::rpc_server::*;
 
 use garage_table::table_sharded::TableShardedReplication;
-use garage_table::TableReplication;
+use garage_table::{TableReplication, DeletedFilter};
 
 use crate::block_ref_table::*;
 
@@ -306,7 +306,7 @@ impl BlockManager {
 		let garage = self.garage.load_full().unwrap();
 		let active_refs = garage
 			.block_ref_table
-			.get_range(&hash, None, Some(()), 1)
+			.get_range(&hash, None, Some(DeletedFilter::NotDeleted), 1)
 			.await?;
 		let needed_by_others = !active_refs.is_empty();
 		if needed_by_others {
diff --git a/src/model/block_ref_table.rs b/src/model/block_ref_table.rs
index a00438c0..5a7d9aa1 100644
--- a/src/model/block_ref_table.rs
+++ b/src/model/block_ref_table.rs
@@ -47,7 +47,7 @@ impl TableSchema for BlockRefTable {
 	type P = Hash;
 	type S = UUID;
 	type E = BlockRef;
-	type Filter = ();
+	type Filter = DeletedFilter;
 
 	async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
 		let block = &old.as_ref().or(new.as_ref()).unwrap().block;
@@ -62,7 +62,7 @@ impl TableSchema for BlockRefTable {
 		Ok(())
 	}
 
-	fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
-		!entry.deleted
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		filter.apply(entry.deleted)
 	}
 }
diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs
index 28234d82..35c0cc27 100644
--- a/src/model/bucket_table.rs
+++ b/src/model/bucket_table.rs
@@ -75,7 +75,7 @@ impl Entry<EmptyKey, String> for Bucket {
 	}
 
 	fn merge(&mut self, other: &Self) {
-		if other.timestamp < self.timestamp {
+		if other.timestamp > self.timestamp {
 			*self = other.clone();
 			return;
 		}
@@ -104,18 +104,19 @@ impl Entry<EmptyKey, String> for Bucket {
 
 pub struct BucketTable;
 
+
 #[async_trait]
 impl TableSchema for BucketTable {
 	type P = EmptyKey;
 	type S = String;
 	type E = Bucket;
-	type Filter = ();
+	type Filter = DeletedFilter;
 
 	async fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) -> Result<(), Error> {
 		Ok(())
 	}
 
-	fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
-		!entry.deleted
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		filter.apply(entry.deleted)
 	}
 }
diff --git a/src/model/key_table.rs b/src/model/key_table.rs
index 76d163b5..05b938ce 100644
--- a/src/model/key_table.rs
+++ b/src/model/key_table.rs
@@ -104,6 +104,11 @@ impl Entry<EmptyKey, String> for Key {
 	}
 
 	fn merge(&mut self, other: &Self) {
+		if other.name_timestamp > self.name_timestamp {
+			self.name_timestamp = other.name_timestamp;
+			self.name = other.name.clone();
+		}
+
 		if other.deleted {
 			self.deleted = true;
 		}
@@ -111,10 +116,6 @@ impl Entry<EmptyKey, String> for Key {
 			self.authorized_buckets.clear();
 			return;
 		}
-		if other.name_timestamp > self.name_timestamp {
-			self.name_timestamp = other.name_timestamp;
-			self.name = other.name.clone();
-		}
 
 		for ab in other.authorized_buckets.iter() {
 			match self
@@ -142,13 +143,13 @@ impl TableSchema for KeyTable {
 	type P = EmptyKey;
 	type S = String;
 	type E = Key;
-	type Filter = ();
+	type Filter = DeletedFilter;
 
 	async fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) -> Result<(), Error> {
 		Ok(())
 	}
 
-	fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
-		!entry.deleted
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		filter.apply(entry.deleted)
 	}
 }
diff --git a/src/model/object_table.rs b/src/model/object_table.rs
index 719a222c..929b63f0 100644
--- a/src/model/object_table.rs
+++ b/src/model/object_table.rs
@@ -196,7 +196,7 @@ impl TableSchema for ObjectTable {
 	type P = String;
 	type S = String;
 	type E = Object;
-	type Filter = ();
+	type Filter = DeletedFilter;
 
 	async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
 		let version_table = self.version_table.clone();
@@ -228,8 +228,9 @@ impl TableSchema for ObjectTable {
 		Ok(())
 	}
 
-	fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
-		entry.versions.iter().any(|v| v.is_data())
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		let deleted = !entry.versions.iter().any(|v| v.is_data());
+		filter.apply(deleted)
 	}
 
 	fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
diff --git a/src/model/version_table.rs b/src/model/version_table.rs
index 6054e389..0d831998 100644
--- a/src/model/version_table.rs
+++ b/src/model/version_table.rs
@@ -117,7 +117,7 @@ impl TableSchema for VersionTable {
 	type P = Hash;
 	type S = EmptyKey;
 	type E = Version;
-	type Filter = ();
+	type Filter = DeletedFilter;
 
 	async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
 		let block_ref_table = self.block_ref_table.clone();
@@ -139,7 +139,7 @@ impl TableSchema for VersionTable {
 		Ok(())
 	}
 
-	fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
-		!entry.deleted
+	fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
+		filter.apply(entry.deleted)
 	}
 }
diff --git a/src/table/lib.rs b/src/table/lib.rs
index ac129146..7684fe9d 100644
--- a/src/table/lib.rs
+++ b/src/table/lib.rs
@@ -4,10 +4,13 @@ extern crate log;
 
 pub mod schema;
+pub mod util;
+
 pub mod table;
 pub mod table_fullcopy;
 pub mod table_sharded;
 pub mod table_sync;
 
 pub use schema::*;
+pub use util::*;
 pub use table::*;
diff --git a/src/table/schema.rs b/src/table/schema.rs
index 1914320e..49cede0a 100644
--- a/src/table/schema.rs
+++ b/src/table/schema.rs
@@ -8,54 +8,46 @@ pub trait PartitionKey {
 	fn hash(&self) -> Hash;
 }
 
-pub trait SortKey {
-	fn sort_key(&self) -> &[u8];
-}
-
-pub trait Entry<P: PartitionKey, S: SortKey>:
-	PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync
-{
-	fn partition_key(&self) -> &P;
-	fn sort_key(&self) -> &S;
-
-	fn merge(&mut self, other: &Self);
-}
-
-#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct EmptyKey;
-impl SortKey for EmptyKey {
-	fn sort_key(&self) -> &[u8] {
-		&[]
-	}
-}
-impl PartitionKey for EmptyKey {
+impl PartitionKey for String {
 	fn hash(&self) -> Hash {
-		[0u8; 32].into()
+		hash(self.as_bytes())
 	}
 }
 
-impl PartitionKey for String {
+impl PartitionKey for Hash {
 	fn hash(&self) -> Hash {
-		hash(self.as_bytes())
+		self.clone()
 	}
 }
+
+
+pub trait SortKey {
+	fn sort_key(&self) -> &[u8];
+}
+
 impl SortKey for String {
 	fn sort_key(&self) -> &[u8] {
 		self.as_bytes()
 	}
 }
 
-impl PartitionKey for Hash {
-	fn hash(&self) -> Hash {
-		self.clone()
-	}
-}
 impl SortKey for Hash {
 	fn sort_key(&self) -> &[u8] {
 		self.as_slice()
 	}
 }
+
+pub trait Entry<P: PartitionKey, S: SortKey>:
+	PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync
+{
+	fn partition_key(&self) -> &P;
+	fn sort_key(&self) -> &S;
+
+	fn merge(&mut self, other: &Self);
+}
+
+
 #[async_trait]
 pub trait TableSchema: Send + Sync {
 	type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync;
@@ -74,3 +66,4 @@ pub trait TableSchema: Send + Sync {
 		true
 	}
 }
+
diff --git a/src/table/util.rs b/src/table/util.rs
new file mode 100644
index 00000000..043a457c
--- /dev/null
+++ b/src/table/util.rs
@@ -0,0 +1,35 @@
+use serde::{Deserialize, Serialize};
+
+use garage_util::data::*;
+
+use crate::schema::*;
+
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct EmptyKey;
+impl SortKey for EmptyKey {
+	fn sort_key(&self) -> &[u8] {
+		&[]
+	}
+}
+impl PartitionKey for EmptyKey {
+	fn hash(&self) -> Hash {
+		[0u8; 32].into()
+	}
+}
+
+#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
+pub enum DeletedFilter {
+	All,
+	Deleted,
+	NotDeleted,
+}
+
+impl DeletedFilter {
+	pub fn apply(&self, deleted: bool) -> bool {
+		match self {
+			DeletedFilter::All => true,
+			DeletedFilter::Deleted => deleted,
+			DeletedFilter::NotDeleted => !deleted,
+		}
+	}
+}
diff --git a/test_delete.sh b/test_delete.sh
deleted file mode 100755
index 21054a0f..00000000
--- a/test_delete.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-for FILE in $(find target); do
-	curl localhost:3900/$FILE -X DELETE -H 'Host: garage'
-done
-
diff --git a/test_read.sh b/test_read.sh
deleted file mode 100755
index 5ca3fed3..00000000
--- a/test_read.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#!/bin/bash
-
-for FILE in $(find target/debug/deps); do
-	SHA2=$(curl localhost:3900/$FILE -H 'Host: garage' 2>/dev/null | sha256sum | cut -d ' ' -f 1)
-	SHA2REF=$(sha256sum $FILE | cut -d ' ' -f 1)
-	if [ "$SHA2" = "$SHA2REF" ]; then
-		echo "OK $FILE"
-	else
-		echo "!!!! ERROR $FILE !!!!"
-	fi
-done
-
diff --git a/test_write.sh b/test_write.sh
deleted file mode 100755
index 12a3cb50..00000000
--- a/test_write.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-for FILE in $(find target/debug/deps); do
-	echo -n "$FILE "
-	curl localhost:3900/$FILE -X PUT -H 'Host: garage' -H 'Content-Type: application/blob' --data-binary "@$FILE" || echo "ERROR"
-done
-
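
The scattered Rust changes above all follow one pattern, consolidated in the self-contained sketch below. The DeletedFilter enum and its apply() method are copied from the new src/table/util.rs; the Thing entry type, the free-standing matches_filter function and the main demo are illustrative stand-ins (not repository code) for the per-table TableSchema implementations and the get_range call sites touched in this commit.

// Sketch only: mirrors the filter pattern introduced by this commit.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DeletedFilter {
    All,
    Deleted,
    NotDeleted,
}

impl DeletedFilter {
    // True if an entry with the given deleted flag should be returned.
    pub fn apply(&self, deleted: bool) -> bool {
        match self {
            DeletedFilter::All => true,
            DeletedFilter::Deleted => deleted,
            DeletedFilter::NotDeleted => !deleted,
        }
    }
}

// Hypothetical entry type standing in for Bucket, Key, Version, BlockRef, ...
pub struct Thing {
    pub name: String,
    pub deleted: bool,
}

// What the per-table matches_filter implementations reduce to after the change.
fn matches_filter(entry: &Thing, filter: &DeletedFilter) -> bool {
    filter.apply(entry.deleted)
}

fn main() {
    let entries = vec![
        Thing { name: "live".into(), deleted: false },
        Thing { name: "gone".into(), deleted: true },
    ];
    // A range listing that only returns live entries, analogous to the
    // get_range(..., Some(DeletedFilter::NotDeleted), ...) calls above.
    let live: Vec<&Thing> = entries
        .iter()
        .filter(|e| matches_filter(e, &DeletedFilter::NotDeleted))
        .collect();
    assert_eq!(live.len(), 1);
    println!("{} live entry", live.len());
}

Passing Some(DeletedFilter::NotDeleted) where callers previously passed Some(()) keeps the old behaviour (tombstoned entries stay hidden), while the enum also lets a caller ask for All or only Deleted entries instead.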