From 0a76db1b8cf6a7d7180d35fbfae82e135e7fbf59 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Mon, 30 Oct 2023 18:07:40 +0100 Subject: WIP traits for the storage --- src/main.rs | 1 + src/storage/mod.rs | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 src/storage/mod.rs (limited to 'src') diff --git a/src/main.rs b/src/main.rs index 4ca07d0..2f6d512 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,6 +7,7 @@ mod lmtp; mod login; mod mail; mod server; +mod storage; mod time; use std::path::PathBuf; diff --git a/src/storage/mod.rs b/src/storage/mod.rs new file mode 100644 index 0000000..399a416 --- /dev/null +++ b/src/storage/mod.rs @@ -0,0 +1,32 @@ +pub trait RowStore { + fn new_row_ref(partition: &str, sort: &str) -> K; +} + +pub trait RowRef { + fn set_value(&self, content: &[u8]) -> RowValue; + async fn get(&self) -> Result; + async fn rm(&self) -> Result<(), E>; + async fn obs(&self) -> Result, ()>; +} + +pub trait RowValue { + fn row_ref(&self) -> RowRef; + fn content(&self) -> Vec; + async fn put(&self) -> Result<(), E>; +} + +/* + async fn get_many_keys(&self, keys: &[K]) -> Result, ()>; + async fn put_many_keys(&self, values: &[V]) -> Result<(), ()>; +}*/ + +pub trait BlobStore { + fn new_blob_ref(key: &str) -> BlobRef; + async fn list(&self) -> (); +} + +pub trait BlobRef { + async fn put(&self, key: &str, body: &[u8]) -> (); + async fn copy(&self, dst: &BlobRef) -> (); + async fn rm(&self, key: &str); +} -- cgit v1.2.3 From 95685ba9a75df0f00458b8260fb87bae82e0dfa3 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 1 Nov 2023 09:20:36 +0100 Subject: a first naive version of the storage interface --- src/main.rs | 4 +++ src/storage/garage.rs | 0 src/storage/in_memory.rs | 0 src/storage/mod.rs | 85 ++++++++++++++++++++++++++++++++++++++---------- 4 files changed, 72 insertions(+), 17 deletions(-) create mode 100644 src/storage/garage.rs create mode 100644 src/storage/in_memory.rs (limited to 'src') diff --git a/src/main.rs b/src/main.rs index 2f6d512..7e1626d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,7 @@ +#![feature(async_fn_in_trait)] +#![feature(return_position_impl_trait_in_trait)] +// should be stabilized soon https://github.com/rust-lang/rust/pull/115822 + mod bayou; mod config; mod cryptoblob; diff --git a/src/storage/garage.rs b/src/storage/garage.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 399a416..1a77a55 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -1,32 +1,83 @@ -pub trait RowStore { - fn new_row_ref(partition: &str, sort: &str) -> K; +/* + * T1 : Filter + * T2 : Range + * T3 : Atom + */ + +/* + * My idea: we can encapsulate the causality token + * into the object system so it is not exposed. + * + * This abstraction goal is to leverage all the semantic of Garage K2V+S3, + * to be as tailored as possible to it ; it aims to be a zero-cost abstraction + * compared to when we where directly using the K2V+S3 client. 
+ */ + + +mod garage; + +pub enum Selector<'a> { + Range{ begin: &'a str, end: &'a str }, + Filter(u64), +} + +pub enum Alternative { + Tombstone, + Value(Vec), +} +type ConcurrentValues = Vec; + +pub enum Error { + NotFound, + Internal, +} + +// ------ Rows +pub trait RowStore { + fn new_ref(partition: &str, sort: &str) -> impl RowRef; + fn new_ref_batch(partition: &str, filter: Selector) -> impl RowRefBatch; } pub trait RowRef { - fn set_value(&self, content: &[u8]) -> RowValue; - async fn get(&self) -> Result; - async fn rm(&self) -> Result<(), E>; - async fn obs(&self) -> Result, ()>; + fn to_value(&self, content: &[u8]) -> impl RowValue; + async fn get(&self) -> Result; + async fn rm(&self) -> Result<(), Error>; + async fn poll(&self) -> Result, Error>; } pub trait RowValue { - fn row_ref(&self) -> RowRef; - fn content(&self) -> Vec; - async fn put(&self) -> Result<(), E>; + fn row_ref(&self) -> impl RowRef; + fn content(&self) -> ConcurrentValues; + async fn persist(&self) -> Result<(), Error>; } -/* - async fn get_many_keys(&self, keys: &[K]) -> Result, ()>; - async fn put_many_keys(&self, values: &[V]) -> Result<(), ()>; -}*/ +// ------ Row batch +pub trait RowRefBatch { + fn to_values(&self, content: Vec<&[u8]>) -> impl RowValueBatch; + fn into_independant(&self) -> Vec; + async fn get(&self) -> Result; + async fn rm(&self) -> Result<(), Error>; +} +pub trait RowValueBatch { + fn into_independant(&self) -> Vec; + fn content(&self) -> Vec; + async fn persist(&self) -> Result<(), Error>; +} + +// ----- Blobs pub trait BlobStore { - fn new_blob_ref(key: &str) -> BlobRef; + fn new_ref(key: &str) -> impl BlobRef; async fn list(&self) -> (); } pub trait BlobRef { - async fn put(&self, key: &str, body: &[u8]) -> (); - async fn copy(&self, dst: &BlobRef) -> (); - async fn rm(&self, key: &str); + fn set_value(&self, content: &[u8]) -> impl BlobValue; + async fn get(&self) -> impl BlobValue; + async fn copy(&self, dst: &impl BlobRef) -> (); + async fn rm(&self, key: &str) -> (); +} + +pub trait BlobValue { + async fn persist(); } -- cgit v1.2.3 From c3bb2b62a862c09d52226a82a032061676a0cb77 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 1 Nov 2023 09:25:09 +0100 Subject: rework interface --- src/storage/mod.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 1a77a55..6b410a2 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -32,12 +32,15 @@ pub enum Error { Internal, } -// ------ Rows +// ------ Store pub trait RowStore { - fn new_ref(partition: &str, sort: &str) -> impl RowRef; - fn new_ref_batch(partition: &str, filter: Selector) -> impl RowRefBatch; + fn new_row(&self, partition: &str, sort: &str) -> impl RowRef; + fn new_row_batch(&self, partition: &str, filter: Selector) -> impl RowRefBatch; + fn new_blob(&self, key: &str) -> impl BlobRef; + fn new_blob_list(&self) -> Vec; } +// ------- Row pub trait RowRef { fn to_value(&self, content: &[u8]) -> impl RowValue; async fn get(&self) -> Result; @@ -66,11 +69,6 @@ pub trait RowValueBatch { } // ----- Blobs -pub trait BlobStore { - fn new_ref(key: &str) -> impl BlobRef; - async fn list(&self) -> (); -} - pub trait BlobRef { fn set_value(&self, content: &[u8]) -> impl BlobValue; async fn get(&self) -> impl BlobValue; -- cgit v1.2.3 From 92fea414d9d113761b788e409a025ad9cff06071 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 1 Nov 2023 15:15:57 +0100 Subject: v2 api storage --- src/main.rs | 2 - src/storage/garage.rs | 
96 ++++++++++++++++++++++++++++++++++++++++++++++++ src/storage/in_memory.rs | 54 +++++++++++++++++++++++++++ src/storage/mod.rs | 74 ++++++++++++++----------------------- 4 files changed, 178 insertions(+), 48 deletions(-) (limited to 'src') diff --git a/src/main.rs b/src/main.rs index 7e1626d..8d2a140 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,4 @@ #![feature(async_fn_in_trait)] -#![feature(return_position_impl_trait_in_trait)] -// should be stabilized soon https://github.com/rust-lang/rust/pull/115822 mod bayou; mod config; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index e69de29..91c4fa2 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -0,0 +1,96 @@ +use crate::storage::*; + +pub struct GrgCreds {} +pub struct GrgStore {} +pub struct GrgRef {} +pub struct GrgValue {} + +pub struct GrgTypes {} +impl RowRealization for GrgTypes { + type Store=GrgStore; + type Ref=GrgRef; + type Value=GrgValue; +} + +impl RowBuilder for GrgCreds { + fn row_store(&self) -> GrgStore { + unimplemented!(); + } +} + +impl RowStore for GrgStore { + fn new_row(&self, partition: &str, sort: &str) -> GrgRef { + unimplemented!(); + } +} + +impl RowRef for GrgRef { + fn set_value(&self, content: Vec) -> GrgValue { + unimplemented!(); + } + async fn fetch(&self) -> Result { + unimplemented!(); + } + async fn rm(&self) -> Result<(), Error> { + unimplemented!(); + } + async fn poll(&self) -> Result, Error> { + unimplemented!(); + } +} + +impl RowValue for GrgValue { + fn to_ref(&self) -> GrgRef { + unimplemented!(); + } + fn content(&self) -> ConcurrentValues { + unimplemented!(); + } + async fn push(&self) -> Result<(), Error> { + unimplemented!(); + } +} + + + + +/* +/// A custom S3 region, composed of a region name and endpoint. +/// We use this instead of rusoto_signature::Region so that we can +/// derive Hash and Eq + + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct Region { + pub name: String, + pub endpoint: String, +} + +impl Region { + pub fn as_rusoto_region(&self) -> rusoto_signature::Region { + rusoto_signature::Region::Custom { + name: self.name.clone(), + endpoint: self.endpoint.clone(), + } + } +} +*/ + +/* +pub struct Garage { + pub s3_region: Region, + pub k2v_region: Region, + + pub aws_access_key_id: String, + pub aws_secret_access_key: String, + pub bucket: String, +} + +impl StoreBuilder<> for Garage { + fn row_store(&self) -> +} + +pub struct K2V {} +impl RowStore for K2V { + +}*/ diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index e69de29..a2e9e96 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -0,0 +1,54 @@ +use crate::storage::*; + +pub struct MemCreds {} +pub struct MemStore {} +pub struct MemRef {} +pub struct MemValue {} + +pub struct MemTypes {} +impl RowRealization for MemTypes { + type Store=MemStore; + type Ref=MemRef; + type Value=MemValue; +} + +impl RowBuilder for MemCreds { + fn row_store(&self) -> MemStore { + unimplemented!(); + } +} + +impl RowStore for MemStore { + fn new_row(&self, partition: &str, sort: &str) -> MemRef { + unimplemented!(); + } +} + +impl RowRef for MemRef { + fn set_value(&self, content: Vec) -> MemValue { + unimplemented!(); + } + async fn fetch(&self) -> Result { + unimplemented!(); + } + async fn rm(&self) -> Result<(), Error> { + unimplemented!(); + } + async fn poll(&self) -> Result, Error> { + unimplemented!(); + } +} + +impl RowValue for MemValue { + fn to_ref(&self) -> MemRef { + unimplemented!(); + } + fn content(&self) -> ConcurrentValues { + 
unimplemented!(); + } + async fn push(&self) -> Result<(), Error> { + unimplemented!(); + } +} + + diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 6b410a2..4ef2d61 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -1,19 +1,14 @@ /* - * T1 : Filter - * T2 : Range - * T3 : Atom - */ - -/* - * My idea: we can encapsulate the causality token - * into the object system so it is not exposed. * * This abstraction goal is to leverage all the semantic of Garage K2V+S3, * to be as tailored as possible to it ; it aims to be a zero-cost abstraction * compared to when we where directly using the K2V+S3 client. + * + * My idea: we can encapsulate the causality token + * into the object system so it is not exposed. */ - +mod in_memory; mod garage; pub enum Selector<'a> { @@ -27,55 +22,42 @@ pub enum Alternative { } type ConcurrentValues = Vec; +#[derive(Debug)] pub enum Error { NotFound, Internal, } -// ------ Store -pub trait RowStore { - fn new_row(&self, partition: &str, sort: &str) -> impl RowRef; - fn new_row_batch(&self, partition: &str, filter: Selector) -> impl RowRefBatch; - fn new_blob(&self, key: &str) -> impl BlobRef; - fn new_blob_list(&self) -> Vec; +pub trait RowRealization: Sized { + type Store: RowStore; + type Ref: RowRef; + type Value: RowValue; } -// ------- Row -pub trait RowRef { - fn to_value(&self, content: &[u8]) -> impl RowValue; - async fn get(&self) -> Result; - async fn rm(&self) -> Result<(), Error>; - async fn poll(&self) -> Result, Error>; +// ------ Row Builder +pub trait RowBuilder +{ + fn row_store(&self) -> R::Store; } -pub trait RowValue { - fn row_ref(&self) -> impl RowRef; - fn content(&self) -> ConcurrentValues; - async fn persist(&self) -> Result<(), Error>; +// ------ Row Store +pub trait RowStore +{ + fn new_row(&self, partition: &str, sort: &str) -> R::Ref; } -// ------ Row batch -pub trait RowRefBatch { - fn to_values(&self, content: Vec<&[u8]>) -> impl RowValueBatch; - fn into_independant(&self) -> Vec; - async fn get(&self) -> Result; +// ------- Row Item +pub trait RowRef +{ + fn set_value(&self, content: Vec) -> R::Value; + async fn fetch(&self) -> Result; async fn rm(&self) -> Result<(), Error>; + async fn poll(&self) -> Result, Error>; } -pub trait RowValueBatch { - fn into_independant(&self) -> Vec; - fn content(&self) -> Vec; - async fn persist(&self) -> Result<(), Error>; -} - -// ----- Blobs -pub trait BlobRef { - fn set_value(&self, content: &[u8]) -> impl BlobValue; - async fn get(&self) -> impl BlobValue; - async fn copy(&self, dst: &impl BlobRef) -> (); - async fn rm(&self, key: &str) -> (); -} - -pub trait BlobValue { - async fn persist(); +pub trait RowValue +{ + fn to_ref(&self) -> R::Ref; + fn content(&self) -> ConcurrentValues; + async fn push(&self) -> Result<(), Error>; } -- cgit v1.2.3 From 3026b217774a51e01cca1ae584fba8c6398754cc Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 1 Nov 2023 15:36:06 +0100 Subject: integration to login with an enum --- src/login/mod.rs | 23 ++++++++++++++++++++--- src/storage/mod.rs | 7 +++++-- 2 files changed, 25 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index 3fab90a..f403bcb 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -15,6 +15,9 @@ use rusoto_credential::{AwsCredentials, StaticProvider}; use rusoto_s3::S3Client; use crate::cryptoblob::*; +use crate::storage::*; +use crate::storage::in_memory::MemTypes; +use crate::storage::garage::GrgTypes; /// The trait LoginProvider defines the interface for a login 
provider that allows /// to retrieve storage and cryptographic credentials for access to a user account @@ -23,12 +26,17 @@ use crate::cryptoblob::*; pub trait LoginProvider { /// The login method takes an account's password as an input to decypher /// decryption keys and obtain full access to the user's account. - async fn login(&self, username: &str, password: &str) -> Result; + async fn login(&self, username: &str, password: &str) -> Result; /// The public_login method takes an account's email address and returns /// public credentials for adding mails to the user's inbox. async fn public_login(&self, email: &str) -> Result; } +pub enum AnyCredentials { + InMemory(Credentials), + Garage(Credentials), +} + /// ArcLoginProvider is simply an alias on a structure that is used /// in many places in the code pub type ArcLoginProvider = Arc; @@ -36,9 +44,9 @@ pub type ArcLoginProvider = Arc; /// The struct Credentials represent all of the necessary information to interact /// with a user account's data after they are logged in. #[derive(Clone, Debug)] -pub struct Credentials { +pub struct Credentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: StorageCredentials, + pub storage: T::Builder, /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V pub keys: CryptoKeys, } @@ -106,6 +114,7 @@ impl Region { // ---- +/* impl Credentials { pub fn k2v_client(&self) -> Result { self.storage.k2v_client() @@ -116,6 +125,14 @@ impl Credentials { pub fn bucket(&self) -> &str { self.storage.bucket.as_str() } +}*/ +impl From for Credentials { + fn from(ac: AnyCredentials) -> Self { + match ac { + AnyCredentials::InMemory(c) => c, + AnyCredentials::Garage(c) => c, + } + } } impl StorageCredentials { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 4ef2d61..bc26379 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -8,8 +8,8 @@ * into the object system so it is not exposed. 
*/ -mod in_memory; -mod garage; +pub mod in_memory; +pub mod garage; pub enum Selector<'a> { Range{ begin: &'a str, end: &'a str }, @@ -29,11 +29,14 @@ pub enum Error { } pub trait RowRealization: Sized { + type Builder: RowBuilder; type Store: RowStore; type Ref: RowRef; type Value: RowValue; } +pub trait StorageEngine: RowRealization {} + // ------ Row Builder pub trait RowBuilder { -- cgit v1.2.3 From 8ac3a8ce8ba268a3261e23694b8b62afa6a3ae37 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 1 Nov 2023 16:45:29 +0100 Subject: implement an AnyCredentials --- src/login/mod.rs | 33 ++++++++++++++++----------------- src/mail/user.rs | 6 ++++-- src/storage/garage.rs | 3 ++- src/storage/in_memory.rs | 3 ++- src/storage/mod.rs | 14 ++++++-------- 5 files changed, 30 insertions(+), 29 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index f403bcb..5bd976e 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -16,8 +16,6 @@ use rusoto_s3::S3Client; use crate::cryptoblob::*; use crate::storage::*; -use crate::storage::in_memory::MemTypes; -use crate::storage::garage::GrgTypes; /// The trait LoginProvider defines the interface for a login provider that allows /// to retrieve storage and cryptographic credentials for access to a user account @@ -32,19 +30,28 @@ pub trait LoginProvider { async fn public_login(&self, email: &str) -> Result; } -pub enum AnyCredentials { - InMemory(Credentials), - Garage(Credentials), -} - /// ArcLoginProvider is simply an alias on a structure that is used /// in many places in the code pub type ArcLoginProvider = Arc; +pub enum AnyCredentials { + InMemory(Credentials), + Garage(Credentials), +} +impl AnyCredentials where X: Sto +{ + fn to_gen(&self) -> Credentials { + match self { + Self::InMemory(u) => u, + Self::Garage(u) => u, + } + } +} + /// The struct Credentials represent all of the necessary information to interact /// with a user account's data after they are logged in. 
#[derive(Clone, Debug)] -pub struct Credentials { +pub struct Credentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) pub storage: T::Builder, /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V @@ -114,7 +121,7 @@ impl Region { // ---- -/* + impl Credentials { pub fn k2v_client(&self) -> Result { self.storage.k2v_client() @@ -125,14 +132,6 @@ impl Credentials { pub fn bucket(&self) -> &str { self.storage.bucket.as_str() } -}*/ -impl From for Credentials { - fn from(ac: AnyCredentials) -> Self { - match ac { - AnyCredentials::InMemory(c) => c, - AnyCredentials::Garage(c) => c, - } - } } impl StorageCredentials { diff --git a/src/mail/user.rs b/src/mail/user.rs index 5523c2a..9d94563 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -30,9 +30,11 @@ pub const INBOX: &str = "INBOX"; const MAILBOX_LIST_PK: &str = "mailboxes"; const MAILBOX_LIST_SK: &str = "list"; -pub struct User { +use crate::storage::*; + +pub struct User { pub username: String, - pub creds: Credentials, + pub creds: Credentials, pub k2v: K2vClient, pub mailboxes: std::sync::Mutex>>, diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 91c4fa2..b883623 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -6,7 +6,8 @@ pub struct GrgRef {} pub struct GrgValue {} pub struct GrgTypes {} -impl RowRealization for GrgTypes { +impl Sto for GrgTypes { + type Builder=GrgCreds; type Store=GrgStore; type Ref=GrgRef; type Value=GrgValue; diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index a2e9e96..56df266 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -6,7 +6,8 @@ pub struct MemRef {} pub struct MemValue {} pub struct MemTypes {} -impl RowRealization for MemTypes { +impl Sto for MemTypes { + type Builder=MemCreds; type Store=MemStore; type Ref=MemRef; type Value=MemValue; diff --git a/src/storage/mod.rs b/src/storage/mod.rs index bc26379..2e4f757 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -28,29 +28,27 @@ pub enum Error { Internal, } -pub trait RowRealization: Sized { - type Builder: RowBuilder; +pub trait Sto: Sized { + type Builder: RowStore; type Store: RowStore; type Ref: RowRef; type Value: RowValue; } -pub trait StorageEngine: RowRealization {} - // ------ Row Builder -pub trait RowBuilder +pub trait RowBuilder { fn row_store(&self) -> R::Store; } // ------ Row Store -pub trait RowStore +pub trait RowStore { fn new_row(&self, partition: &str, sort: &str) -> R::Ref; } // ------- Row Item -pub trait RowRef +pub trait RowRef { fn set_value(&self, content: Vec) -> R::Value; async fn fetch(&self) -> Result; @@ -58,7 +56,7 @@ pub trait RowRef async fn poll(&self) -> Result, Error>; } -pub trait RowValue +pub trait RowValue { fn to_ref(&self) -> R::Ref; fn content(&self) -> ConcurrentValues; -- cgit v1.2.3 From cf8b9ac28d6813bd589f363ad3659dd215bd7cea Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 1 Nov 2023 17:18:58 +0100 Subject: mask implementation to the rest of the code --- src/login/mod.rs | 20 +++----------------- src/mail/user.rs | 4 ++-- src/storage/mod.rs | 20 +++++++++++++++++++- 3 files changed, 24 insertions(+), 20 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index 5bd976e..e87a17d 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -24,7 +24,7 @@ use crate::storage::*; pub trait LoginProvider { /// The login method takes an account's password as an input to decypher /// decryption keys and obtain 
full access to the user's account. - async fn login(&self, username: &str, password: &str) -> Result; + async fn login(&self, username: &str, password: &str) -> Result; /// The public_login method takes an account's email address and returns /// public credentials for adding mails to the user's inbox. async fn public_login(&self, email: &str) -> Result; @@ -34,26 +34,12 @@ pub trait LoginProvider { /// in many places in the code pub type ArcLoginProvider = Arc; -pub enum AnyCredentials { - InMemory(Credentials), - Garage(Credentials), -} -impl AnyCredentials where X: Sto -{ - fn to_gen(&self) -> Credentials { - match self { - Self::InMemory(u) => u, - Self::Garage(u) => u, - } - } -} - /// The struct Credentials represent all of the necessary information to interact /// with a user account's data after they are logged in. #[derive(Clone, Debug)] -pub struct Credentials { +pub struct Credentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: T::Builder, + pub storage: AnyEngine, /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V pub keys: CryptoKeys, } diff --git a/src/mail/user.rs b/src/mail/user.rs index 9d94563..360786d 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -32,9 +32,9 @@ const MAILBOX_LIST_SK: &str = "list"; use crate::storage::*; -pub struct User { +pub struct User { pub username: String, - pub creds: Credentials, + pub creds: Credentials, pub k2v: K2vClient, pub mailboxes: std::sync::Mutex>>, diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 2e4f757..c0835e6 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -29,12 +29,30 @@ pub enum Error { } pub trait Sto: Sized { - type Builder: RowStore; + type Builder: RowBuilder; type Store: RowStore; type Ref: RowRef; type Value: RowValue; } +pub struct Engine { + bucket: String, + row: T::Builder, +} + +pub enum AnyEngine { + InMemory(Engine), + Garage(Engine), +} +impl AnyEngine { + fn engine(&self) -> &Engine { + match self { + Self::InMemory(x) => x, + Self::Garage(x) => x, + } + } +} + // ------ Row Builder pub trait RowBuilder { -- cgit v1.2.3 From 26f14df3f460320b2e2d31deb9d3cef90f43790c Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 09:42:50 +0100 Subject: we are doomed with static types --- src/mail/incoming.rs | 5 +++-- src/mail/user.rs | 2 -- src/storage/mod.rs | 6 +++--- 3 files changed, 6 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index b7d2f48..7094b42 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -23,6 +23,7 @@ use crate::mail::unique_ident::*; use crate::mail::user::User; use crate::mail::IMF; use crate::time::now_msec; +use crate::storage::Sto; const INCOMING_PK: &str = "incoming"; const INCOMING_LOCK_SK: &str = "lock"; @@ -139,14 +140,14 @@ async fn incoming_mail_watch_process_internal( Ok(()) } -async fn handle_incoming_mail( +async fn handle_incoming_mail( user: &Arc, s3: &S3Client, inbox: &Arc, lock_held: &watch::Receiver, ) -> Result<()> { let lor = ListObjectsV2Request { - bucket: user.creds.storage.bucket.clone(), + bucket: user.creds.storage.engine::().bucket.clone(), max_keys: Some(1000), prefix: Some("incoming/".into()), ..Default::default() diff --git a/src/mail/user.rs b/src/mail/user.rs index 360786d..5523c2a 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -30,8 +30,6 @@ pub const INBOX: &str = "INBOX"; const MAILBOX_LIST_PK: &str = "mailboxes"; const MAILBOX_LIST_SK: &str 
= "list"; -use crate::storage::*; - pub struct User { pub username: String, pub creds: Credentials, diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c0835e6..ee475ee 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -36,8 +36,8 @@ pub trait Sto: Sized { } pub struct Engine { - bucket: String, - row: T::Builder, + pub bucket: String, + pub row: T::Builder, } pub enum AnyEngine { @@ -45,7 +45,7 @@ pub enum AnyEngine { Garage(Engine), } impl AnyEngine { - fn engine(&self) -> &Engine { + pub fn engine(&self) -> &Engine { match self { Self::InMemory(x) => x, Self::Garage(x) => x, -- cgit v1.2.3 From 415f51ac4cfc723bbf6f0c08d57fb86e96c665a2 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 09:57:58 +0100 Subject: sadly switch to dynamic dispatch --- src/storage/garage.rs | 16 ++++------------ src/storage/in_memory.rs | 16 ++++------------ src/storage/mod.rs | 48 ++++++++++++++++-------------------------------- 3 files changed, 24 insertions(+), 56 deletions(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index b883623..965953e 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -5,27 +5,19 @@ pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} -pub struct GrgTypes {} -impl Sto for GrgTypes { - type Builder=GrgCreds; - type Store=GrgStore; - type Ref=GrgRef; - type Value=GrgValue; -} - -impl RowBuilder for GrgCreds { +impl IRowBuilder for GrgCreds { fn row_store(&self) -> GrgStore { unimplemented!(); } } -impl RowStore for GrgStore { +impl IRowStore for GrgStore { fn new_row(&self, partition: &str, sort: &str) -> GrgRef { unimplemented!(); } } -impl RowRef for GrgRef { +impl IRowRef for GrgRef { fn set_value(&self, content: Vec) -> GrgValue { unimplemented!(); } @@ -40,7 +32,7 @@ impl RowRef for GrgRef { } } -impl RowValue for GrgValue { +impl IRowValue for GrgValue { fn to_ref(&self) -> GrgRef { unimplemented!(); } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 56df266..dc3d1e1 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -5,27 +5,19 @@ pub struct MemStore {} pub struct MemRef {} pub struct MemValue {} -pub struct MemTypes {} -impl Sto for MemTypes { - type Builder=MemCreds; - type Store=MemStore; - type Ref=MemRef; - type Value=MemValue; -} - -impl RowBuilder for MemCreds { +impl IRowBuilder for MemCreds { fn row_store(&self) -> MemStore { unimplemented!(); } } -impl RowStore for MemStore { +impl IRowStore for MemStore { fn new_row(&self, partition: &str, sort: &str) -> MemRef { unimplemented!(); } } -impl RowRef for MemRef { +impl IRowRef for MemRef { fn set_value(&self, content: Vec) -> MemValue { unimplemented!(); } @@ -40,7 +32,7 @@ impl RowRef for MemRef { } } -impl RowValue for MemValue { +impl IRowValue for MemValue { fn to_ref(&self) -> MemRef { unimplemented!(); } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index ee475ee..82f7c6a 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -28,55 +28,39 @@ pub enum Error { Internal, } -pub trait Sto: Sized { - type Builder: RowBuilder; - type Store: RowStore; - type Ref: RowRef; - type Value: RowValue; -} - -pub struct Engine { +pub struct Engine { pub bucket: String, - pub row: T::Builder, -} - -pub enum AnyEngine { - InMemory(Engine), - Garage(Engine), -} -impl AnyEngine { - pub fn engine(&self) -> &Engine { - match self { - Self::InMemory(x) => x, - Self::Garage(x) => x, - } - } + pub row: RowBuilder, } // ------ Row Builder -pub trait RowBuilder +pub trait IRowBuilder { - fn 
row_store(&self) -> R::Store; + fn row_store(&self) -> RowStore; } +pub type RowBuilder = Box; // ------ Row Store -pub trait RowStore +pub trait IRowStore { - fn new_row(&self, partition: &str, sort: &str) -> R::Ref; + fn new_row(&self, partition: &str, sort: &str) -> RowRef; } +type RowStore = Box; // ------- Row Item -pub trait RowRef +pub trait IRowRef { - fn set_value(&self, content: Vec) -> R::Value; - async fn fetch(&self) -> Result; + fn set_value(&self, content: Vec) -> RowValue; + async fn fetch(&self) -> Result; async fn rm(&self) -> Result<(), Error>; - async fn poll(&self) -> Result, Error>; + async fn poll(&self) -> Result, Error>; } +type RowRef = Box; -pub trait RowValue +pub trait IRowValue { - fn to_ref(&self) -> R::Ref; + fn to_ref(&self) -> RowRef; fn content(&self) -> ConcurrentValues; async fn push(&self) -> Result<(), Error>; } +type RowValue = Box; -- cgit v1.2.3 From 9aa58194d44fef8b0b916f6c96edd124ce13bf7b Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 10:38:47 +0100 Subject: try dynamic dispatch --- src/login/mod.rs | 2 +- src/mail/incoming.rs | 5 ++--- src/storage/garage.rs | 17 +++++++++-------- src/storage/in_memory.rs | 17 +++++++++-------- src/storage/mod.rs | 28 +++++++++++++++++++++++----- 5 files changed, 44 insertions(+), 25 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index e87a17d..e934112 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -39,7 +39,7 @@ pub type ArcLoginProvider = Arc; #[derive(Clone, Debug)] pub struct Credentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: AnyEngine, + pub storage: Engine, /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V pub keys: CryptoKeys, } diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 7094b42..b7d2f48 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -23,7 +23,6 @@ use crate::mail::unique_ident::*; use crate::mail::user::User; use crate::mail::IMF; use crate::time::now_msec; -use crate::storage::Sto; const INCOMING_PK: &str = "incoming"; const INCOMING_LOCK_SK: &str = "lock"; @@ -140,14 +139,14 @@ async fn incoming_mail_watch_process_internal( Ok(()) } -async fn handle_incoming_mail( +async fn handle_incoming_mail( user: &Arc, s3: &S3Client, inbox: &Arc, lock_held: &watch::Receiver, ) -> Result<()> { let lor = ListObjectsV2Request { - bucket: user.creds.storage.engine::().bucket.clone(), + bucket: user.creds.storage.bucket.clone(), max_keys: Some(1000), prefix: Some("incoming/".into()), ..Default::default() diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 965953e..f2cc216 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,45 +1,46 @@ use crate::storage::*; +#[derive(Clone, Debug)] pub struct GrgCreds {} pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} impl IRowBuilder for GrgCreds { - fn row_store(&self) -> GrgStore { + fn row_store(&self) -> RowStore { unimplemented!(); } } impl IRowStore for GrgStore { - fn new_row(&self, partition: &str, sort: &str) -> GrgRef { + fn new_row(&self, partition: &str, sort: &str) -> RowRef { unimplemented!(); } } impl IRowRef for GrgRef { - fn set_value(&self, content: Vec) -> GrgValue { + fn set_value(&self, content: Vec) -> RowValue { unimplemented!(); } - async fn fetch(&self) -> Result { + fn fetch(&self) -> AsyncResult { unimplemented!(); } - async fn rm(&self) -> Result<(), Error> { + fn rm(&self) -> AsyncResult<()> { 
unimplemented!(); } - async fn poll(&self) -> Result, Error> { + fn poll(&self) -> AsyncResult> { unimplemented!(); } } impl IRowValue for GrgValue { - fn to_ref(&self) -> GrgRef { + fn to_ref(&self) -> RowRef { unimplemented!(); } fn content(&self) -> ConcurrentValues { unimplemented!(); } - async fn push(&self) -> Result<(), Error> { + fn push(&self) -> AsyncResult<()> { unimplemented!(); } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index dc3d1e1..fe7c93f 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -1,45 +1,46 @@ use crate::storage::*; +#[derive(Clone, Debug)] pub struct MemCreds {} pub struct MemStore {} pub struct MemRef {} pub struct MemValue {} impl IRowBuilder for MemCreds { - fn row_store(&self) -> MemStore { + fn row_store(&self) -> RowStore { unimplemented!(); } } impl IRowStore for MemStore { - fn new_row(&self, partition: &str, sort: &str) -> MemRef { + fn new_row(&self, partition: &str, sort: &str) -> RowRef { unimplemented!(); } } impl IRowRef for MemRef { - fn set_value(&self, content: Vec) -> MemValue { + fn set_value(&self, content: Vec) -> RowValue { unimplemented!(); } - async fn fetch(&self) -> Result { + fn fetch(&self) -> AsyncResult { unimplemented!(); } - async fn rm(&self) -> Result<(), Error> { + fn rm(&self) -> AsyncResult<()> { unimplemented!(); } - async fn poll(&self) -> Result, Error> { + fn poll(&self) -> AsyncResult> { unimplemented!(); } } impl IRowValue for MemValue { - fn to_ref(&self) -> MemRef { + fn to_ref(&self) -> RowRef { unimplemented!(); } fn content(&self) -> ConcurrentValues { unimplemented!(); } - async fn push(&self) -> Result<(), Error> { + fn push(&self) -> AsyncResult<()> { unimplemented!(); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 82f7c6a..b5c8518 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -8,6 +8,8 @@ * into the object system so it is not exposed. 
*/ +use futures::future::BoxFuture; + pub mod in_memory; pub mod garage; @@ -32,13 +34,29 @@ pub struct Engine { pub bucket: String, pub row: RowBuilder, } +impl Clone for Engine { + fn clone(&self) -> Self { + Engine { + bucket: "test".into(), + row: Box::new(in_memory::MemCreds{}) + } + } +} +impl std::fmt::Debug for Engine { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Engine").field("bucket", &self.bucket).finish() + } +} + +// A result +pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; // ------ Row Builder pub trait IRowBuilder { fn row_store(&self) -> RowStore; } -pub type RowBuilder = Box; +pub type RowBuilder = Box; // ------ Row Store pub trait IRowStore @@ -51,9 +69,9 @@ type RowStore = Box; pub trait IRowRef { fn set_value(&self, content: Vec) -> RowValue; - async fn fetch(&self) -> Result; - async fn rm(&self) -> Result<(), Error>; - async fn poll(&self) -> Result, Error>; + fn fetch(&self) -> AsyncResult; + fn rm(&self) -> AsyncResult<()>; + fn poll(&self) -> AsyncResult>; } type RowRef = Box; @@ -61,6 +79,6 @@ pub trait IRowValue { fn to_ref(&self) -> RowRef; fn content(&self) -> ConcurrentValues; - async fn push(&self) -> Result<(), Error>; + fn push(&self) -> AsyncResult<()>; } type RowValue = Box; -- cgit v1.2.3 From 73a6a0c014fd850a97eba175abe1ef8e2d0220b4 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 10:45:41 +0100 Subject: example usage of boxed futures --- src/storage/in_memory.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index fe7c93f..dd255ad 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -1,3 +1,4 @@ +use futures::FutureExt; use crate::storage::*; #[derive(Clone, Debug)] @@ -29,7 +30,9 @@ impl IRowRef for MemRef { unimplemented!(); } fn poll(&self) -> AsyncResult> { - unimplemented!(); + async { + Ok(None) + }.boxed() } } -- cgit v1.2.3 From 1f28832deaff3a2319cc88d5a83ffe506b784fc8 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 10:55:40 +0100 Subject: start replacing engine --- src/login/mod.rs | 4 ++-- src/storage/garage.rs | 2 +- src/storage/in_memory.rs | 2 +- src/storage/mod.rs | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index e934112..6c948cc 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -109,8 +109,8 @@ impl Region { impl Credentials { - pub fn k2v_client(&self) -> Result { - self.storage.k2v_client() + pub fn k2v_client(&self) -> Result { + self.storage.row.row_store() } pub fn s3_client(&self) -> Result { self.storage.s3_client() diff --git a/src/storage/garage.rs b/src/storage/garage.rs index f2cc216..c2ca1d3 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -7,7 +7,7 @@ pub struct GrgRef {} pub struct GrgValue {} impl IRowBuilder for GrgCreds { - fn row_store(&self) -> RowStore { + fn row_store(&self) -> Result { unimplemented!(); } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index dd255ad..6fa8138 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -8,7 +8,7 @@ pub struct MemRef {} pub struct MemValue {} impl IRowBuilder for MemCreds { - fn row_store(&self) -> RowStore { + fn row_store(&self) -> Result { unimplemented!(); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index b5c8518..c20853b 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -54,7 +54,7 @@ pub type 
AsyncResult<'a, T> = BoxFuture<'a, Result>; // ------ Row Builder pub trait IRowBuilder { - fn row_store(&self) -> RowStore; + fn row_store(&self) -> Result; } pub type RowBuilder = Box; @@ -63,7 +63,7 @@ pub trait IRowStore { fn new_row(&self, partition: &str, sort: &str) -> RowRef; } -type RowStore = Box; +pub type RowStore = Box; // ------- Row Item pub trait IRowRef -- cgit v1.2.3 From 553ea25f1854706b60ce6f087545968533ef6140 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 11:51:03 +0100 Subject: gradually implement our interface --- src/login/mod.rs | 2 +- src/mail/mailbox.rs | 9 +++---- src/storage/garage.rs | 8 +++++-- src/storage/in_memory.rs | 10 +++++--- src/storage/mod.rs | 61 ++++++++++++++++++++++++++++++++++++------------ 5 files changed, 65 insertions(+), 25 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index 6c948cc..afade28 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -109,7 +109,7 @@ impl Region { impl Credentials { - pub fn k2v_client(&self) -> Result { + pub fn k2v_client(&self) -> Result { self.storage.row.row_store() } pub fn s3_client(&self) -> Result { diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index d92140d..614382e 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -14,6 +14,7 @@ use crate::login::Credentials; use crate::mail::uidindex::*; use crate::mail::unique_ident::*; use crate::mail::IMF; +use crate::storage::{RowStore, BlobStore}; use crate::time::now_msec; pub struct Mailbox { @@ -50,8 +51,8 @@ impl Mailbox { id, bucket: creds.bucket().to_string(), encryption_key: creds.keys.master.clone(), - k2v: creds.k2v_client()?, - s3: creds.s3_client()?, + k2v: creds.storage.builders.row_store()?, + s3: creds.storage.builders.blob_store()?, uid_index, mail_path, }); @@ -186,8 +187,8 @@ struct MailboxInternal { mail_path: String, encryption_key: Key, - k2v: K2vClient, - s3: S3Client, + k2v: RowStore, + s3: BlobStore, uid_index: Bayou, } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index c2ca1d3..dfee88d 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -6,8 +6,12 @@ pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} -impl IRowBuilder for GrgCreds { - fn row_store(&self) -> Result { +impl IBuilder for GrgCreds { + fn row_store(&self) -> Result { + unimplemented!(); + } + + fn blob_store(&self) -> Result { unimplemented!(); } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 6fa8138..80e7fdf 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -2,13 +2,17 @@ use futures::FutureExt; use crate::storage::*; #[derive(Clone, Debug)] -pub struct MemCreds {} +pub struct FullMem {} pub struct MemStore {} pub struct MemRef {} pub struct MemValue {} -impl IRowBuilder for MemCreds { - fn row_store(&self) -> Result { +impl IBuilder for FullMem { + fn row_store(&self) -> Result { + unimplemented!(); + } + + fn blob_store(&self) -> Result { unimplemented!(); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c20853b..a2bdd43 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -25,20 +25,30 @@ pub enum Alternative { type ConcurrentValues = Vec; #[derive(Debug)] -pub enum Error { +pub enum StorageError { NotFound, Internal, } +impl std::fmt::Display for StorageError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("Storage Error: "); + match self { + Self::NotFound => f.write_str("Item not found"), + Self::Internal => f.write_str("An internal error 
occured"), + } + } +} +impl std::error::Error for StorageError {} pub struct Engine { pub bucket: String, - pub row: RowBuilder, + pub builders: Builder, } impl Clone for Engine { fn clone(&self) -> Self { Engine { bucket: "test".into(), - row: Box::new(in_memory::MemCreds{}) + builders: Box::new(in_memory::FullMem{}) } } } @@ -48,24 +58,22 @@ impl std::fmt::Debug for Engine { } } -// A result -pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; +// Utils +pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; -// ------ Row Builder -pub trait IRowBuilder -{ - fn row_store(&self) -> Result; +pub trait IBuilder { + fn row_store(&self) -> Result; + fn blob_store(&self) -> Result; } -pub type RowBuilder = Box; +pub type Builder = Box; -// ------ Row Store +// ------ Row pub trait IRowStore { fn new_row(&self, partition: &str, sort: &str) -> RowRef; } -pub type RowStore = Box; +pub type RowStore = Box; -// ------- Row Item pub trait IRowRef { fn set_value(&self, content: Vec) -> RowValue; @@ -73,7 +81,7 @@ pub trait IRowRef fn rm(&self) -> AsyncResult<()>; fn poll(&self) -> AsyncResult>; } -type RowRef = Box; +pub type RowRef = Box; pub trait IRowValue { @@ -81,4 +89,27 @@ pub trait IRowValue fn content(&self) -> ConcurrentValues; fn push(&self) -> AsyncResult<()>; } -type RowValue = Box; +pub type RowValue = Box; + +// ------- Blob +pub trait IBlobStore +{ + fn new_blob(&self, key: &str) -> BlobRef; + fn list(&self) -> AsyncResult>; +} +pub type BlobStore = Box; + +pub trait IBlobRef +{ + fn set_value(&self, content: Vec) -> BlobValue; + fn fetch(&self) -> AsyncResult; + fn copy(&self, dst: &BlobRef) -> AsyncResult<()>; + fn rm(&self) -> AsyncResult<()>; +} +pub type BlobRef = Box; + +pub trait IBlobValue { + fn to_ref(&self) -> BlobRef; + fn push(&self) -> AsyncResult<()>; +} +pub type BlobValue = Box; -- cgit v1.2.3 From 3b363b2a7803564231e001c215ab427c99c9435b Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 12:18:43 +0100 Subject: implement equality+cmp for builders based on url --- src/login/mod.rs | 13 +++++-------- src/mail/user.rs | 3 ++- src/storage/garage.rs | 8 ++++++-- src/storage/in_memory.rs | 8 ++++++-- src/storage/mod.rs | 43 +++++++++++++++++++++++++------------------ 5 files changed, 44 insertions(+), 31 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index afade28..f4bf4d2 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -39,7 +39,7 @@ pub type ArcLoginProvider = Arc; #[derive(Clone, Debug)] pub struct Credentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: Engine, + pub storage: Builders, /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V pub keys: CryptoKeys, } @@ -109,14 +109,11 @@ impl Region { impl Credentials { - pub fn k2v_client(&self) -> Result { - self.storage.row.row_store() + pub fn row_client(&self) -> Result { + Ok(self.storage.row_store()?) } - pub fn s3_client(&self) -> Result { - self.storage.s3_client() - } - pub fn bucket(&self) -> &str { - self.storage.bucket.as_str() + pub fn blob_client(&self) -> Result { + Ok(self.storage.blob_store()?) 
} } diff --git a/src/mail/user.rs b/src/mail/user.rs index 5523c2a..2104455 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -13,6 +13,7 @@ use crate::mail::incoming::incoming_mail_watch_process; use crate::mail::mailbox::Mailbox; use crate::mail::uidindex::ImapUidvalidity; use crate::mail::unique_ident::{gen_ident, UniqueIdent}; +use crate::storage; use crate::time::now_msec; pub const MAILBOX_HIERARCHY_DELIMITER: char = '.'; @@ -455,6 +456,6 @@ enum CreatedMailbox { // ---- User cache ---- lazy_static! { - static ref USER_CACHE: std::sync::Mutex>> = + static ref USER_CACHE: std::sync::Mutex>> = std::sync::Mutex::new(HashMap::new()); } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index dfee88d..6dea00c 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,12 +1,12 @@ use crate::storage::*; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Hash)] pub struct GrgCreds {} pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} -impl IBuilder for GrgCreds { +impl IBuilders for GrgCreds { fn row_store(&self) -> Result { unimplemented!(); } @@ -14,6 +14,10 @@ impl IBuilder for GrgCreds { fn blob_store(&self) -> Result { unimplemented!(); } + + fn url(&self) -> &str { + return "grg://unimplemented;" + } } impl IRowStore for GrgStore { diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 80e7fdf..5cc8ef8 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -1,13 +1,13 @@ use futures::FutureExt; use crate::storage::*; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Hash)] pub struct FullMem {} pub struct MemStore {} pub struct MemRef {} pub struct MemValue {} -impl IBuilder for FullMem { +impl IBuilders for FullMem { fn row_store(&self) -> Result { unimplemented!(); } @@ -15,6 +15,10 @@ impl IBuilder for FullMem { fn blob_store(&self) -> Result { unimplemented!(); } + + fn url(&self) -> &str { + return "mem://unimplemented;" + } } impl IRowStore for MemStore { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index a2bdd43..0939463 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -8,6 +8,7 @@ * into the object system so it is not exposed. 
*/ +use std::hash::{Hash, Hasher}; use futures::future::BoxFuture; pub mod in_memory; @@ -40,32 +41,38 @@ impl std::fmt::Display for StorageError { } impl std::error::Error for StorageError {} -pub struct Engine { - pub bucket: String, - pub builders: Builder, +// Utils +pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; + +// ----- Builders +pub trait IBuilders { + fn row_store(&self) -> Result; + fn blob_store(&self) -> Result; + fn url(&self) -> &str; } -impl Clone for Engine { +pub type Builders = Box; +impl Clone for Builders { fn clone(&self) -> Self { - Engine { - bucket: "test".into(), - builders: Box::new(in_memory::FullMem{}) - } + // @FIXME write a real implementation with a box_clone function + Box::new(in_memory::FullMem{}) } } -impl std::fmt::Debug for Engine { +impl std::fmt::Debug for Builders { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Engine").field("bucket", &self.bucket).finish() + f.write_str("aerogramme::storage::Builder") } } - -// Utils -pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; - -pub trait IBuilder { - fn row_store(&self) -> Result; - fn blob_store(&self) -> Result; +impl PartialEq for Builders { + fn eq(&self, other: &Self) -> bool { + self.url() == other.url() + } +} +impl Eq for Builders {} +impl Hash for Builders { + fn hash(&self, state: &mut H) { + self.url().hash(state); + } } -pub type Builder = Box; // ------ Row pub trait IRowStore -- cgit v1.2.3 From 1e192f93d5bf544c82fe91fb799d77e8b5d53afe Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 12:58:45 +0100 Subject: make all our objects send+sync --- src/mail/incoming.rs | 6 +++--- src/mail/user.rs | 34 ++++++++++++++++++---------------- src/storage/garage.rs | 2 +- src/storage/in_memory.rs | 2 +- src/storage/mod.rs | 10 +++++----- 5 files changed, 28 insertions(+), 26 deletions(-) (limited to 'src') diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index b7d2f48..3ea7d6a 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -54,10 +54,10 @@ async fn incoming_mail_watch_process_internal( creds: Credentials, mut rx_inbox_id: watch::Receiver>, ) -> Result<()> { - let mut lock_held = k2v_lock_loop(creds.k2v_client()?, INCOMING_PK, INCOMING_LOCK_SK); + let mut lock_held = k2v_lock_loop(creds.row_client()?, INCOMING_PK, INCOMING_LOCK_SK); - let k2v = creds.k2v_client()?; - let s3 = creds.s3_client()?; + let k2v = creds.row_client()?; + let s3 = creds.blob_client()?; let mut inbox: Option> = None; let mut prev_ct: Option = None; diff --git a/src/mail/user.rs b/src/mail/user.rs index 2104455..3b8d4e7 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -34,7 +34,7 @@ const MAILBOX_LIST_SK: &str = "list"; pub struct User { pub username: String, pub creds: Credentials, - pub k2v: K2vClient, + pub k2v: storage::RowStore, pub mailboxes: std::sync::Mutex>>, tx_inbox_id: watch::Sender>, @@ -174,7 +174,7 @@ impl User { // ---- Internal user & mailbox management ---- async fn open(username: String, creds: Credentials) -> Result> { - let k2v = creds.k2v_client()?; + let k2v = creds.row_client()?; let (tx_inbox_id, rx_inbox_id) = watch::channel(None); @@ -224,32 +224,32 @@ impl User { // ---- Mailbox list management ---- - async fn load_mailbox_list(&self) -> Result<(MailboxList, Option)> { - let (mut list, ct) = match self.k2v.read_item(MAILBOX_LIST_PK, MAILBOX_LIST_SK).await { - Err(k2v_client::Error::NotFound) => (MailboxList::new(), None), + async fn load_mailbox_list(&self) -> Result<(MailboxList, Option)> { + let (mut 
list, row) = match self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK).fetch().await { + Err(storage::StorageError::NotFound) => (MailboxList::new(), None), Err(e) => return Err(e.into()), - Ok(cv) => { + Ok(rv) => { let mut list = MailboxList::new(); - for v in cv.value { - if let K2vValue::Value(vbytes) = v { + for v in rv.content() { + if let storage::Alternative::Value(vbytes) = v { let list2 = open_deserialize::(&vbytes, &self.creds.keys.master)?; list.merge(list2); } } - (list, Some(cv.causality)) + (list, Some(rv.to_ref())) } }; - self.ensure_inbox_exists(&mut list, &ct).await?; + self.ensure_inbox_exists(&mut list, &row).await?; - Ok((list, ct)) + Ok((list, row)) } async fn ensure_inbox_exists( &self, list: &mut MailboxList, - ct: &Option, + ct: &Option, ) -> Result { // If INBOX doesn't exist, create a new mailbox with that name // and save new mailbox list. @@ -278,12 +278,14 @@ impl User { async fn save_mailbox_list( &self, list: &MailboxList, - ct: Option, + ct: Option, ) -> Result<()> { let list_blob = seal_serialize(list, &self.creds.keys.master)?; - self.k2v - .insert_item(MAILBOX_LIST_PK, MAILBOX_LIST_SK, list_blob, ct) - .await?; + let rref = match ct { + Some(x) => x, + None => self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK), + }; + rref.set_value(list_blob).push().await?; Ok(()) } } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 6dea00c..595a57c 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -21,7 +21,7 @@ impl IBuilders for GrgCreds { } impl IRowStore for GrgStore { - fn new_row(&self, partition: &str, sort: &str) -> RowRef { + fn row(&self, partition: &str, sort: &str) -> RowRef { unimplemented!(); } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 5cc8ef8..19b55b9 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -22,7 +22,7 @@ impl IBuilders for FullMem { } impl IRowStore for MemStore { - fn new_row(&self, partition: &str, sort: &str) -> RowRef { + fn row(&self, partition: &str, sort: &str) -> RowRef { unimplemented!(); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 0939463..3e66e84 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -77,7 +77,7 @@ impl Hash for Builders { // ------ Row pub trait IRowStore { - fn new_row(&self, partition: &str, sort: &str) -> RowRef; + fn row(&self, partition: &str, sort: &str) -> RowRef; } pub type RowStore = Box; @@ -88,7 +88,7 @@ pub trait IRowRef fn rm(&self) -> AsyncResult<()>; fn poll(&self) -> AsyncResult>; } -pub type RowRef = Box; +pub type RowRef = Box; pub trait IRowValue { @@ -96,7 +96,7 @@ pub trait IRowValue fn content(&self) -> ConcurrentValues; fn push(&self) -> AsyncResult<()>; } -pub type RowValue = Box; +pub type RowValue = Box; // ------- Blob pub trait IBlobStore @@ -113,10 +113,10 @@ pub trait IBlobRef fn copy(&self, dst: &BlobRef) -> AsyncResult<()>; fn rm(&self) -> AsyncResult<()>; } -pub type BlobRef = Box; +pub type BlobRef = Box; pub trait IBlobValue { fn to_ref(&self) -> BlobRef; fn push(&self) -> AsyncResult<()>; } -pub type BlobValue = Box; +pub type BlobValue = Box; -- cgit v1.2.3 From a65f5b25894faa9802d274beb394f40062c65bae Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 15:28:19 +0100 Subject: WIP rewrite mail/incoming --- src/k2v_util.rs | 11 +++---- src/mail/incoming.rs | 81 +++++++++++++++++------------------------------- src/mail/mailbox.rs | 6 ++-- src/storage/garage.rs | 6 +++- src/storage/in_memory.rs | 9 ++++-- src/storage/mod.rs | 15 +++++++-- 6 files changed, 58 
insertions(+), 70 deletions(-) (limited to 'src') diff --git a/src/k2v_util.rs b/src/k2v_util.rs index 9dadab4..3cd969b 100644 --- a/src/k2v_util.rs +++ b/src/k2v_util.rs @@ -1,14 +1,10 @@ +/* use anyhow::Result; - -use k2v_client::{CausalValue, CausalityToken, K2vClient}; - // ---- UTIL: function to wait for a value to have changed in K2V ---- pub async fn k2v_wait_value_changed( - k2v: &K2vClient, - pk: &str, - sk: &str, - prev_ct: &Option, + k2v: &storage::RowStore, + key: &storage::RowRef, ) -> Result { loop { if let Some(ct) = prev_ct { @@ -27,3 +23,4 @@ pub async fn k2v_wait_value_changed( } } } +*/ diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 3ea7d6a..4e3fc8c 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -22,6 +22,7 @@ use crate::mail::uidindex::ImapUidvalidity; use crate::mail::unique_ident::*; use crate::mail::user::User; use crate::mail::IMF; +use crate::storage; use crate::time::now_msec; const INCOMING_PK: &str = "incoming"; @@ -60,18 +61,17 @@ async fn incoming_mail_watch_process_internal( let s3 = creds.blob_client()?; let mut inbox: Option> = None; - let mut prev_ct: Option = None; + let mut incoming_key = k2v.row(INCOMING_PK, INCOMING_WATCH_SK); loop { - let new_mail = if *lock_held.borrow() { + let maybe_updated_incoming_key = if *lock_held.borrow() { info!("incoming lock held"); let wait_new_mail = async { loop { - match k2v_wait_value_changed(&k2v, INCOMING_PK, INCOMING_WATCH_SK, &prev_ct) - .await + match incoming_key.poll().await { - Ok(cv) => break cv, + Ok(row_val) => break row_val.to_ref(), Err(e) => { error!("Error in wait_new_mail: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; @@ -81,10 +81,10 @@ async fn incoming_mail_watch_process_internal( }; tokio::select! { - cv = wait_new_mail => Some(cv.causality), - _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => prev_ct.clone(), - _ = lock_held.changed() => None, - _ = rx_inbox_id.changed() => None, + inc_k = wait_new_mail => Some(inc_k), + _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(incoming_key.clone()), + _ = lock_held.changed() => None, + _ = rx_inbox_id.changed() => None, } } else { info!("incoming lock not held"); @@ -123,10 +123,10 @@ async fn incoming_mail_watch_process_internal( // If we were able to open INBOX, and we have mail, // fetch new mail - if let (Some(inbox), Some(new_ct)) = (&inbox, new_mail) { + if let (Some(inbox), Some(updated_incoming_key)) = (&inbox, maybe_updated_incoming_key) { match handle_incoming_mail(&user, &s3, inbox, &lock_held).await { Ok(()) => { - prev_ct = Some(new_ct); + incoming_key = updated_incoming_key; } Err(e) => { error!("Could not fetch incoming mail: {}", e); @@ -141,27 +141,20 @@ async fn incoming_mail_watch_process_internal( async fn handle_incoming_mail( user: &Arc, - s3: &S3Client, + blobs: &storage::BlobStore, inbox: &Arc, lock_held: &watch::Receiver, ) -> Result<()> { - let lor = ListObjectsV2Request { - bucket: user.creds.storage.bucket.clone(), - max_keys: Some(1000), - prefix: Some("incoming/".into()), - ..Default::default() - }; - let mails_res = s3.list_objects_v2(lor).await?; + let mails_res = blobs.list("incoming/").await?; - for object in mails_res.contents.unwrap_or_default() { + for object in mails_res { if !*lock_held.borrow() { break; } - if let Some(key) = object.key { - if let Some(mail_id) = key.strip_prefix("incoming/") { - if let Ok(mail_id) = mail_id.parse::() { - move_incoming_message(user, s3, inbox, mail_id).await?; - } + let key = object.key(); + if let Some(mail_id) = 
key.strip_prefix("incoming/") { + if let Ok(mail_id) = mail_id.parse::() { + move_incoming_message(user, blobs, inbox, mail_id).await?; } } } @@ -171,7 +164,7 @@ async fn handle_incoming_mail( async fn move_incoming_message( user: &Arc, - s3: &S3Client, + s3: &storage::BlobStore, inbox: &Arc, id: UniqueIdent, ) -> Result<()> { @@ -180,20 +173,12 @@ async fn move_incoming_message( let object_key = format!("incoming/{}", id); // 1. Fetch message from S3 - let gor = GetObjectRequest { - bucket: user.creds.storage.bucket.clone(), - key: object_key.clone(), - ..Default::default() - }; - let get_result = s3.get_object(gor).await?; + let object = s3.blob(&object_key).fetch().await?; // 1.a decrypt message key from headers - info!("Object metadata: {:?}", get_result.metadata); - let key_encrypted_b64 = get_result - .metadata - .as_ref() - .ok_or(anyhow!("Missing key in metadata"))? - .get(MESSAGE_KEY) + //info!("Object metadata: {:?}", get_result.metadata); + let key_encrypted_b64 = object + .get_meta(MESSAGE_KEY) .ok_or(anyhow!("Missing key in metadata"))?; let key_encrypted = base64::decode(key_encrypted_b64)?; let message_key = sodiumoxide::crypto::sealedbox::open( @@ -206,13 +191,8 @@ async fn move_incoming_message( cryptoblob::Key::from_slice(&message_key).ok_or(anyhow!("Invalid message key"))?; // 1.b retrieve message body - let obj_body = get_result.body.ok_or(anyhow!("Missing object body"))?; - let mut mail_buf = Vec::with_capacity(get_result.content_length.unwrap_or(128) as usize); - obj_body - .into_async_read() - .read_to_end(&mut mail_buf) - .await?; - let plain_mail = cryptoblob::open(&mail_buf, &message_key) + let obj_body = object.content().ok_or(anyhow!("Missing object body"))?; + let plain_mail = cryptoblob::open(&obj_body, &message_key) .map_err(|_| anyhow!("Cannot decrypt email content"))?; // 2 parse mail and add to inbox @@ -222,19 +202,14 @@ async fn move_incoming_message( .await?; // 3 delete from incoming - let dor = DeleteObjectRequest { - bucket: user.creds.storage.bucket.clone(), - key: object_key.clone(), - ..Default::default() - }; - s3.delete_object(dor).await?; + object.to_ref().rm().await?; Ok(()) } // ---- UTIL: K2V locking loop, use this to try to grab a lock using a K2V entry as a signal ---- -fn k2v_lock_loop(k2v: K2vClient, pk: &'static str, sk: &'static str) -> watch::Receiver { +fn k2v_lock_loop(k2v: storage::RowStore, pk: &'static str, sk: &'static str) -> watch::Receiver { let (held_tx, held_rx) = watch::channel(false); tokio::spawn(k2v_lock_loop_internal(k2v, pk, sk, held_tx)); @@ -250,7 +225,7 @@ enum LockState { } async fn k2v_lock_loop_internal( - k2v: K2vClient, + k2v: storage::RowStore, pk: &'static str, sk: &'static str, held_tx: watch::Sender, diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 614382e..581f432 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -49,10 +49,9 @@ impl Mailbox { let mbox = RwLock::new(MailboxInternal { id, - bucket: creds.bucket().to_string(), encryption_key: creds.keys.master.clone(), - k2v: creds.storage.builders.row_store()?, - s3: creds.storage.builders.blob_store()?, + k2v: creds.storage.row_store()?, + s3: creds.storage.blob_store()?, uid_index, mail_path, }); @@ -183,7 +182,6 @@ struct MailboxInternal { // 2023-05-15 will probably be used later. 
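// Illustrative sketch (not from the patch): the incoming-mail hunks above replace raw
// rusoto S3 calls with the new BlobStore/BlobRef/BlobValue trait objects. Assuming the
// methods used there and declared in this commit's src/storage/mod.rs hunk (blob(),
// fetch(), get_meta(), content(), to_ref().rm()), the fetch-and-delete path for one
// incoming message looks roughly like this:
//
//     async fn pull_incoming(blobs: &storage::BlobStore, id: &str) -> anyhow::Result<Vec<u8>> {
//         // Fetch the encrypted object together with its metadata in one call.
//         let obj = blobs.blob(&format!("incoming/{}", id)).fetch().await?;
//         // MESSAGE_KEY (const in mail/incoming.rs) names the metadata entry carrying the sealed message key.
//         let _key_b64 = obj.get_meta(MESSAGE_KEY).ok_or(anyhow::anyhow!("missing key"))?;
//         // The body is now an optional byte slice instead of a rusoto ByteStream.
//         let body = obj.content().ok_or(anyhow::anyhow!("missing body"))?.to_vec();
//         // Drop the object from incoming/ once it has been handled.
//         obj.to_ref().rm().await?;
//         Ok(body)
//     }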
#[allow(dead_code)] id: UniqueIdent, - bucket: String, mail_path: String, encryption_key: Key, diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 595a57c..46da4aa 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -27,6 +27,10 @@ impl IRowStore for GrgStore { } impl IRowRef for GrgRef { + fn clone_boxed(&self) -> RowRef { + unimplemented!(); + } + fn set_value(&self, content: Vec) -> RowValue { unimplemented!(); } @@ -36,7 +40,7 @@ impl IRowRef for GrgRef { fn rm(&self) -> AsyncResult<()> { unimplemented!(); } - fn poll(&self) -> AsyncResult> { + fn poll(&self) -> AsyncResult { unimplemented!(); } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 19b55b9..144a52f 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -28,6 +28,10 @@ impl IRowStore for MemStore { } impl IRowRef for MemRef { + fn clone_boxed(&self) -> RowRef { + unimplemented!(); + } + fn set_value(&self, content: Vec) -> RowValue { unimplemented!(); } @@ -37,9 +41,10 @@ impl IRowRef for MemRef { fn rm(&self) -> AsyncResult<()> { unimplemented!(); } - fn poll(&self) -> AsyncResult> { + fn poll(&self) -> AsyncResult { async { - Ok(None) + let rv: RowValue = Box::new(MemValue{}); + Ok(rv) }.boxed() } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 3e66e84..c5ed1f8 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -83,12 +83,18 @@ pub type RowStore = Box; pub trait IRowRef { + fn clone_boxed(&self) -> RowRef; fn set_value(&self, content: Vec) -> RowValue; fn fetch(&self) -> AsyncResult; fn rm(&self) -> AsyncResult<()>; - fn poll(&self) -> AsyncResult>; + fn poll(&self) -> AsyncResult; } pub type RowRef = Box; +impl Clone for RowRef { + fn clone(&self) -> Self { + return self.clone_boxed() + } +} pub trait IRowValue { @@ -101,14 +107,15 @@ pub type RowValue = Box; // ------- Blob pub trait IBlobStore { - fn new_blob(&self, key: &str) -> BlobRef; - fn list(&self) -> AsyncResult>; + fn blob(&self, key: &str) -> BlobRef; + fn list(&self, prefix: &str) -> AsyncResult>; } pub type BlobStore = Box; pub trait IBlobRef { fn set_value(&self, content: Vec) -> BlobValue; + fn key(&self) -> &str; fn fetch(&self) -> AsyncResult; fn copy(&self, dst: &BlobRef) -> AsyncResult<()>; fn rm(&self) -> AsyncResult<()>; @@ -117,6 +124,8 @@ pub type BlobRef = Box; pub trait IBlobValue { fn to_ref(&self) -> BlobRef; + fn get_meta(&self, key: &str) -> Option<&[u8]>; + fn content(&self) -> Option<&[u8]>; fn push(&self) -> AsyncResult<()>; } pub type BlobValue = Box; -- cgit v1.2.3 From bf67935c54f5f66f4cab4ceb58c1b5831b9421b0 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 16:17:11 +0100 Subject: add rust analyzer to the shell --- src/mail/incoming.rs | 2 +- src/mail/mailbox.rs | 18 ++++++------------ 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 4e3fc8c..e550e98 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -198,7 +198,7 @@ async fn move_incoming_message( // 2 parse mail and add to inbox let msg = IMF::try_from(&plain_mail[..]).map_err(|_| anyhow!("Invalid email body"))?; inbox - .append_from_s3(msg, id, &object_key, message_key) + .append_from_s3(msg, id, object.to_ref(), message_key) .await?; // 3 delete from incoming diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 581f432..83039d5 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -14,7 +14,7 @@ use crate::login::Credentials; use crate::mail::uidindex::*; use 
crate::mail::unique_ident::*; use crate::mail::IMF; -use crate::storage::{RowStore, BlobStore}; +use crate::storage::{RowStore, BlobStore, self}; use crate::time::now_msec; pub struct Mailbox { @@ -121,13 +121,13 @@ impl Mailbox { &self, msg: IMF<'a>, ident: UniqueIdent, - s3_key: &str, + blob_ref: storage::BlobRef, message_key: Key, ) -> Result<()> { self.mbox .write() .await - .append_from_s3(msg, ident, s3_key, message_key) + .append_from_s3(msg, ident, blob_ref, message_key) .await } @@ -348,20 +348,14 @@ impl MailboxInternal { &mut self, mail: IMF<'a>, ident: UniqueIdent, - s3_key: &str, + blob_ref: storage::BlobRef, message_key: Key, ) -> Result<()> { futures::try_join!( async { // Copy mail body from previous location - let cor = CopyObjectRequest { - bucket: self.bucket.clone(), - key: format!("{}/{}", self.mail_path, ident), - copy_source: format!("{}/{}", self.bucket, s3_key), - metadata_directive: Some("REPLACE".into()), - ..Default::default() - }; - self.s3.copy_object(cor).await?; + let dst = self.s3.blob(format!("{}/{}", self.mail_path, ident)); + blob_ref.copy(dst).await?; Ok::<_, anyhow::Error>(()) }, async { -- cgit v1.2.3 From 652da6efd35f198289ba3de26b60eb2e228de73a Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 2 Nov 2023 17:25:56 +0100 Subject: converted incoming mail --- src/mail/mailbox.rs | 106 ++++++++++++---------------------------------------- src/storage/mod.rs | 14 ++++--- 2 files changed, 32 insertions(+), 88 deletions(-) (limited to 'src') diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 83039d5..e8111df 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -1,11 +1,5 @@ use anyhow::{anyhow, bail, Result}; -use k2v_client::K2vClient; -use k2v_client::{BatchReadOp, Filter, K2vValue}; -use rusoto_s3::{ - CopyObjectRequest, DeleteObjectRequest, GetObjectRequest, PutObjectRequest, S3Client, S3, -}; use serde::{Deserialize, Serialize}; -use tokio::io::AsyncReadExt; use tokio::sync::RwLock; use crate::bayou::Bayou; @@ -206,35 +200,18 @@ impl MailboxInternal { async fn fetch_meta(&self, ids: &[UniqueIdent]) -> Result> { let ids = ids.iter().map(|x| x.to_string()).collect::>(); - let ops = ids - .iter() - .map(|id| BatchReadOp { - partition_key: &self.mail_path, - filter: Filter { - start: Some(id), - end: None, - prefix: None, - limit: None, - reverse: false, - }, - single_item: true, - conflicts_only: false, - tombstones: false, - }) - .collect::>(); - let res_vec = self.k2v.read_batch(&ops).await?; + let ops = ids.iter().map(|id| (self.mail_path.as_str(), id.as_str())).collect::>(); + let res_vec = self.k2v.select(storage::Selector::List(ops)).await?; let mut meta_vec = vec![]; - for (op, res) in ops.iter().zip(res_vec.into_iter()) { - if res.items.len() != 1 { - bail!("Expected 1 item, got {}", res.items.len()); - } - let (_, cv) = res.items.iter().next().unwrap(); + for res in res_vec.into_iter() { let mut meta_opt = None; - for v in cv.value.iter() { + + // Resolve conflicts + for v in res.content().iter() { match v { - K2vValue::Tombstone => (), - K2vValue::Value(v) => { + storage::Alternative::Tombstone => (), + storage::Alternative::Value(v) => { let meta = open_deserialize::(v, &self.encryption_key)?; match meta_opt.as_mut() { None => { @@ -250,7 +227,7 @@ impl MailboxInternal { if let Some(meta) = meta_opt { meta_vec.push(meta); } else { - bail!("No valid meta value in k2v for {:?}", op.filter.start); + bail!("No valid meta value in k2v for {:?}", res.to_ref().sk()); } } @@ -258,19 +235,9 @@ impl MailboxInternal { } async fn 
fetch_full(&self, id: UniqueIdent, message_key: &Key) -> Result> { - let gor = GetObjectRequest { - bucket: self.bucket.clone(), - key: format!("{}/{}", self.mail_path, id), - ..Default::default() - }; - - let obj_res = self.s3.get_object(gor).await?; - - let obj_body = obj_res.body.ok_or(anyhow!("Missing object body"))?; - let mut buf = Vec::with_capacity(obj_res.content_length.unwrap_or(128) as usize); - obj_body.into_async_read().read_to_end(&mut buf).await?; - - cryptoblob::open(&buf, message_key) + let obj_res = self.s3.blob(&format!("{}/{}", self.mail_path, id)).fetch().await?; + let body = obj_res.content().ok_or(anyhow!("missing body"))?; + cryptoblob::open(body, message_key) } // ---- Functions for changing the mailbox ---- @@ -303,13 +270,7 @@ impl MailboxInternal { async { // Encrypt and save mail body let message_blob = cryptoblob::seal(mail.raw, &message_key)?; - let por = PutObjectRequest { - bucket: self.bucket.clone(), - key: format!("{}/{}", self.mail_path, ident), - body: Some(message_blob.into()), - ..Default::default() - }; - self.s3.put_object(por).await?; + self.s3.blob(&format!("{}/{}", self.mail_path, ident)).set_value(message_blob).push().await?; Ok::<_, anyhow::Error>(()) }, async { @@ -321,9 +282,7 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.k2v - .insert_item(&self.mail_path, &ident.to_string(), meta_blob, None) - .await?; + self.k2v.row(&self.mail_path, &ident.to_string()).set_value(meta_blob).push().await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -354,8 +313,8 @@ impl MailboxInternal { futures::try_join!( async { // Copy mail body from previous location - let dst = self.s3.blob(format!("{}/{}", self.mail_path, ident)); - blob_ref.copy(dst).await?; + let dst = self.s3.blob(&format!("{}/{}", self.mail_path, ident)); + blob_ref.copy(&dst).await?; Ok::<_, anyhow::Error>(()) }, async { @@ -367,9 +326,7 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.k2v - .insert_item(&self.mail_path, &ident.to_string(), meta_blob, None) - .await?; + self.k2v.row(&self.mail_path, &ident.to_string()).set_value(meta_blob).push().await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -393,21 +350,13 @@ impl MailboxInternal { futures::try_join!( async { // Delete mail body from S3 - let dor = DeleteObjectRequest { - bucket: self.bucket.clone(), - key: format!("{}/{}", self.mail_path, ident), - ..Default::default() - }; - self.s3.delete_object(dor).await?; + self.s3.blob(&format!("{}/{}", self.mail_path, ident)).rm().await?; Ok::<_, anyhow::Error>(()) }, async { // Delete mail meta from K2V let sk = ident.to_string(); - let v = self.k2v.read_item(&self.mail_path, &sk).await?; - self.k2v - .delete_item(&self.mail_path, &sk, v.causality) - .await?; + self.k2v.row(&self.mail_path, &sk).fetch().await?.to_ref().rm().await?; Ok::<_, anyhow::Error>(()) } )?; @@ -438,7 +387,7 @@ impl MailboxInternal { source_id: UniqueIdent, new_id: UniqueIdent, ) -> Result<()> { - if self.bucket != from.bucket || self.encryption_key != from.encryption_key { + if self.encryption_key != from.encryption_key { bail!("Message to be copied/moved does not belong to same account."); } @@ -453,24 +402,15 @@ impl MailboxInternal { futures::try_join!( async { - // Copy mail body from S3 - let cor = CopyObjectRequest { - bucket: self.bucket.clone(), - key: format!("{}/{}", self.mail_path, new_id), - copy_source: 
format!("{}/{}/{}", from.bucket, from.mail_path, source_id), - ..Default::default() - }; - - self.s3.copy_object(cor).await?; + let dst = self.s3.blob(&format!("{}/{}", self.mail_path, new_id)); + self.s3.blob(&format!("{}/{}", from.mail_path, source_id)).copy(&dst).await?; Ok::<_, anyhow::Error>(()) }, async { // Copy mail meta in K2V let meta = &from.fetch_meta(&[source_id]).await?[0]; let meta_blob = seal_serialize(meta, &self.encryption_key)?; - self.k2v - .insert_item(&self.mail_path, &new_id.to_string(), meta_blob, None) - .await?; + self.k2v.row(&self.mail_path, &new_id.to_string()).set_value(meta_blob).push().await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync(), diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c5ed1f8..b687959 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -14,17 +14,18 @@ use futures::future::BoxFuture; pub mod in_memory; pub mod garage; -pub enum Selector<'a> { - Range{ begin: &'a str, end: &'a str }, - Filter(u64), -} - pub enum Alternative { Tombstone, Value(Vec), } type ConcurrentValues = Vec; +pub enum Selector<'a> { + Range { begin: &'a str, end: &'a str }, + List (Vec<(&'a str, &'a str)>), + Prefix (&'a str), +} + #[derive(Debug)] pub enum StorageError { NotFound, @@ -78,12 +79,15 @@ impl Hash for Builders { pub trait IRowStore { fn row(&self, partition: &str, sort: &str) -> RowRef; + fn select(&self, selector: Selector) -> AsyncResult>; } pub type RowStore = Box; pub trait IRowRef { fn clone_boxed(&self) -> RowRef; + fn pk(&self) -> &str; + fn sk(&self) -> &str; fn set_value(&self, content: Vec) -> RowValue; fn fetch(&self) -> AsyncResult; fn rm(&self) -> AsyncResult<()>; -- cgit v1.2.3 From 916b27d87ec7f5bff41f9dd888914d50ae067fc0 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 15 Nov 2023 15:56:43 +0100 Subject: WIP refactor storage (new timestamp.rs file) --- src/bayou.rs | 94 +++++++----------------------------------------- src/mail/incoming.rs | 3 +- src/mail/mailbox.rs | 4 +-- src/mail/unique_ident.rs | 2 +- src/mail/user.rs | 3 +- src/main.rs | 2 +- src/storage/garage.rs | 8 +++++ src/storage/in_memory.rs | 8 +++++ src/storage/mod.rs | 3 +- src/time.rs | 9 ----- src/timestamp.rs | 65 +++++++++++++++++++++++++++++++++ 11 files changed, 101 insertions(+), 100 deletions(-) delete mode 100644 src/time.rs create mode 100644 src/timestamp.rs (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index 9f70017..3201783 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -15,9 +15,9 @@ use rusoto_s3::{ }; use crate::cryptoblob::*; -use crate::k2v_util::k2v_wait_value_changed; use crate::login::Credentials; -use crate::time::now_msec; +use crate::timestamp::*; +use crate::storage; const KEEP_STATE_EVERY: usize = 64; @@ -48,12 +48,11 @@ pub trait BayouState: } pub struct Bayou { - bucket: String, path: String, key: Key, - k2v: K2vClient, - s3: S3Client, + k2v: storage::RowStore, + s3: storage::BlobStore, checkpoint: (Timestamp, S), history: Vec<(Timestamp, S::Op, Option)>, @@ -67,13 +66,12 @@ pub struct Bayou { impl Bayou { pub fn new(creds: &Credentials, path: String) -> Result { - let k2v_client = creds.k2v_client()?; - let s3_client = creds.s3_client()?; + let k2v_client = creds.row_client()?; + let s3_client = creds.blob_client()?; let watch = K2vWatch::new(creds, path.clone(), WATCH_SK.to_string())?; Ok(Self { - bucket: creds.bucket().to_string(), path, key: creds.keys.master.clone(), k2v: k2v_client, @@ -103,17 +101,8 @@ impl Bayou { } else { debug!("(sync) loading checkpoint: {}", key); - let gor = 
GetObjectRequest { - bucket: self.bucket.clone(), - key: key.to_string(), - ..Default::default() - }; - - let obj_res = self.s3.get_object(gor).await?; - - let obj_body = obj_res.body.ok_or(anyhow!("Missing object body"))?; - let mut buf = Vec::with_capacity(obj_res.content_length.unwrap_or(128) as usize); - obj_body.into_async_read().read_to_end(&mut buf).await?; + let obj_res = self.s3.blob(key).fetch().await?; + let buf = obj_res.content().ok_or(anyhow!("object can't be empty"))?; debug!("(sync) checkpoint body length: {}", buf.len()); @@ -145,7 +134,8 @@ impl Bayou { // 3. List all operations starting from checkpoint let ts_ser = self.checkpoint.0.to_string(); debug!("(sync) looking up operations starting at {}", ts_ser); - let ops_map = self + let ops_map = self.k2v.select(storage::Selector::Range { begin: &ts_ser, end: WATCH_SK }).await?; + /*let ops_map = self .k2v .read_batch(&[BatchReadOp { partition_key: &self.path, @@ -164,13 +154,11 @@ impl Bayou { .into_iter() .next() .ok_or(anyhow!("Missing K2V result"))? - .items; + .items;*/ let mut ops = vec![]; - for (tsstr, val) in ops_map { - let ts = tsstr - .parse::() - .map_err(|_| anyhow!("Invalid operation timestamp: {}", tsstr))?; + for row_value in ops_map { + let ts = row_value.timestamp(); if val.value.len() != 1 { bail!("Invalid operation, has {} values", val.value.len()); } @@ -536,59 +524,3 @@ impl K2vWatch { info!("bayou k2v watch bg loop exiting"); } } - -// ---- TIMESTAMP CLASS ---- - -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] -pub struct Timestamp { - pub msec: u64, - pub rand: u64, -} - -impl Timestamp { - #[allow(dead_code)] - // 2023-05-15 try to make clippy happy and not sure if this fn will be used in the future. - pub fn now() -> Self { - let mut rng = thread_rng(); - Self { - msec: now_msec(), - rand: rng.gen::(), - } - } - - pub fn after(other: &Self) -> Self { - let mut rng = thread_rng(); - Self { - msec: std::cmp::max(now_msec(), other.msec + 1), - rand: rng.gen::(), - } - } - - pub fn zero() -> Self { - Self { msec: 0, rand: 0 } - } -} - -impl ToString for Timestamp { - fn to_string(&self) -> String { - let mut bytes = [0u8; 16]; - bytes[0..8].copy_from_slice(&u64::to_be_bytes(self.msec)); - bytes[8..16].copy_from_slice(&u64::to_be_bytes(self.rand)); - hex::encode(bytes) - } -} - -impl FromStr for Timestamp { - type Err = &'static str; - - fn from_str(s: &str) -> Result { - let bytes = hex::decode(s).map_err(|_| "invalid hex")?; - if bytes.len() != 16 { - return Err("bad length"); - } - Ok(Self { - msec: u64::from_be_bytes(bytes[0..8].try_into().unwrap()), - rand: u64::from_be_bytes(bytes[8..16].try_into().unwrap()), - }) - } -} diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index e550e98..c3a9390 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -15,7 +15,6 @@ use tokio::sync::watch; use tracing::{error, info, warn}; use crate::cryptoblob; -use crate::k2v_util::k2v_wait_value_changed; use crate::login::{Credentials, PublicCredentials}; use crate::mail::mailbox::Mailbox; use crate::mail::uidindex::ImapUidvalidity; @@ -23,7 +22,7 @@ use crate::mail::unique_ident::*; use crate::mail::user::User; use crate::mail::IMF; use crate::storage; -use crate::time::now_msec; +use crate::timestamp::now_msec; const INCOMING_PK: &str = "incoming"; const INCOMING_LOCK_SK: &str = "lock"; diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index e8111df..f27d50a 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -9,7 +9,7 @@ use crate::mail::uidindex::*; use 
crate::mail::unique_ident::*; use crate::mail::IMF; use crate::storage::{RowStore, BlobStore, self}; -use crate::time::now_msec; +use crate::timestamp::now_msec; pub struct Mailbox { pub(super) id: UniqueIdent, @@ -227,7 +227,7 @@ impl MailboxInternal { if let Some(meta) = meta_opt { meta_vec.push(meta); } else { - bail!("No valid meta value in k2v for {:?}", res.to_ref().sk()); + bail!("No valid meta value in k2v for {:?}", res.to_ref().key()); } } diff --git a/src/mail/unique_ident.rs b/src/mail/unique_ident.rs index 267f66e..0e629db 100644 --- a/src/mail/unique_ident.rs +++ b/src/mail/unique_ident.rs @@ -5,7 +5,7 @@ use lazy_static::lazy_static; use rand::prelude::*; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; -use crate::time::now_msec; +use crate::timestamp::now_msec; /// An internal Mail Identifier is composed of two components: /// - a process identifier, 128 bits, itself composed of: diff --git a/src/mail/user.rs b/src/mail/user.rs index 3b8d4e7..6d3bc1a 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -2,7 +2,6 @@ use std::collections::{BTreeMap, HashMap}; use std::sync::{Arc, Weak}; use anyhow::{anyhow, bail, Result}; -use k2v_client::{CausalityToken, K2vClient, K2vValue}; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use tokio::sync::watch; @@ -14,7 +13,7 @@ use crate::mail::mailbox::Mailbox; use crate::mail::uidindex::ImapUidvalidity; use crate::mail::unique_ident::{gen_ident, UniqueIdent}; use crate::storage; -use crate::time::now_msec; +use crate::timestamp::now_msec; pub const MAILBOX_HIERARCHY_DELIMITER: char = '.'; diff --git a/src/main.rs b/src/main.rs index 8d2a140..f395143 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,6 @@ #![feature(async_fn_in_trait)] +mod timestamp; mod bayou; mod config; mod cryptoblob; @@ -10,7 +11,6 @@ mod login; mod mail; mod server; mod storage; -mod time; use std::path::PathBuf; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 46da4aa..0abeb4d 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -24,6 +24,10 @@ impl IRowStore for GrgStore { fn row(&self, partition: &str, sort: &str) -> RowRef { unimplemented!(); } + + fn select(&self, selector: Selector) -> AsyncResult> { + unimplemented!(); + } } impl IRowRef for GrgRef { @@ -31,6 +35,10 @@ impl IRowRef for GrgRef { unimplemented!(); } + fn key(&self) -> (&str, &str) { + unimplemented!(); + } + fn set_value(&self, content: Vec) -> RowValue { unimplemented!(); } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 144a52f..8db4eff 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -25,9 +25,17 @@ impl IRowStore for MemStore { fn row(&self, partition: &str, sort: &str) -> RowRef { unimplemented!(); } + + fn select(&self, selector: Selector) -> AsyncResult> { + unimplemented!(); + } } impl IRowRef for MemRef { + fn key(&self) -> (&str, &str) { + unimplemented!(); + } + fn clone_boxed(&self) -> RowRef { unimplemented!(); } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index b687959..c3bf19f 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -86,8 +86,7 @@ pub type RowStore = Box; pub trait IRowRef { fn clone_boxed(&self) -> RowRef; - fn pk(&self) -> &str; - fn sk(&self) -> &str; + fn key(&self) -> (&str, &str); fn set_value(&self, content: Vec) -> RowValue; fn fetch(&self) -> AsyncResult; fn rm(&self) -> AsyncResult<()>; diff --git a/src/time.rs b/src/time.rs deleted file mode 100644 index d34ee22..0000000 --- a/src/time.rs +++ /dev/null @@ -1,9 +0,0 
@@ -use std::time::{SystemTime, UNIX_EPOCH}; - -/// Returns milliseconds since UNIX Epoch -pub fn now_msec() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Fix your clock :o") - .as_millis() as u64 -} diff --git a/src/timestamp.rs b/src/timestamp.rs new file mode 100644 index 0000000..76cb74b --- /dev/null +++ b/src/timestamp.rs @@ -0,0 +1,65 @@ +use rand::prelude::*; +use std::str::FromStr; +use std::time::{SystemTime, UNIX_EPOCH}; + +/// Returns milliseconds since UNIX Epoch +pub fn now_msec() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Fix your clock :o") + .as_millis() as u64 +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Timestamp { + pub msec: u64, + pub rand: u64, +} + +impl Timestamp { + #[allow(dead_code)] + // 2023-05-15 try to make clippy happy and not sure if this fn will be used in the future. + pub fn now() -> Self { + let mut rng = thread_rng(); + Self { + msec: now_msec(), + rand: rng.gen::(), + } + } + + pub fn after(other: &Self) -> Self { + let mut rng = thread_rng(); + Self { + msec: std::cmp::max(now_msec(), other.msec + 1), + rand: rng.gen::(), + } + } + + pub fn zero() -> Self { + Self { msec: 0, rand: 0 } + } +} + +impl ToString for Timestamp { + fn to_string(&self) -> String { + let mut bytes = [0u8; 16]; + bytes[0..8].copy_from_slice(&u64::to_be_bytes(self.msec)); + bytes[8..16].copy_from_slice(&u64::to_be_bytes(self.rand)); + hex::encode(bytes) + } +} + +impl FromStr for Timestamp { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + let bytes = hex::decode(s).map_err(|_| "invalid hex")?; + if bytes.len() != 16 { + return Err("bad length"); + } + Ok(Self { + msec: u64::from_be_bytes(bytes[0..8].try_into().unwrap()), + rand: u64::from_be_bytes(bytes[8..16].try_into().unwrap()), + }) + } +} -- cgit v1.2.3 From 6da8b815b694a37d39a2be04c8e1585aac17954a Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 16 Nov 2023 18:27:24 +0100 Subject: not very clear how we pass data across channel --- src/bayou.rs | 134 ++++++++++++++++++++++------------------------- src/mail/incoming.rs | 2 +- src/storage/garage.rs | 9 +++- src/storage/in_memory.rs | 4 ++ src/storage/mod.rs | 18 +++++-- 5 files changed, 89 insertions(+), 78 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index 3201783..f95bd82 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -9,16 +9,12 @@ use serde::{Deserialize, Serialize}; use tokio::io::AsyncReadExt; use tokio::sync::{watch, Notify}; -use k2v_client::{BatchDeleteOp, BatchReadOp, CausalityToken, Filter, K2vClient, K2vValue}; -use rusoto_s3::{ - DeleteObjectRequest, GetObjectRequest, ListObjectsV2Request, PutObjectRequest, S3Client, S3, -}; - use crate::cryptoblob::*; use crate::login::Credentials; use crate::timestamp::*; use crate::storage; + const KEEP_STATE_EVERY: usize = 64; // Checkpointing interval constants: a checkpoint is not made earlier @@ -61,7 +57,7 @@ pub struct Bayou { last_try_checkpoint: Option, watch: Arc, - last_sync_watch_ct: Option, + last_sync_watch_ct: storage::RowRef, } impl Bayou { @@ -69,6 +65,7 @@ impl Bayou { let k2v_client = creds.row_client()?; let s3_client = creds.blob_client()?; + let target = k2v_client.row(&path, WATCH_SK); let watch = K2vWatch::new(creds, path.clone(), WATCH_SK.to_string())?; Ok(Self { @@ -81,7 +78,7 @@ impl Bayou { last_sync: None, last_try_checkpoint: None, watch, - last_sync_watch_ct: None, + last_sync_watch_ct: target, }) } @@ -134,7 +131,7 @@ impl Bayou { // 3. 
List all operations starting from checkpoint let ts_ser = self.checkpoint.0.to_string(); debug!("(sync) looking up operations starting at {}", ts_ser); - let ops_map = self.k2v.select(storage::Selector::Range { begin: &ts_ser, end: WATCH_SK }).await?; + let ops_map = self.k2v.select(storage::Selector::Range { shard_key: &self.path, begin: &ts_ser, end: WATCH_SK }).await?; /*let ops_map = self .k2v .read_batch(&[BatchReadOp { @@ -158,18 +155,22 @@ impl Bayou { let mut ops = vec![]; for row_value in ops_map { - let ts = row_value.timestamp(); - if val.value.len() != 1 { - bail!("Invalid operation, has {} values", val.value.len()); + let row = row_value.to_ref(); + let sort_key = row.key().1; + let ts = sort_key.parse::().map_err(|_| anyhow!("Invalid operation timestamp: {}", sort_key))?; + + let val = row_value.content(); + if val.len() != 1 { + bail!("Invalid operation, has {} values", row_value.content().len()); } - match &val.value[0] { - K2vValue::Value(v) => { + match &val[0] { + storage::Alternative::Value(v) => { let op = open_deserialize::(v, &self.key)?; - debug!("(sync) operation {}: {} {:?}", tsstr, base64::encode(v), op); + debug!("(sync) operation {}: {} {:?}", sort_key, base64::encode(v), op); ops.push((ts, op)); } - K2vValue::Tombstone => { - unreachable!(); + storage::Alternative::Tombstone => { + continue; } } } @@ -372,13 +373,12 @@ impl Bayou { let cryptoblob = seal_serialize(&state_cp, &self.key)?; debug!("(cp) checkpoint body length: {}", cryptoblob.len()); - let por = PutObjectRequest { - bucket: self.bucket.clone(), - key: format!("{}/checkpoint/{}", self.path, ts_cp.to_string()), - body: Some(cryptoblob.into()), - ..Default::default() - }; - self.s3.put_object(por).await?; + self.s3 + .blob(format!("{}/checkpoint/{}", self.path, ts_cp.to_string()).as_str()) + .set_value(cryptoblob.into()) + .push() + .await?; + // Drop old checkpoints (but keep at least CHECKPOINTS_TO_KEEP of them) let ecp_len = existing_checkpoints.len(); @@ -388,25 +388,22 @@ impl Bayou { // Delete blobs for (_ts, key) in existing_checkpoints[..last_to_keep].iter() { debug!("(cp) drop old checkpoint {}", key); - let dor = DeleteObjectRequest { - bucket: self.bucket.clone(), - key: key.to_string(), - ..Default::default() - }; - self.s3.delete_object(dor).await?; + self.s3 + .blob(key) + .rm() + .await?; } // Delete corresponding range of operations let ts_ser = existing_checkpoints[last_to_keep].0.to_string(); self.k2v - .delete_batch(&[BatchDeleteOp { - partition_key: &self.path, - prefix: None, - start: None, - end: Some(&ts_ser), - single_item: false, - }]) + .rm(storage::Selector::Range{ + shard_key: &self.path, + begin: "", + end: &ts_ser + }) .await?; + } Ok(()) @@ -425,22 +422,14 @@ impl Bayou { async fn list_checkpoints(&self) -> Result> { let prefix = format!("{}/checkpoint/", self.path); - let lor = ListObjectsV2Request { - bucket: self.bucket.clone(), - max_keys: Some(1000), - prefix: Some(prefix.clone()), - ..Default::default() - }; - - let checkpoints_res = self.s3.list_objects_v2(lor).await?; + let checkpoints_res = self.s3.list(&prefix).await?; let mut checkpoints = vec![]; - for object in checkpoints_res.contents.unwrap_or_default() { - if let Some(key) = object.key { - if let Some(ckid) = key.strip_prefix(&prefix) { - if let Ok(ts) = ckid.parse::() { - checkpoints.push((ts, key)); - } + for object in checkpoints_res { + let key = object.key(); + if let Some(ckid) = key.strip_prefix(&prefix) { + if let Ok(ts) = ckid.parse::() { + checkpoints.push((ts, key.into())); } } } @@ -454,23 
+443,25 @@ impl Bayou { struct K2vWatch { pk: String, sk: String, - rx: watch::Receiver>, + rx: watch::Receiver, notify: Notify, } impl K2vWatch { /// Creates a new watch and launches subordinate threads. /// These threads hold Weak pointers to the struct; - /// the exit when the Arc is dropped. + /// they exit when the Arc is dropped. fn new(creds: &Credentials, pk: String, sk: String) -> Result> { - let (tx, rx) = watch::channel::>(None); + let row_client = creds.row_client()?; + + let (tx, rx) = watch::channel::(row_client.row(&pk, &sk)); let notify = Notify::new(); let watch = Arc::new(K2vWatch { pk, sk, rx, notify }); tokio::spawn(Self::background_task( Arc::downgrade(&watch), - creds.k2v_client()?, + row_client, tx, )); @@ -479,41 +470,42 @@ impl K2vWatch { async fn background_task( self_weak: Weak, - k2v: K2vClient, - tx: watch::Sender>, + k2v: storage::RowStore, + tx: watch::Sender, ) { - let mut ct = None; + let mut row = match Weak::upgrade(&self_weak) { + Some(this) => k2v.row(&this.pk, &this.sk), + None => { + error!("can't start loop"); + return + }, + }; + while let Some(this) = Weak::upgrade(&self_weak) { debug!( - "bayou k2v watch bg loop iter ({}, {}): ct = {:?}", - this.pk, this.sk, ct + "bayou k2v watch bg loop iter ({}, {})", + this.pk, this.sk ); tokio::select!( _ = tokio::time::sleep(Duration::from_secs(60)) => continue, - update = k2v_wait_value_changed(&k2v, &this.pk, &this.sk, &ct) => { + update = row.poll() => { + //update = k2v_wait_value_changed(&k2v, &this.pk, &this.sk, &ct) => { match update { Err(e) => { error!("Error in bayou k2v wait value changed: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; } - Ok(cv) => { - if tx.send(Some(cv.causality.clone())).is_err() { + Ok(new_value) => { + row = new_value.to_ref(); + if tx.send(XXX).is_err() { break; } - ct = Some(cv.causality); } } } _ = this.notify.notified() => { let rand = u128::to_be_bytes(thread_rng().gen()).to_vec(); - if let Err(e) = k2v - .insert_item( - &this.pk, - &this.sk, - rand, - ct.clone(), - ) - .await + if let Err(e) = row.set_value(rand).push().await { error!("Error in bayou k2v watch updater loop: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index c3a9390..9899ae8 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -81,7 +81,7 @@ async fn incoming_mail_watch_process_internal( tokio::select! 
{ inc_k = wait_new_mail => Some(inc_k), - _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(incoming_key.clone()), + _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(incoming_key), _ = lock_held.changed() => None, _ = rx_inbox_id.changed() => None, } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 0abeb4d..aef9a0d 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -28,11 +28,18 @@ impl IRowStore for GrgStore { fn select(&self, selector: Selector) -> AsyncResult> { unimplemented!(); } + + fn rm(&self, selector: Selector) -> AsyncResult<()> { + unimplemented!(); + } } impl IRowRef for GrgRef { - fn clone_boxed(&self) -> RowRef { + /*fn clone_boxed(&self) -> RowRef { unimplemented!(); + }*/ + fn to_orphan(&self) -> RowRefOrphan { + unimplemented!() } fn key(&self) -> (&str, &str) { diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 8db4eff..a4436e6 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -29,6 +29,10 @@ impl IRowStore for MemStore { fn select(&self, selector: Selector) -> AsyncResult> { unimplemented!(); } + + fn rm(&self, selector: Selector) -> AsyncResult<()> { + unimplemented!(); + } } impl IRowRef for MemRef { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c3bf19f..2e3c0ee 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -21,9 +21,9 @@ pub enum Alternative { type ConcurrentValues = Vec; pub enum Selector<'a> { - Range { begin: &'a str, end: &'a str }, - List (Vec<(&'a str, &'a str)>), - Prefix (&'a str), + Range { shard_key: &'a str, begin: &'a str, end: &'a str }, + List (Vec<(&'a str, &'a str)>), // list of (shard_key, sort_key) + Prefix { shard_key: &'a str, prefix: &'a str }, } #[derive(Debug)] @@ -80,12 +80,14 @@ pub trait IRowStore { fn row(&self, partition: &str, sort: &str) -> RowRef; fn select(&self, selector: Selector) -> AsyncResult>; + fn rm(&self, selector: Selector) -> AsyncResult<()>; } pub type RowStore = Box; pub trait IRowRef { - fn clone_boxed(&self) -> RowRef; + /*fn clone_boxed(&self) -> RowRef;*/ + fn to_orphan(&self) -> RowRefOrphan; fn key(&self) -> (&str, &str); fn set_value(&self, content: Vec) -> RowValue; fn fetch(&self) -> AsyncResult; @@ -93,11 +95,17 @@ pub trait IRowRef fn poll(&self) -> AsyncResult; } pub type RowRef = Box; -impl Clone for RowRef { +/*impl Clone for RowRef { fn clone(&self) -> Self { return self.clone_boxed() } +}*/ + +pub trait IRowRefOrphan +{ + fn attach(&self, store: &RowStore) -> RowRef; } +pub type RowRefOrphan = Box; pub trait IRowValue { -- cgit v1.2.3 From 7eb690e49dd995663e8ea35b1a1f5b14584b4509 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 17 Nov 2023 10:46:13 +0100 Subject: introduce an "orphan" enum --- src/bayou.rs | 8 ++++---- src/mail/user.rs | 41 +++++++++++++++++++++++++++++++++-------- src/storage/garage.rs | 9 ++++++++- src/storage/in_memory.rs | 15 +++++++++++++-- src/storage/mod.rs | 14 ++++++++------ 5 files changed, 66 insertions(+), 21 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index f95bd82..d3027c5 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -443,7 +443,7 @@ impl Bayou { struct K2vWatch { pk: String, sk: String, - rx: watch::Receiver, + rx: watch::Receiver, notify: Notify, } @@ -454,7 +454,7 @@ impl K2vWatch { fn new(creds: &Credentials, pk: String, sk: String) -> Result> { let row_client = creds.row_client()?; - let (tx, rx) = watch::channel::(row_client.row(&pk, &sk)); + let (tx, rx) = watch::channel::(row_client.row(&pk, &sk).to_orphan()); let notify = 
Notify::new(); let watch = Arc::new(K2vWatch { pk, sk, rx, notify }); @@ -471,7 +471,7 @@ impl K2vWatch { async fn background_task( self_weak: Weak, k2v: storage::RowStore, - tx: watch::Sender, + tx: watch::Sender, ) { let mut row = match Weak::upgrade(&self_weak) { Some(this) => k2v.row(&this.pk, &this.sk), @@ -497,7 +497,7 @@ impl K2vWatch { } Ok(new_value) => { row = new_value.to_ref(); - if tx.send(XXX).is_err() { + if tx.send(row.to_orphan()).is_err() { break; } } diff --git a/src/mail/user.rs b/src/mail/user.rs index 6d3bc1a..7011dcc 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -81,7 +81,11 @@ impl User { let mb_uidvalidity = mb.current_uid_index().await.uidvalidity; if mb_uidvalidity > uidvalidity { list.update_uidvalidity(name, mb_uidvalidity); - self.save_mailbox_list(&list, ct).await?; + let orphan = match ct { + Some(x) => Some(x.to_orphan()), + None => None, + }; + self.save_mailbox_list(&list, orphan).await?; } Ok(Some(mb)) } else { @@ -104,7 +108,11 @@ impl User { let (mut list, ct) = self.load_mailbox_list().await?; match list.create_mailbox(name) { CreatedMailbox::Created(_, _) => { - self.save_mailbox_list(&list, ct).await?; + let orphan = match ct { + Some(x) => Some(x.to_orphan()), + None => None, + }; + self.save_mailbox_list(&list, orphan).await?; Ok(()) } CreatedMailbox::Existed(_, _) => Err(anyhow!("Mailbox {} already exists", name)), @@ -121,7 +129,11 @@ impl User { if list.has_mailbox(name) { // TODO: actually delete mailbox contents list.set_mailbox(name, None); - self.save_mailbox_list(&list, ct).await?; + let orphan = match ct { + Some(x) => Some(x.to_orphan()), + None => None, + }; + self.save_mailbox_list(&list, orphan).await?; Ok(()) } else { bail!("Mailbox {} does not exist", name); @@ -142,7 +154,11 @@ impl User { if old_name == INBOX { list.rename_mailbox(old_name, new_name)?; if !self.ensure_inbox_exists(&mut list, &ct).await? 
{ - self.save_mailbox_list(&list, ct).await?; + let orphan = match ct { + Some(x) => Some(x.to_orphan()), + None => None, + }; + self.save_mailbox_list(&list, orphan).await?; } } else { let names = list.existing_mailbox_names(); @@ -165,7 +181,12 @@ impl User { list.rename_mailbox(name, &nnew)?; } } - self.save_mailbox_list(&list, ct).await?; + + let orphan = match ct { + Some(x) => Some(x.to_orphan()), + None => None, + }; + self.save_mailbox_list(&list, orphan).await?; } Ok(()) } @@ -257,7 +278,11 @@ impl User { let saved; let (inbox_id, inbox_uidvalidity) = match list.create_mailbox(INBOX) { CreatedMailbox::Created(i, v) => { - self.save_mailbox_list(list, ct.clone()).await?; + let orphan = match ct { + Some(x) => Some(x.to_orphan()), + None => None, + }; + self.save_mailbox_list(list, orphan).await?; saved = true; (i, v) } @@ -277,11 +302,11 @@ impl User { async fn save_mailbox_list( &self, list: &MailboxList, - ct: Option, + ct: Option, ) -> Result<()> { let list_blob = seal_serialize(list, &self.creds.keys.master)?; let rref = match ct { - Some(x) => x, + Some(x) => self.k2v.from_orphan(x), None => self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK), }; rref.set_value(list_blob).push().await?; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index aef9a0d..d6ac7ac 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -6,6 +6,9 @@ pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} +#[derive(Clone, Debug)] +pub struct GrgOrphanRowRef {} + impl IBuilders for GrgCreds { fn row_store(&self) -> Result { unimplemented!(); @@ -32,13 +35,17 @@ impl IRowStore for GrgStore { fn rm(&self, selector: Selector) -> AsyncResult<()> { unimplemented!(); } + + fn from_orphan(&self, orphan: OrphanRowRef) -> RowRef { + unimplemented!(); + } } impl IRowRef for GrgRef { /*fn clone_boxed(&self) -> RowRef { unimplemented!(); }*/ - fn to_orphan(&self) -> RowRefOrphan { + fn to_orphan(&self) -> OrphanRowRef { unimplemented!() } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index a4436e6..0bdf9b1 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -7,6 +7,9 @@ pub struct MemStore {} pub struct MemRef {} pub struct MemValue {} +#[derive(Clone, Debug)] +pub struct MemOrphanRowRef {} + impl IBuilders for FullMem { fn row_store(&self) -> Result { unimplemented!(); @@ -33,16 +36,24 @@ impl IRowStore for MemStore { fn rm(&self, selector: Selector) -> AsyncResult<()> { unimplemented!(); } + + fn from_orphan(&self, orphan: OrphanRowRef) -> RowRef { + unimplemented!(); + } } impl IRowRef for MemRef { + fn to_orphan(&self) -> OrphanRowRef { + unimplemented!() + } + fn key(&self) -> (&str, &str) { unimplemented!(); } - fn clone_boxed(&self) -> RowRef { + /*fn clone_boxed(&self) -> RowRef { unimplemented!(); - } + }*/ fn set_value(&self, content: Vec) -> RowValue { unimplemented!(); diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 2e3c0ee..c9a49c5 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -20,6 +20,12 @@ pub enum Alternative { } type ConcurrentValues = Vec; +#[derive(Clone, Debug)] +pub enum OrphanRowRef { + Garage(garage::GrgOrphanRowRef), + Memory(in_memory::MemOrphanRowRef), +} + pub enum Selector<'a> { Range { shard_key: &'a str, begin: &'a str, end: &'a str }, List (Vec<(&'a str, &'a str)>), // list of (shard_key, sort_key) @@ -81,13 +87,14 @@ pub trait IRowStore fn row(&self, partition: &str, sort: &str) -> RowRef; fn select(&self, selector: Selector) -> AsyncResult>; fn rm(&self, selector: Selector) -> AsyncResult<()>; 
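// Illustrative sketch (not from the patch): the OrphanRowRef introduced by this commit
// is a backend-agnostic causality token. Code that cannot keep a live RowRef (e.g. when
// sending it through a watch channel or storing it in a struct) detaches the token with
// to_orphan() and later re-attaches it to a store of the same backend with from_orphan()
// (made fallible two commits later, when incompatible backends are detected). Assuming
// the trait methods shown in this hunk:
//
//     let ct: OrphanRowRef = fetched.to_ref().to_orphan();   // detach from the store
//     let row: RowRef = k2v.from_orphan(ct);                 // re-attach to a live store
//     row.set_value(new_bytes).push().await?;                // write with causality preserved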
+ fn from_orphan(&self, orphan: OrphanRowRef) -> RowRef; } pub type RowStore = Box; pub trait IRowRef { /*fn clone_boxed(&self) -> RowRef;*/ - fn to_orphan(&self) -> RowRefOrphan; + fn to_orphan(&self) -> OrphanRowRef; fn key(&self) -> (&str, &str); fn set_value(&self, content: Vec) -> RowValue; fn fetch(&self) -> AsyncResult; @@ -101,11 +108,6 @@ pub type RowRef = Box; } }*/ -pub trait IRowRefOrphan -{ - fn attach(&self, store: &RowStore) -> RowRef; -} -pub type RowRefOrphan = Box; pub trait IRowValue { -- cgit v1.2.3 From 4a33ac2265dae0e8fd1f7fbaec54ab7120334cbe Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 17 Nov 2023 12:15:44 +0100 Subject: incoming has been fully ported --- src/login/mod.rs | 2 +- src/mail/incoming.rs | 74 +++++++++++++++++++++--------------------------- src/storage/garage.rs | 6 ++++ src/storage/in_memory.rs | 6 ++++ src/storage/mod.rs | 3 +- 5 files changed, 47 insertions(+), 44 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index f4bf4d2..a150829 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -47,7 +47,7 @@ pub struct Credentials { #[derive(Clone, Debug)] pub struct PublicCredentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: StorageCredentials, + pub storage: Builders, pub public_key: PublicKey, } diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 9899ae8..db22f3e 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +//use std::collections::HashMap; use std::convert::TryFrom; use std::sync::{Arc, Weak}; @@ -6,11 +6,7 @@ use std::time::Duration; use anyhow::{anyhow, bail, Result}; use futures::{future::BoxFuture, FutureExt}; -use k2v_client::{CausalityToken, K2vClient, K2vValue}; -use rusoto_s3::{ - DeleteObjectRequest, GetObjectRequest, ListObjectsV2Request, PutObjectRequest, S3Client, S3, -}; -use tokio::io::AsyncReadExt; +//use tokio::io::AsyncReadExt; use tokio::sync::watch; use tracing::{error, info, warn}; @@ -81,7 +77,7 @@ async fn incoming_mail_watch_process_internal( tokio::select! 
{ inc_k = wait_new_mail => Some(inc_k), - _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(incoming_key), + _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(k2v.from_orphan(incoming_key.to_orphan())), _ = lock_held.changed() => None, _ = rx_inbox_id.changed() => None, } @@ -220,7 +216,7 @@ fn k2v_lock_loop(k2v: storage::RowStore, pk: &'static str, sk: &'static str) -> enum LockState { Unknown, Empty, - Held(UniqueIdent, u64, CausalityToken), + Held(UniqueIdent, u64, storage::OrphanRowRef), } async fn k2v_lock_loop_internal( @@ -236,10 +232,10 @@ async fn k2v_lock_loop_internal( // Loop 1: watch state of lock in K2V, save that in corresponding watch channel let watch_lock_loop: BoxFuture> = async { - let mut ct = None; + let mut ct = k2v.row(pk, sk); loop { info!("k2v watch lock loop iter: ct = {:?}", ct); - match k2v_wait_value_changed(&k2v, pk, sk, &ct).await { + match ct.poll().await { Err(e) => { error!( "Error in k2v wait value changed: {} ; assuming we no longer hold lock.", @@ -250,8 +246,8 @@ async fn k2v_lock_loop_internal( } Ok(cv) => { let mut lock_state = None; - for v in cv.value.iter() { - if let K2vValue::Value(vbytes) = v { + for v in cv.content().iter() { + if let storage::Alternative::Value(vbytes) = v { if vbytes.len() == 32 { let ts = u64::from_be_bytes(vbytes[..8].try_into().unwrap()); let pid = UniqueIdent(vbytes[8..].try_into().unwrap()); @@ -264,16 +260,18 @@ async fn k2v_lock_loop_internal( } } } + let new_ct = cv.to_ref(); + info!( "k2v watch lock loop: changed, old ct = {:?}, new ct = {:?}, v = {:?}", - ct, cv.causality, lock_state + ct, new_ct, lock_state ); state_tx.send( lock_state - .map(|(pid, ts)| LockState::Held(pid, ts, cv.causality.clone())) + .map(|(pid, ts)| LockState::Held(pid, ts, new_ct.to_orphan())) .unwrap_or(LockState::Empty), )?; - ct = Some(cv.causality); + ct = new_ct; } } } @@ -359,7 +357,11 @@ async fn k2v_lock_loop_internal( now_msec() + LOCK_DURATION.as_millis() as u64, )); lock[8..].copy_from_slice(&our_pid.0); - if let Err(e) = k2v.insert_item(pk, sk, lock, ct).await { + let row = match ct { + Some(orphan) => k2v.from_orphan(orphan), + None => k2v.row(pk, sk), + }; + if let Err(e) = row.set_value(lock).push().await { error!("Could not take lock: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; } @@ -385,7 +387,8 @@ async fn k2v_lock_loop_internal( _ => None, }; if let Some(ct) = release { - let _ = k2v.delete_item(pk, sk, ct.clone()).await; + let row = k2v.from_orphan(ct); + let _ = row.rm().await; } } @@ -407,13 +410,14 @@ impl EncryptedMessage { } pub async fn deliver_to(self: Arc, creds: PublicCredentials) -> Result<()> { - let s3_client = creds.storage.s3_client()?; - let k2v_client = creds.storage.k2v_client()?; + let s3_client = creds.storage.blob_store()?; + let k2v_client = creds.storage.row_store()?; // Get causality token of previous watch key - let watch_ct = match k2v_client.read_item(INCOMING_PK, INCOMING_WATCH_SK).await { - Err(_) => None, - Ok(cv) => Some(cv.causality), + let query = k2v_client.row(INCOMING_PK, INCOMING_WATCH_SK); + let watch_ct = match query.fetch().await { + Err(_) => query, + Ok(cv) => cv.to_ref(), }; // Write mail to encrypted storage @@ -421,28 +425,14 @@ impl EncryptedMessage { sodiumoxide::crypto::sealedbox::seal(self.key.as_ref(), &creds.public_key); let key_header = base64::encode(&encrypted_key); - let por = PutObjectRequest { - bucket: creds.storage.bucket.clone(), - key: format!("incoming/{}", gen_ident()), - metadata: Some( - [(MESSAGE_KEY.to_string(), key_header)] - 
.into_iter() - .collect::>(), - ), - body: Some(self.encrypted_body.clone().into()), - ..Default::default() - }; - s3_client.put_object(por).await?; + let mut send = s3_client + .blob(&format!("incoming/{}", gen_ident())) + .set_value(self.encrypted_body.clone().into()); + send.set_meta(MESSAGE_KEY, &key_header); + send.push().await?; // Update watch key to signal new mail - k2v_client - .insert_item( - INCOMING_PK, - INCOMING_WATCH_SK, - gen_ident().0.to_vec(), - watch_ct, - ) - .await?; + watch_ct.set_value(gen_ident().0.to_vec()).push().await?; Ok(()) } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index d6ac7ac..f458aeb 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -67,6 +67,12 @@ impl IRowRef for GrgRef { } } +impl std::fmt::Debug for GrgRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + unimplemented!(); + } +} + impl IRowValue for GrgValue { fn to_ref(&self) -> RowRef { unimplemented!(); diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 0bdf9b1..20f96a4 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -72,6 +72,12 @@ impl IRowRef for MemRef { } } +impl std::fmt::Debug for MemRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + unimplemented!(); + } +} + impl IRowValue for MemValue { fn to_ref(&self) -> RowRef { unimplemented!(); diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c9a49c5..08ccfec 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -91,7 +91,7 @@ pub trait IRowStore } pub type RowStore = Box; -pub trait IRowRef +pub trait IRowRef: std::fmt::Debug { /*fn clone_boxed(&self) -> RowRef;*/ fn to_orphan(&self) -> OrphanRowRef; @@ -138,6 +138,7 @@ pub type BlobRef = Box; pub trait IBlobValue { fn to_ref(&self) -> BlobRef; fn get_meta(&self, key: &str) -> Option<&[u8]>; + fn set_meta(&mut self, key: &str, val: &str); fn content(&self) -> Option<&[u8]>; fn push(&self) -> AsyncResult<()>; } -- cgit v1.2.3 From e92dc35564e91ce4e6a8defa9e8b52eef9e55fae Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 17 Nov 2023 15:02:43 +0100 Subject: fix orphan storage compatibility --- src/mail/incoming.rs | 6 +++--- src/mail/user.rs | 2 +- src/storage/garage.rs | 2 +- src/storage/in_memory.rs | 2 +- src/storage/mod.rs | 3 ++- 5 files changed, 8 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index db22f3e..da4c819 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -77,7 +77,7 @@ async fn incoming_mail_watch_process_internal( tokio::select! 
{ inc_k = wait_new_mail => Some(inc_k), - _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(k2v.from_orphan(incoming_key.to_orphan())), + _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(k2v.from_orphan(incoming_key.to_orphan()).expect("Incompatible source & target storage")), _ = lock_held.changed() => None, _ = rx_inbox_id.changed() => None, } @@ -358,7 +358,7 @@ async fn k2v_lock_loop_internal( )); lock[8..].copy_from_slice(&our_pid.0); let row = match ct { - Some(orphan) => k2v.from_orphan(orphan), + Some(orphan) => k2v.from_orphan(orphan).expect("Source & target must be storage compatible"), None => k2v.row(pk, sk), }; if let Err(e) = row.set_value(lock).push().await { @@ -387,7 +387,7 @@ async fn k2v_lock_loop_internal( _ => None, }; if let Some(ct) = release { - let row = k2v.from_orphan(ct); + let row = k2v.from_orphan(ct).expect("Incompatible source & target storage"); let _ = row.rm().await; } } diff --git a/src/mail/user.rs b/src/mail/user.rs index 7011dcc..7a3e5c7 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -306,7 +306,7 @@ impl User { ) -> Result<()> { let list_blob = seal_serialize(list, &self.creds.keys.master)?; let rref = match ct { - Some(x) => self.k2v.from_orphan(x), + Some(x) => self.k2v.from_orphan(x).expect("Source & target must be same storage"), None => self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK), }; rref.set_value(list_blob).push().await?; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index f458aeb..00962f2 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -36,7 +36,7 @@ impl IRowStore for GrgStore { unimplemented!(); } - fn from_orphan(&self, orphan: OrphanRowRef) -> RowRef { + fn from_orphan(&self, orphan: OrphanRowRef) -> Result { unimplemented!(); } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 20f96a4..a29b790 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -37,7 +37,7 @@ impl IRowStore for MemStore { unimplemented!(); } - fn from_orphan(&self, orphan: OrphanRowRef) -> RowRef { + fn from_orphan(&self, orphan: OrphanRowRef) -> Result { unimplemented!(); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 08ccfec..86d7fa2 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -36,6 +36,7 @@ pub enum Selector<'a> { pub enum StorageError { NotFound, Internal, + IncompatibleOrphan, } impl std::fmt::Display for StorageError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -87,7 +88,7 @@ pub trait IRowStore fn row(&self, partition: &str, sort: &str) -> RowRef; fn select(&self, selector: Selector) -> AsyncResult>; fn rm(&self, selector: Selector) -> AsyncResult<()>; - fn from_orphan(&self, orphan: OrphanRowRef) -> RowRef; + fn from_orphan(&self, orphan: OrphanRowRef) -> Result; } pub type RowStore = Box; -- cgit v1.2.3 From 89cb8d9572d623baa858cc1c6a8472053728eafc Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 17 Nov 2023 15:23:05 +0100 Subject: no more error on baiyou --- src/bayou.rs | 15 +++++---------- src/storage/garage.rs | 2 +- src/storage/in_memory.rs | 2 +- src/storage/mod.rs | 2 +- 4 files changed, 8 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index d3027c5..72c2b80 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -1,4 +1,3 @@ -use std::str::FromStr; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; @@ -6,7 +5,6 @@ use anyhow::{anyhow, bail, Result}; use log::{debug, error, info}; use rand::prelude::*; use serde::{Deserialize, Serialize}; -use 
tokio::io::AsyncReadExt; use tokio::sync::{watch, Notify}; use crate::cryptoblob::*; @@ -233,7 +231,7 @@ impl Bayou { // Save info that sync has been done self.last_sync = new_last_sync; - self.last_sync_watch_ct = new_last_sync_watch_ct; + self.last_sync_watch_ct = self.k2v.from_orphan(new_last_sync_watch_ct).expect("Source & target storage must be compatible"); Ok(()) } @@ -245,7 +243,7 @@ impl Bayou { Some(t) => Instant::now() > t + (CHECKPOINT_INTERVAL / 5), _ => true, }; - let changed = self.last_sync_watch_ct != *self.watch.rx.borrow(); + let changed = self.last_sync_watch_ct.to_orphan() != *self.watch.rx.borrow(); if too_old || changed { self.sync().await?; } @@ -266,12 +264,9 @@ impl Bayou { .unwrap_or(&self.checkpoint.0), ); self.k2v - .insert_item( - &self.path, - &ts.to_string(), - seal_serialize(&op, &self.key)?, - None, - ) + .row(&self.path, &ts.to_string()) + .set_value(seal_serialize(&op, &self.key)?) + .push() .await?; self.watch.notify.notify_one(); diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 00962f2..ad33769 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -6,7 +6,7 @@ pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct GrgOrphanRowRef {} impl IBuilders for GrgCreds { diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index a29b790..a2ad04f 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -7,7 +7,7 @@ pub struct MemStore {} pub struct MemRef {} pub struct MemValue {} -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct MemOrphanRowRef {} impl IBuilders for FullMem { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 86d7fa2..c948a08 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -20,7 +20,7 @@ pub enum Alternative { } type ConcurrentValues = Vec; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub enum OrphanRowRef { Garage(garage::GrgOrphanRowRef), Memory(in_memory::MemOrphanRowRef), -- cgit v1.2.3 From 16b38f3197167c344bb522dcfa83292ddb3c1026 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 17 Nov 2023 16:42:25 +0100 Subject: integrate storage choice in config --- src/config.rs | 57 ++++++++++++++++++++++++++++++-------------- src/login/static_provider.rs | 1 + 2 files changed, 40 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 074c192..2a55036 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,10 +8,6 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Config { - pub s3_endpoint: String, - pub k2v_endpoint: String, - pub aws_region: String, - pub login_static: Option, pub login_ldap: Option, @@ -19,21 +15,30 @@ pub struct Config { pub imap: Option, } +pub type LoginStaticConfig = HashMap; + #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct LoginStaticConfig { - pub default_bucket: Option, - pub users: HashMap, +pub enum StaticStorage { + Garage(StaticGarageConfig), + InMemory, } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct LoginStaticUser { - #[serde(default)] - pub email_addresses: Vec, - pub password: String, +pub struct StaticGarageConfig { + pub s3_endpoint: String, + pub k2v_endpoint: String, + pub aws_region: String, pub aws_access_key_id: String, pub aws_secret_access_key: String, pub bucket: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct LoginStaticUser { + #[serde(default)] + pub 
email_addresses: Vec, + pub password: String, pub user_secret: String, #[serde(default)] @@ -43,27 +48,43 @@ pub struct LoginStaticUser { pub secret_key: Option, } +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum LdapStorage { + Garage(LdapGarageConfig), + InMemory, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct LdapGarageConfig { + pub s3_endpoint: String, + pub k2v_endpoint: String, + pub aws_region: String, + + pub aws_access_key_id_attr: String, + pub aws_secret_access_key_attr: String, + pub bucket_attr: Option, + pub default_bucket: Option, +} + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct LoginLdapConfig { + // LDAP connection info pub ldap_server: String, - #[serde(default)] pub pre_bind_on_login: bool, pub bind_dn: Option, pub bind_password: Option, - pub search_base: String, + + // Schema-like info required for Aerogramme's logic pub username_attr: String, #[serde(default = "default_mail_attr")] pub mail_attr: String, - - pub aws_access_key_id_attr: String, - pub aws_secret_access_key_attr: String, pub user_secret_attr: String, pub alternate_user_secrets_attr: Option, - pub bucket: Option, - pub bucket_attr: Option, + // Storage related thing + pub storage: LdapStorage, } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index b9be5a6..378a863 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -7,6 +7,7 @@ use async_trait::async_trait; use crate::config::*; use crate::cryptoblob::{Key, SecretKey}; use crate::login::*; +use crate::storage; pub struct StaticLoginProvider { default_bucket: Option, -- cgit v1.2.3 From 36f4050a40c3ba7b9637a973063b6b5549a2c208 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 17 Nov 2023 18:46:22 +0100 Subject: WIP provider config --- src/config.rs | 2 ++ src/login/static_provider.rs | 25 ++++++++++++------------- src/server.rs | 13 ++----------- src/storage/in_memory.rs | 2 +- 4 files changed, 17 insertions(+), 25 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 2a55036..477968e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -46,6 +46,8 @@ pub struct LoginStaticUser { pub master_key: Option, pub secret_key: Option, + + pub storage: StaticStorage, } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 378a863..df1432f 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -10,18 +10,13 @@ use crate::login::*; use crate::storage; pub struct StaticLoginProvider { - default_bucket: Option, users: HashMap>, users_by_email: HashMap>, - - k2v_region: Region, - s3_region: Region, } impl StaticLoginProvider { - pub fn new(config: LoginStaticConfig, k2v_region: Region, s3_region: Region) -> Result { + pub fn new(config: LoginStaticConfig) -> Result { let users = config - .users .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect::>(); @@ -36,11 +31,8 @@ impl StaticLoginProvider { } Ok(Self { - default_bucket: config.default_bucket, users, users_by_email, - k2v_region, - s3_region, }) } } @@ -59,23 +51,30 @@ impl LoginProvider for StaticLoginProvider { bail!("Wrong password"); } + /* tracing::debug!(user=%username, "fetch bucket"); let bucket = user .bucket .clone() .or_else(|| self.default_bucket.clone()) .ok_or(anyhow!( - "No bucket configured and no default bucket specieid" - ))?; + "No bucket configured and no default bucket specified" + ))?;*/ tracing::debug!(user=%username, 
"fetch keys"); - let storage = StorageCredentials { + let storage: storage::Builders = match user.storage { + StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), + StaticStorage::Garage(c) => Box::new(storage::garage::GrgCreds {}), + }; + + /* + StorageCredentials { k2v_region: self.k2v_region.clone(), s3_region: self.s3_region.clone(), aws_access_key_id: user.aws_access_key_id.clone(), aws_secret_access_key: user.aws_secret_access_key.clone(), bucket, - }; + };*/ let keys = match (&user.master_key, &user.secret_key) { (Some(m), Some(s)) => { diff --git a/src/server.rs b/src/server.rs index f0eb35f..3485a61 100644 --- a/src/server.rs +++ b/src/server.rs @@ -61,18 +61,9 @@ impl Server { } fn build(config: Config) -> Result<(ArcLoginProvider, Option, Option)> { - let s3_region = Region { - name: config.aws_region.clone(), - endpoint: config.s3_endpoint, - }; - let k2v_region = Region { - name: config.aws_region, - endpoint: config.k2v_endpoint, - }; - let lp: ArcLoginProvider = match (config.login_static, config.login_ldap) { - (Some(st), None) => Arc::new(StaticLoginProvider::new(st, k2v_region, s3_region)?), - (None, Some(ld)) => Arc::new(LdapLoginProvider::new(ld, k2v_region, s3_region)?), + (Some(st), None) => Arc::new(StaticLoginProvider::new(st)?), + (None, Some(ld)) => Arc::new(LdapLoginProvider::new(ld)?), (Some(_), Some(_)) => { bail!("A single login provider must be set up in config file") } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index a2ad04f..cde2335 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -30,7 +30,7 @@ impl IRowStore for MemStore { } fn select(&self, selector: Selector) -> AsyncResult> { - unimplemented!(); + unimplemented!() } fn rm(&self, selector: Selector) -> AsyncResult<()> { -- cgit v1.2.3 From bd6c3464e609dc76e119457ea583af0f08eeabb4 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 21 Nov 2023 09:04:54 +0100 Subject: remove old storagecredentials --- src/login/mod.rs | 68 -------------------------------------------- src/login/static_provider.rs | 5 ++++ 2 files changed, 5 insertions(+), 68 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index a150829..52d9829 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -51,17 +51,6 @@ pub struct PublicCredentials { pub public_key: PublicKey, } -/// The struct StorageCredentials contains access key to an S3 and K2V bucket -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct StorageCredentials { - pub s3_region: Region, - pub k2v_region: Region, - - pub aws_access_key_id: String, - pub aws_secret_access_key: String, - pub bucket: String, -} - /// The struct UserSecrets represents intermediary secrets that are mixed in with the user's /// password when decrypting the cryptographic keys that are stored in their bucket. /// These secrets should be stored somewhere else (e.g. in the LDAP server or in the @@ -87,24 +76,6 @@ pub struct CryptoKeys { pub public: PublicKey, } -/// A custom S3 region, composed of a region name and endpoint. 
-/// We use this instead of rusoto_signature::Region so that we can -/// derive Hash and Eq -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct Region { - pub name: String, - pub endpoint: String, -} - -impl Region { - pub fn as_rusoto_region(&self) -> rusoto_signature::Region { - rusoto_signature::Region::Custom { - name: self.name.clone(), - endpoint: self.endpoint.clone(), - } - } -} - // ---- @@ -117,45 +88,6 @@ impl Credentials { } } -impl StorageCredentials { - pub fn k2v_client(&self) -> Result { - let aws_creds = AwsCredentials::new( - self.aws_access_key_id.clone(), - self.aws_secret_access_key.clone(), - None, - None, - ); - - Ok(K2vClient::new( - self.k2v_region.as_rusoto_region(), - self.bucket.clone(), - aws_creds, - None, - )?) - } - - pub fn s3_client(&self) -> Result { - let aws_creds_provider = StaticProvider::new_minimal( - self.aws_access_key_id.clone(), - self.aws_secret_access_key.clone(), - ); - - let connector = hyper_rustls::HttpsConnectorBuilder::new() - .with_native_roots() - .https_or_http() - .enable_http1() - .enable_http2() - .build(); - let client = HttpClient::from_connector(connector); - - Ok(S3Client::new_with( - client, - aws_creds_provider, - self.s3_region.as_rusoto_region(), - )) - } -} - impl CryptoKeys { pub async fn init( storage: &StorageCredentials, diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index df1432f..d013c6f 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -106,6 +106,7 @@ impl LoginProvider for StaticLoginProvider { Some(u) => u, }; + /* let bucket = user .bucket .clone() @@ -120,6 +121,10 @@ impl LoginProvider for StaticLoginProvider { aws_access_key_id: user.aws_access_key_id.clone(), aws_secret_access_key: user.aws_secret_access_key.clone(), bucket, + };*/ + let storage: storage::Builders = match user.storage { + StaticStorage::InMemory => X, + StaticStorage::Garage => Y, }; let k2v_client = storage.k2v_client()?; -- cgit v1.2.3 From 6e8b2cfc9ff1abf2b4844884d9ebd807d37bd76e Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 21 Nov 2023 09:56:31 +0100 Subject: rewrite CryptoKeys with Storage abstraction --- src/login/mod.rs | 107 ++++++++++++----------------------------------- src/storage/garage.rs | 6 ++- src/storage/in_memory.rs | 6 ++- src/storage/mod.rs | 3 +- 4 files changed, 38 insertions(+), 84 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index 52d9829..050151d 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -6,13 +6,7 @@ use std::sync::Arc; use anyhow::{anyhow, bail, Context, Result}; use async_trait::async_trait; -use k2v_client::{ - BatchInsertOp, BatchReadOp, CausalValue, CausalityToken, Filter, K2vClient, K2vValue, -}; use rand::prelude::*; -use rusoto_core::HttpClient; -use rusoto_credential::{AwsCredentials, StaticProvider}; -use rusoto_s3::S3Client; use crate::cryptoblob::*; use crate::storage::*; @@ -90,12 +84,12 @@ impl Credentials { impl CryptoKeys { pub async fn init( - storage: &StorageCredentials, + storage: &Builders, user_secrets: &UserSecrets, password: &str, ) -> Result { // Check that salt and public don't exist already - let k2v = storage.k2v_client()?; + let k2v = storage.row_store()?; let (salt_ct, public_ct) = Self::check_uninitialized(&k2v).await?; // Generate salt for password identifiers @@ -140,12 +134,12 @@ impl CryptoKeys { } pub async fn init_without_password( - storage: &StorageCredentials, + storage: &Builders, master: &Key, secret: &SecretKey, ) -> Result { // Check that salt and 
public don't exist already - let k2v = storage.k2v_client()?; + let k2v = storage.row_store()?; let (salt_ct, public_ct) = Self::check_uninitialized(&k2v).await?; // Generate salt for password identifiers @@ -172,7 +166,7 @@ impl CryptoKeys { } pub async fn open( - storage: &StorageCredentials, + storage: &Builders, user_secrets: &UserSecrets, password: &str, ) -> Result { @@ -215,11 +209,11 @@ impl CryptoKeys { } pub async fn open_without_password( - storage: &StorageCredentials, + storage: &Builders, master: &Key, secret: &SecretKey, ) -> Result { - let k2v = storage.k2v_client()?; + let k2v = storage.row_store()?; let (_ident_salt, expected_public) = Self::load_salt_and_public(&k2v).await?; // Create CryptoKeys struct from given keys @@ -240,11 +234,11 @@ impl CryptoKeys { pub async fn add_password( &self, - storage: &StorageCredentials, + storage: &Builders, user_secrets: &UserSecrets, password: &str, ) -> Result<()> { - let k2v = storage.k2v_client()?; + let k2v = storage.row_store()?; let (ident_salt, _public) = Self::load_salt_and_public(&k2v).await?; // Generate short password digest (= password identity) @@ -289,11 +283,11 @@ impl CryptoKeys { } pub async fn delete_password( - storage: &StorageCredentials, + storage: &Builders, password: &str, allow_delete_all: bool, ) -> Result<()> { - let k2v = storage.k2v_client()?; + let k2v = storage.row_client()?; let (ident_salt, _public) = Self::load_salt_and_public(&k2v).await?; // Generate short password digest (= password identity) @@ -322,47 +316,32 @@ impl CryptoKeys { // ---- STORAGE UTIL ---- async fn check_uninitialized( - k2v: &K2vClient, - ) -> Result<(Option, Option)> { + k2v: &RowStore, + ) -> Result<(RowRef, RowRef)> { let params = k2v - .read_batch(&[ - k2v_read_single_key("keys", "salt", true), - k2v_read_single_key("keys", "public", true), - ]) + .select(Selector::List(vec![ + ("keys", "salt"), + ("keys", "public"), + ])) .await .context("ReadBatch for salt and public in check_uninitialized")?; + if params.len() != 2 { bail!( "Invalid response from k2v storage: {:?} (expected two items)", params ); } - if params[0].items.len() > 1 || params[1].items.len() > 1 { - bail!( - "invalid response from k2v storage: {:?} (several items in single_item read)", - params - ); - } - let salt_ct = match params[0].items.iter().next() { - None => None, - Some((_, CausalValue { causality, value })) => { - if value.iter().any(|x| matches!(x, K2vValue::Value(_))) { - bail!("key storage already initialized"); - } - Some(causality.clone()) - } - }; + let salt_ct = params[0].to_ref(); + if params[0].content().iter().any(|x| matches!(x, Alternative::Value(_))) { + bail!("key storage already initialized"); + } - let public_ct = match params[1].items.iter().next() { - None => None, - Some((_, CausalValue { causality, value })) => { - if value.iter().any(|x| matches!(x, K2vValue::Value(_))) { - bail!("key storage already initialized"); - } - Some(causality.clone()) - } - }; + let public_ct = params[1].to_ref(); + if params[1].content().iter().any(|x| matches!(x, Alternative::Value(_))) { + bail!("key storage already initialized"); + } Ok((salt_ct, public_ct)) } @@ -511,37 +490,3 @@ pub fn argon2_kdf(salt: &[u8], password: &[u8], output_len: usize) -> Result( - partition_key: &'a str, - sort_key: &'a str, - tombstones: bool, -) -> BatchReadOp<'a> { - BatchReadOp { - partition_key, - filter: Filter { - start: Some(sort_key), - end: None, - prefix: None, - limit: None, - reverse: false, - }, - conflicts_only: false, - tombstones, - single_item: true, - 
} -} - -pub fn k2v_insert_single_key<'a>( - partition_key: &'a str, - sort_key: &'a str, - causality: Option, - value: impl AsRef<[u8]>, -) -> BatchInsertOp<'a> { - BatchInsertOp { - partition_key, - sort_key, - causality, - value: K2vValue::Value(value.as_ref().to_vec()), - } -} diff --git a/src/storage/garage.rs b/src/storage/garage.rs index ad33769..0a22928 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -85,7 +85,11 @@ impl IRowValue for GrgValue { } } - +impl std::fmt::Debug for GrgValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + unimplemented!(); + } +} /* diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index cde2335..49169d7 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -90,4 +90,8 @@ impl IRowValue for MemValue { } } - +impl std::fmt::Debug for MemValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + unimplemented!(); + } +} diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c948a08..324e6b9 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -44,6 +44,7 @@ impl std::fmt::Display for StorageError { match self { Self::NotFound => f.write_str("Item not found"), Self::Internal => f.write_str("An internal error occured"), + Self::IncompatibleOrphan => f.write_str("Incompatible orphan"), } } } @@ -110,7 +111,7 @@ pub type RowRef = Box; }*/ -pub trait IRowValue +pub trait IRowValue: std::fmt::Debug { fn to_ref(&self) -> RowRef; fn content(&self) -> ConcurrentValues; -- cgit v1.2.3 From a7c9d554f6523c384cc0a14a789e0c8d9070e605 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 21 Nov 2023 15:09:39 +0100 Subject: fix login mod --- src/login/mod.rs | 138 ++++++++++++++++++------------------------- src/login/static_provider.rs | 19 ------ src/storage/garage.rs | 2 +- src/storage/in_memory.rs | 2 +- src/storage/mod.rs | 9 +-- 5 files changed, 62 insertions(+), 108 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index 050151d..216c340 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -1,8 +1,8 @@ pub mod ldap_provider; pub mod static_provider; -use std::collections::BTreeMap; use std::sync::Arc; +use futures::try_join; use anyhow::{anyhow, bail, Context, Result}; use async_trait::async_trait; @@ -122,13 +122,14 @@ impl CryptoKeys { let password_blob = [&kdf_salt[..], &password_sealed].concat(); // Write values to storage - k2v.insert_batch(&[ - k2v_insert_single_key("keys", "salt", salt_ct, ident_salt), - k2v_insert_single_key("keys", "public", public_ct, keys.public), - k2v_insert_single_key("keys", &password_sortkey, None, &password_blob), - ]) - .await - .context("InsertBatch for salt, public, and password")?; + // @FIXME Implement insert batch in the storage API + let (salt, public, passwd) = ( + salt_ct.set_value(&ident_salt), + public_ct.set_value(keys.public.as_ref()), + k2v.row("keys", &password_sortkey).set_value(&password_blob) + ); + try_join!(salt.push(), public.push(), passwd.push()) + .context("InsertBatch for salt, public, and password")?; Ok(keys) } @@ -155,12 +156,13 @@ impl CryptoKeys { }; // Write values to storage - k2v.insert_batch(&[ - k2v_insert_single_key("keys", "salt", salt_ct, ident_salt), - k2v_insert_single_key("keys", "public", public_ct, keys.public), - ]) - .await - .context("InsertBatch for salt and public")?; + // @FIXME implement insert batch in the storage API + let (salt, public) = ( + salt_ct.set_value(&ident_salt), + public_ct.set_value(keys.public.as_ref()), + ); + + 
try_join!(salt.push(), public.push()).context("InsertBatch for salt and public")?; Ok(keys) } @@ -170,7 +172,7 @@ impl CryptoKeys { user_secrets: &UserSecrets, password: &str, ) -> Result { - let k2v = storage.k2v_client()?; + let k2v = storage.row_store()?; let (ident_salt, expected_public) = Self::load_salt_and_public(&k2v).await?; // Generate short password digest (= password identity) @@ -178,20 +180,21 @@ impl CryptoKeys { // Lookup password blob let password_sortkey = format!("password:{}", hex::encode(&ident)); + let password_ref = k2v.row("keys", &password_sortkey); let password_blob = { - let mut val = match k2v.read_item("keys", &password_sortkey).await { - Err(k2v_client::Error::NotFound) => { + let val = match password_ref.fetch().await { + Err(StorageError::NotFound) => { bail!("invalid password") } x => x?, }; - if val.value.len() != 1 { + if val.content().len() != 1 { bail!("multiple values for password in storage"); } - match val.value.pop().unwrap() { - K2vValue::Value(v) => v, - K2vValue::Tombstone => bail!("invalid password"), + match val.content().pop().unwrap() { + Alternative::Value(v) => v, + Alternative::Tombstone => bail!("invalid password"), } }; @@ -258,26 +261,24 @@ impl CryptoKeys { let password_blob = [&kdf_salt[..], &password_sealed].concat(); // List existing passwords to overwrite existing entry if necessary - let ct = match k2v.read_item("keys", &password_sortkey).await { - Err(k2v_client::Error::NotFound) => None, + let pass_key = k2v.row("keys", &password_sortkey); + let passwd = match pass_key.fetch().await { + Err(StorageError::NotFound) => pass_key, v => { let entry = v?; - if entry.value.iter().any(|x| matches!(x, K2vValue::Value(_))) { + if entry.content().iter().any(|x| matches!(x, Alternative::Value(_))) { bail!("password already exists"); } - Some(entry.causality) + entry.to_ref() } }; // Write values to storage - k2v.insert_batch(&[k2v_insert_single_key( - "keys", - &password_sortkey, - ct, - &password_blob, - )]) - .await - .context("InsertBatch for new password")?; + passwd + .set_value(&password_blob) + .push() + .await + .context("InsertBatch for new password")?; Ok(()) } @@ -287,7 +288,7 @@ impl CryptoKeys { password: &str, allow_delete_all: bool, ) -> Result<()> { - let k2v = storage.row_client()?; + let k2v = storage.row_store()?; let (ident_salt, _public) = Self::load_salt_and_public(&k2v).await?; // Generate short password digest (= password identity) @@ -299,22 +300,23 @@ impl CryptoKeys { // Check password is there let pw = existing_passwords - .get(&password_sortkey) + .iter() + .map(|x| x.to_ref()) + .find(|x| x.key().1 == &password_sortkey) + //.get(&password_sortkey) .ok_or(anyhow!("password does not exist"))?; if !allow_delete_all && existing_passwords.len() < 2 { bail!("No other password exists, not deleting last password."); } - k2v.delete_item("keys", &password_sortkey, pw.causality.clone()) - .await - .context("DeleteItem for password")?; + pw.rm().await.context("DeleteItem for password")?; Ok(()) } // ---- STORAGE UTIL ---- - + // async fn check_uninitialized( k2v: &RowStore, ) -> Result<(RowRef, RowRef)> { @@ -346,32 +348,29 @@ impl CryptoKeys { Ok((salt_ct, public_ct)) } - pub async fn load_salt_and_public(k2v: &K2vClient) -> Result<([u8; 32], PublicKey)> { - let mut params = k2v - .read_batch(&[ - k2v_read_single_key("keys", "salt", false), - k2v_read_single_key("keys", "public", false), - ]) + pub async fn load_salt_and_public(k2v: &RowStore) -> Result<([u8; 32], PublicKey)> { + let params = k2v + 
.select(Selector::List(vec![ + ("keys", "salt"), + ("keys", "public"), + ])) .await .context("ReadBatch for salt and public in load_salt_and_public")?; + if params.len() != 2 { bail!( "Invalid response from k2v storage: {:?} (expected two items)", params ); } - if params[0].items.len() != 1 || params[1].items.len() != 1 { + if params[0].content().len() != 1 || params[1].content().len() != 1 { bail!("cryptographic keys not initialized for user"); } // Retrieve salt from given response - let salt_vals = &mut params[0].items.iter_mut().next().unwrap().1.value; - if salt_vals.len() != 1 { - bail!("Multiple values for `salt`"); - } - let salt: Vec = match &mut salt_vals[0] { - K2vValue::Value(v) => std::mem::take(v), - K2vValue::Tombstone => bail!("salt is a tombstone"), + let salt: Vec = match &mut params[0].content().iter_mut().next().unwrap() { + Alternative::Value(v) => std::mem::take(v), + Alternative::Tombstone => bail!("salt is a tombstone"), }; if salt.len() != 32 { bail!("`salt` is not 32 bytes long"); @@ -380,40 +379,21 @@ impl CryptoKeys { salt_constlen.copy_from_slice(&salt); // Retrieve public from given response - let public_vals = &mut params[1].items.iter_mut().next().unwrap().1.value; - if public_vals.len() != 1 { - bail!("Multiple values for `public`"); - } - let public: Vec = match &mut public_vals[0] { - K2vValue::Value(v) => std::mem::take(v), - K2vValue::Tombstone => bail!("public is a tombstone"), + let public: Vec = match &mut params[1].content().iter_mut().next().unwrap() { + Alternative::Value(v) => std::mem::take(v), + Alternative::Tombstone => bail!("public is a tombstone"), }; let public = PublicKey::from_slice(&public).ok_or(anyhow!("Invalid public key length"))?; Ok((salt_constlen, public)) } - async fn list_existing_passwords(k2v: &K2vClient) -> Result> { - let mut res = k2v - .read_batch(&[BatchReadOp { - partition_key: "keys", - filter: Filter { - start: None, - end: None, - prefix: Some("password:"), - limit: None, - reverse: false, - }, - conflicts_only: false, - tombstones: false, - single_item: false, - }]) + async fn list_existing_passwords(k2v: &RowStore) -> Result> { + let res = k2v.select(Selector::Prefix { shard_key: "keys", prefix: "password:" }) .await .context("ReadBatch for prefix password: in list_existing_passwords")?; - if res.len() != 1 { - bail!("unexpected k2v result: {:?}, expected one item", res); - } - Ok(res.pop().unwrap().items) + + Ok(res) } fn serialize(&self) -> [u8; 64] { diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index d013c6f..0e86cff 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -51,31 +51,12 @@ impl LoginProvider for StaticLoginProvider { bail!("Wrong password"); } - /* - tracing::debug!(user=%username, "fetch bucket"); - let bucket = user - .bucket - .clone() - .or_else(|| self.default_bucket.clone()) - .ok_or(anyhow!( - "No bucket configured and no default bucket specified" - ))?;*/ - tracing::debug!(user=%username, "fetch keys"); let storage: storage::Builders = match user.storage { StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), StaticStorage::Garage(c) => Box::new(storage::garage::GrgCreds {}), }; - /* - StorageCredentials { - k2v_region: self.k2v_region.clone(), - s3_region: self.s3_region.clone(), - aws_access_key_id: user.aws_access_key_id.clone(), - aws_secret_access_key: user.aws_secret_access_key.clone(), - bucket, - };*/ - let keys = match (&user.master_key, &user.secret_key) { (Some(m), Some(s)) => { let master_key = diff --git 
a/src/storage/garage.rs b/src/storage/garage.rs index 0a22928..d9c768f 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -53,7 +53,7 @@ impl IRowRef for GrgRef { unimplemented!(); } - fn set_value(&self, content: Vec) -> RowValue { + fn set_value(&self, content: &[u8]) -> RowValue { unimplemented!(); } fn fetch(&self) -> AsyncResult { diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 49169d7..5ba9461 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -55,7 +55,7 @@ impl IRowRef for MemRef { unimplemented!(); }*/ - fn set_value(&self, content: Vec) -> RowValue { + fn set_value(&self, content: &[u8]) -> RowValue { unimplemented!(); } fn fetch(&self) -> AsyncResult { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 324e6b9..c002278 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -95,21 +95,14 @@ pub type RowStore = Box; pub trait IRowRef: std::fmt::Debug { - /*fn clone_boxed(&self) -> RowRef;*/ fn to_orphan(&self) -> OrphanRowRef; fn key(&self) -> (&str, &str); - fn set_value(&self, content: Vec) -> RowValue; + fn set_value(&self, content: &[u8]) -> RowValue; fn fetch(&self) -> AsyncResult; fn rm(&self) -> AsyncResult<()>; fn poll(&self) -> AsyncResult; } pub type RowRef = Box; -/*impl Clone for RowRef { - fn clone(&self) -> Self { - return self.clone_boxed() - } -}*/ - pub trait IRowValue: std::fmt::Debug { -- cgit v1.2.3 From 14c7a96c282e20ff0d5343a7a378554f34983d21 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 23 Nov 2023 15:04:47 +0100 Subject: extract setup logic --- src/future_rest_admin_api.txt | 174 +++++++++++++++++++++++++++++ src/mail/user.rs | 4 +- src/main.rs | 249 +----------------------------------------- src/server.rs | 2 +- 4 files changed, 179 insertions(+), 250 deletions(-) create mode 100644 src/future_rest_admin_api.txt (limited to 'src') diff --git a/src/future_rest_admin_api.txt b/src/future_rest_admin_api.txt new file mode 100644 index 0000000..19ece27 --- /dev/null +++ b/src/future_rest_admin_api.txt @@ -0,0 +1,174 @@ + Command::FirstLogin { + creds, + user_secrets, + } => { + let creds = make_storage_creds(creds); + let user_secrets = make_user_secrets(user_secrets); + + println!("Please enter your password for key decryption."); + println!("If you are using LDAP login, this must be your LDAP password."); + println!("If you are using the static login provider, enter any password, and this will also become your password for local IMAP access."); + let password = rpassword::prompt_password("Enter password: ")?; + let password_confirm = rpassword::prompt_password("Confirm password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); + } + + CryptoKeys::init(&creds, &user_secrets, &password).await?; + + println!(""); + println!("Cryptographic key setup is complete."); + println!(""); + println!("If you are using the static login provider, add the following section to your .toml configuration file:"); + println!(""); + dump_config(&password, &creds); + } + Command::InitializeLocalKeys { creds } => { + let creds = make_storage_creds(creds); + + println!("Please enter a password for local IMAP access."); + println!("This password is not used for key decryption, your keys will be printed below (do not lose them!)"); + println!( + "If you plan on using LDAP login, stop right here and use `first-login` instead" + ); + let password = rpassword::prompt_password("Enter password: ")?; + let password_confirm = rpassword::prompt_password("Confirm password: ")?; + if 
password != password_confirm { + bail!("Passwords don't match."); + } + + let master = gen_key(); + let (_, secret) = gen_keypair(); + let keys = CryptoKeys::init_without_password(&creds, &master, &secret).await?; + + println!(""); + println!("Cryptographic key setup is complete."); + println!(""); + println!("Add the following section to your .toml configuration file:"); + println!(""); + dump_config(&password, &creds); + dump_keys(&keys); + } + Command::AddPassword { + creds, + user_secrets, + gen, + } => { + let creds = make_storage_creds(creds); + let user_secrets = make_user_secrets(user_secrets); + + let existing_password = + rpassword::prompt_password("Enter existing password to decrypt keys: ")?; + let new_password = if gen { + let password = base64::encode_config( + &u128::to_be_bytes(thread_rng().gen())[..10], + base64::URL_SAFE_NO_PAD, + ); + println!("Your new password: {}", password); + println!("Keep it safe!"); + password + } else { + let password = rpassword::prompt_password("Enter new password: ")?; + let password_confirm = rpassword::prompt_password("Confirm new password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); + } + password + }; + + let keys = CryptoKeys::open(&creds, &user_secrets, &existing_password).await?; + keys.add_password(&creds, &user_secrets, &new_password) + .await?; + println!(""); + println!("New password added successfully."); + } + Command::DeletePassword { + creds, + user_secrets, + allow_delete_all, + } => { + let creds = make_storage_creds(creds); + let user_secrets = make_user_secrets(user_secrets); + + let existing_password = rpassword::prompt_password("Enter password to delete: ")?; + + let keys = match allow_delete_all { + true => Some(CryptoKeys::open(&creds, &user_secrets, &existing_password).await?), + false => None, + }; + + CryptoKeys::delete_password(&creds, &existing_password, allow_delete_all).await?; + + println!(""); + println!("Password was deleted successfully."); + + if let Some(keys) = keys { + println!("As a reminder, here are your cryptographic keys:"); + dump_keys(&keys); + } + } + Command::ShowKeys { + creds, + user_secrets, + } => { + let creds = make_storage_creds(creds); + let user_secrets = make_user_secrets(user_secrets); + + let existing_password = rpassword::prompt_password("Enter key decryption password: ")?; + + let keys = CryptoKeys::open(&creds, &user_secrets, &existing_password).await?; + dump_keys(&keys); + } + } + + Ok(()) +} + +fn make_storage_creds(c: StorageCredsArgs) -> StorageCredentials { + let s3_region = Region { + name: c.region.clone(), + endpoint: c.s3_endpoint, + }; + let k2v_region = Region { + name: c.region, + endpoint: c.k2v_endpoint, + }; + StorageCredentials { + k2v_region, + s3_region, + aws_access_key_id: c.aws_access_key_id, + aws_secret_access_key: c.aws_secret_access_key, + bucket: c.bucket, + } +} + +fn make_user_secrets(c: UserSecretsArgs) -> UserSecrets { + UserSecrets { + user_secret: c.user_secret, + alternate_user_secrets: c + .alternate_user_secrets + .split(',') + .map(|x| x.trim()) + .filter(|x| !x.is_empty()) + .map(|x| x.to_string()) + .collect(), + } +} + +fn dump_config(password: &str, creds: &StorageCredentials) { + println!("[login_static.users.]"); + println!( + "password = \"{}\"", + hash_password(password).expect("unable to hash password") + ); + println!("aws_access_key_id = \"{}\"", creds.aws_access_key_id); + println!( + "aws_secret_access_key = \"{}\"", + creds.aws_secret_access_key + ); +} + +fn dump_keys(keys: &CryptoKeys) { + 
println!("master_key = \"{}\"", base64::encode(&keys.master)); + println!("secret_key = \"{}\"", base64::encode(&keys.secret)); +} diff --git a/src/mail/user.rs b/src/mail/user.rs index 7a3e5c7..bdfb30c 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use tokio::sync::watch; use crate::cryptoblob::{open_deserialize, seal_serialize}; -use crate::login::{Credentials, StorageCredentials}; +use crate::login::Credentials; use crate::mail::incoming::incoming_mail_watch_process; use crate::mail::mailbox::Mailbox; use crate::mail::uidindex::ImapUidvalidity; @@ -309,7 +309,7 @@ impl User { Some(x) => self.k2v.from_orphan(x).expect("Source & target must be same storage"), None => self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK), }; - rref.set_value(list_blob).push().await?; + rref.set_value(&list_blob).push().await?; Ok(()) } } diff --git a/src/main.rs b/src/main.rs index f395143..9efd9a5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,13 +14,10 @@ mod storage; use std::path::PathBuf; -use anyhow::{bail, Result}; +use anyhow::Result; use clap::{Parser, Subcommand}; -use rand::prelude::*; use config::*; -use cryptoblob::*; -use login::{static_provider::*, *}; use server::Server; #[derive(Parser, Debug)] @@ -36,74 +33,7 @@ enum Command { Server { #[clap(short, long, env = "CONFIG_FILE", default_value = "aerogramme.toml")] config_file: PathBuf, - }, - /// TEST TEST TEST - Test { - #[clap(short, long, env = "CONFIG_FILE", default_value = "aerogramme.toml")] - config_file: PathBuf, - }, - /// Initializes key pairs for a user and adds a key decryption password - FirstLogin { - #[clap(flatten)] - creds: StorageCredsArgs, - #[clap(flatten)] - user_secrets: UserSecretsArgs, - }, - /// Initializes key pairs for a user and dumps keys to stdout for usage with static - /// login provider - InitializeLocalKeys { - #[clap(flatten)] - creds: StorageCredsArgs, - }, - /// Adds a key decryption password for a user - AddPassword { - #[clap(flatten)] - creds: StorageCredsArgs, - #[clap(flatten)] - user_secrets: UserSecretsArgs, - /// Automatically generate password - #[clap(short, long)] - gen: bool, - }, - /// Deletes a key decription password for a user - DeletePassword { - #[clap(flatten)] - creds: StorageCredsArgs, - #[clap(flatten)] - user_secrets: UserSecretsArgs, - /// Allow to delete all passwords - #[clap(long)] - allow_delete_all: bool, - }, - /// Dumps all encryption keys for user - ShowKeys { - #[clap(flatten)] - creds: StorageCredsArgs, - #[clap(flatten)] - user_secrets: UserSecretsArgs, - }, -} - -#[derive(Parser, Debug)] -struct StorageCredsArgs { - /// Name of the region to use - #[clap(short = 'r', long, env = "AWS_REGION")] - region: String, - /// Url of the endpoint to connect to for K2V - #[clap(short = 'k', long, env = "K2V_ENDPOINT")] - k2v_endpoint: String, - /// Url of the endpoint to connect to for S3 - #[clap(short = 's', long, env = "S3_ENDPOINT")] - s3_endpoint: String, - /// Access key ID - #[clap(short = 'A', long, env = "AWS_ACCESS_KEY_ID")] - aws_access_key_id: String, - /// Access key ID - #[clap(short = 'S', long, env = "AWS_SECRET_ACCESS_KEY")] - aws_secret_access_key: String, - /// Bucket name - #[clap(short = 'b', long, env = "BUCKET")] - bucket: String, + } } #[derive(Parser, Debug)] @@ -140,183 +70,8 @@ async fn main() -> Result<()> { let server = Server::new(config).await?; server.run().await?; } - Command::Test { config_file } => { - let config = read_config(config_file)?; - - let _server = Server::new(config).await?; - 
//server.test().await?; - } - Command::FirstLogin { - creds, - user_secrets, - } => { - let creds = make_storage_creds(creds); - let user_secrets = make_user_secrets(user_secrets); - - println!("Please enter your password for key decryption."); - println!("If you are using LDAP login, this must be your LDAP password."); - println!("If you are using the static login provider, enter any password, and this will also become your password for local IMAP access."); - let password = rpassword::prompt_password("Enter password: ")?; - let password_confirm = rpassword::prompt_password("Confirm password: ")?; - if password != password_confirm { - bail!("Passwords don't match."); - } - - CryptoKeys::init(&creds, &user_secrets, &password).await?; - - println!(""); - println!("Cryptographic key setup is complete."); - println!(""); - println!("If you are using the static login provider, add the following section to your .toml configuration file:"); - println!(""); - dump_config(&password, &creds); - } - Command::InitializeLocalKeys { creds } => { - let creds = make_storage_creds(creds); - - println!("Please enter a password for local IMAP access."); - println!("This password is not used for key decryption, your keys will be printed below (do not lose them!)"); - println!( - "If you plan on using LDAP login, stop right here and use `first-login` instead" - ); - let password = rpassword::prompt_password("Enter password: ")?; - let password_confirm = rpassword::prompt_password("Confirm password: ")?; - if password != password_confirm { - bail!("Passwords don't match."); - } - - let master = gen_key(); - let (_, secret) = gen_keypair(); - let keys = CryptoKeys::init_without_password(&creds, &master, &secret).await?; - - println!(""); - println!("Cryptographic key setup is complete."); - println!(""); - println!("Add the following section to your .toml configuration file:"); - println!(""); - dump_config(&password, &creds); - dump_keys(&keys); - } - Command::AddPassword { - creds, - user_secrets, - gen, - } => { - let creds = make_storage_creds(creds); - let user_secrets = make_user_secrets(user_secrets); - - let existing_password = - rpassword::prompt_password("Enter existing password to decrypt keys: ")?; - let new_password = if gen { - let password = base64::encode_config( - &u128::to_be_bytes(thread_rng().gen())[..10], - base64::URL_SAFE_NO_PAD, - ); - println!("Your new password: {}", password); - println!("Keep it safe!"); - password - } else { - let password = rpassword::prompt_password("Enter new password: ")?; - let password_confirm = rpassword::prompt_password("Confirm new password: ")?; - if password != password_confirm { - bail!("Passwords don't match."); - } - password - }; - - let keys = CryptoKeys::open(&creds, &user_secrets, &existing_password).await?; - keys.add_password(&creds, &user_secrets, &new_password) - .await?; - println!(""); - println!("New password added successfully."); - } - Command::DeletePassword { - creds, - user_secrets, - allow_delete_all, - } => { - let creds = make_storage_creds(creds); - let user_secrets = make_user_secrets(user_secrets); - - let existing_password = rpassword::prompt_password("Enter password to delete: ")?; - - let keys = match allow_delete_all { - true => Some(CryptoKeys::open(&creds, &user_secrets, &existing_password).await?), - false => None, - }; - - CryptoKeys::delete_password(&creds, &existing_password, allow_delete_all).await?; - - println!(""); - println!("Password was deleted successfully."); - - if let Some(keys) = keys { - println!("As a 
reminder, here are your cryptographic keys:"); - dump_keys(&keys); - } - } - Command::ShowKeys { - creds, - user_secrets, - } => { - let creds = make_storage_creds(creds); - let user_secrets = make_user_secrets(user_secrets); - - let existing_password = rpassword::prompt_password("Enter key decryption password: ")?; - - let keys = CryptoKeys::open(&creds, &user_secrets, &existing_password).await?; - dump_keys(&keys); - } } Ok(()) } -fn make_storage_creds(c: StorageCredsArgs) -> StorageCredentials { - let s3_region = Region { - name: c.region.clone(), - endpoint: c.s3_endpoint, - }; - let k2v_region = Region { - name: c.region, - endpoint: c.k2v_endpoint, - }; - StorageCredentials { - k2v_region, - s3_region, - aws_access_key_id: c.aws_access_key_id, - aws_secret_access_key: c.aws_secret_access_key, - bucket: c.bucket, - } -} - -fn make_user_secrets(c: UserSecretsArgs) -> UserSecrets { - UserSecrets { - user_secret: c.user_secret, - alternate_user_secrets: c - .alternate_user_secrets - .split(',') - .map(|x| x.trim()) - .filter(|x| !x.is_empty()) - .map(|x| x.to_string()) - .collect(), - } -} - -fn dump_config(password: &str, creds: &StorageCredentials) { - println!("[login_static.users.]"); - println!( - "password = \"{}\"", - hash_password(password).expect("unable to hash password") - ); - println!("aws_access_key_id = \"{}\"", creds.aws_access_key_id); - println!( - "aws_secret_access_key = \"{}\"", - creds.aws_secret_access_key - ); -} - -fn dump_keys(keys: &CryptoKeys) { - println!("master_key = \"{}\"", base64::encode(&keys.master)); - println!("secret_key = \"{}\"", base64::encode(&keys.secret)); -} diff --git a/src/server.rs b/src/server.rs index 3485a61..eca11ad 100644 --- a/src/server.rs +++ b/src/server.rs @@ -9,7 +9,7 @@ use crate::config::*; use crate::imap; use crate::lmtp::*; use crate::login::ArcLoginProvider; -use crate::login::{ldap_provider::*, static_provider::*, Region}; +use crate::login::{ldap_provider::*, static_provider::*}; pub struct Server { lmtp_server: Option>, -- cgit v1.2.3 From 8cd9801030e24c58621b3bed8723e8a8a4722ef8 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 23 Nov 2023 15:16:44 +0100 Subject: various fixes --- src/bayou.rs | 4 ++-- src/login/static_provider.rs | 6 +++--- src/mail/incoming.rs | 4 ++-- src/mail/mailbox.rs | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index 72c2b80..3042f94 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -265,7 +265,7 @@ impl Bayou { ); self.k2v .row(&self.path, &ts.to_string()) - .set_value(seal_serialize(&op, &self.key)?) + .set_value(&seal_serialize(&op, &self.key)?) 
.push() .await?; @@ -500,7 +500,7 @@ impl K2vWatch { } _ = this.notify.notified() => { let rand = u128::to_be_bytes(thread_rng().gen()).to_vec(); - if let Err(e) = row.set_value(rand).push().await + if let Err(e) = row.set_value(&rand).push().await { error!("Error in bayou k2v watch updater loop: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 0e86cff..bd22060 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -104,11 +104,11 @@ impl LoginProvider for StaticLoginProvider { bucket, };*/ let storage: storage::Builders = match user.storage { - StaticStorage::InMemory => X, - StaticStorage::Garage => Y, + StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), + StaticStorage::Garage(c) => Box::new(storage::garage::GrgCreds {}), }; - let k2v_client = storage.k2v_client()?; + let k2v_client = storage.row_store()?; let (_, public_key) = CryptoKeys::load_salt_and_public(&k2v_client).await?; Ok(PublicCredentials { diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index da4c819..e3c729f 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -361,7 +361,7 @@ async fn k2v_lock_loop_internal( Some(orphan) => k2v.from_orphan(orphan).expect("Source & target must be storage compatible"), None => k2v.row(pk, sk), }; - if let Err(e) = row.set_value(lock).push().await { + if let Err(e) = row.set_value(&lock).push().await { error!("Could not take lock: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; } @@ -432,7 +432,7 @@ impl EncryptedMessage { send.push().await?; // Update watch key to signal new mail - watch_ct.set_value(gen_ident().0.to_vec()).push().await?; + watch_ct.set_value(gen_ident().0.as_ref()).push().await?; Ok(()) } diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index f27d50a..060267a 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -282,7 +282,7 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.k2v.row(&self.mail_path, &ident.to_string()).set_value(meta_blob).push().await?; + self.k2v.row(&self.mail_path, &ident.to_string()).set_value(&meta_blob).push().await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -326,7 +326,7 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.k2v.row(&self.mail_path, &ident.to_string()).set_value(meta_blob).push().await?; + self.k2v.row(&self.mail_path, &ident.to_string()).set_value(&meta_blob).push().await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -410,7 +410,7 @@ impl MailboxInternal { // Copy mail meta in K2V let meta = &from.fetch_meta(&[source_id]).await?[0]; let meta_blob = seal_serialize(meta, &self.encryption_key)?; - self.k2v.row(&self.mail_path, &new_id.to_string()).set_value(meta_blob).push().await?; + self.k2v.row(&self.mail_path, &new_id.to_string()).set_value(&meta_blob).push().await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync(), -- cgit v1.2.3 From 0722886efbeef3713bd7a671d2c09c8af2bdb6bd Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 23 Nov 2023 17:19:35 +0100 Subject: it compiles! 
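This commit wires the per-user storage choice through to the login providers: instead of fixed S3/K2V credentials, each user entry now yields a boxed `storage::Builders` trait object, either in-memory or Garage-backed. As a rough, self-contained sketch of that "config enum -> boxed builder" pattern (toy names only, not this crate's actual traits):

    // Illustrative only: stand-ins for storage::Builders / GrgCreds / FullMem.
    trait Builder {
        fn describe(&self) -> String; // stands in for row_store()/blob_store()
    }

    struct InMemory;
    struct Garage { s3_endpoint: String, bucket: String }

    impl Builder for InMemory {
        fn describe(&self) -> String { "in-memory storage".into() }
    }
    impl Builder for Garage {
        fn describe(&self) -> String {
            format!("garage storage at {} (bucket {})", self.s3_endpoint, self.bucket)
        }
    }

    enum StorageConfig {
        InMemory,
        Garage { s3_endpoint: String, bucket: String },
    }

    fn builder_from_config(cfg: &StorageConfig) -> Box<dyn Builder> {
        match cfg {
            StorageConfig::InMemory => Box::new(InMemory),
            StorageConfig::Garage { s3_endpoint, bucket } => Box::new(Garage {
                s3_endpoint: s3_endpoint.clone(),
                bucket: bucket.clone(),
            }),
        }
    }

    fn main() {
        let cfg = StorageConfig::Garage {
            s3_endpoint: "http://localhost:3900".into(),
            bucket: "aerogramme".into(),
        };
        println!("{}", builder_from_config(&cfg).describe());
    }

The real code returns the trait objects defined in src/storage/mod.rs (`Builders`, `RowStore`, ...) rather than strings, but the selection logic in the hunks below follows this shape.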
--- src/config.rs | 2 +- src/login/ldap_provider.rs | 90 ++++++++++++++++++++++++++------------------ src/login/static_provider.rs | 38 +++++++++---------- src/storage/garage.rs | 9 ++++- 4 files changed, 80 insertions(+), 59 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 477968e..34940f2 100644 --- a/src/config.rs +++ b/src/config.rs @@ -31,7 +31,7 @@ pub struct StaticGarageConfig { pub aws_access_key_id: String, pub aws_secret_access_key: String, - pub bucket: Option, + pub bucket: String, } #[derive(Serialize, Deserialize, Debug, Clone)] diff --git a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index 2eeb6d9..561b1c2 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -5,10 +5,9 @@ use log::debug; use crate::config::*; use crate::login::*; +use crate::storage; pub struct LdapLoginProvider { - k2v_region: Region, - s3_region: Region, ldap_server: String, pre_bind_on_login: bool, @@ -19,12 +18,9 @@ pub struct LdapLoginProvider { username_attr: String, mail_attr: String, - aws_access_key_id_attr: String, - aws_secret_access_key_attr: String, + storage_specific: StorageSpecific, user_secret_attr: String, alternate_user_secrets_attr: Option, - - bucket_source: BucketSource, } enum BucketSource { @@ -32,8 +28,13 @@ enum BucketSource { Attr(String), } +enum StorageSpecific { + InMemory, + Garage { from_config: LdapGarageConfig, bucket_source: BucketSource }, +} + impl LdapLoginProvider { - pub fn new(config: LoginLdapConfig, k2v_region: Region, s3_region: Region) -> Result { + pub fn new(config: LoginLdapConfig) -> Result { let bind_dn_and_pw = match (config.bind_dn, config.bind_password) { (Some(dn), Some(pw)) => Some((dn, pw)), (None, None) => None, @@ -42,12 +43,6 @@ impl LdapLoginProvider { ), }; - let bucket_source = match (config.bucket, config.bucket_attr) { - (Some(b), None) => BucketSource::Constant(b), - (None, Some(a)) => BucketSource::Attr(a), - _ => bail!("Must set `bucket` or `bucket_attr`, but not both"), - }; - if config.pre_bind_on_login && bind_dn_and_pw.is_none() { bail!("Cannot use `pre_bind_on_login` without setting `bind_dn` and `bind_password`"); } @@ -55,20 +50,34 @@ impl LdapLoginProvider { let mut attrs_to_retrieve = vec![ config.username_attr.clone(), config.mail_attr.clone(), - config.aws_access_key_id_attr.clone(), - config.aws_secret_access_key_attr.clone(), config.user_secret_attr.clone(), ]; + if let Some(a) = &config.alternate_user_secrets_attr { attrs_to_retrieve.push(a.clone()); } - if let BucketSource::Attr(a) = &bucket_source { - attrs_to_retrieve.push(a.clone()); - } + + // storage specific + let specific = match config.storage { + LdapStorage::InMemory => StorageSpecific::InMemory, + LdapStorage::Garage(grgconf) => { + let bucket_source = match (grgconf.default_bucket.clone(), grgconf.bucket_attr.clone()) { + (Some(b), None) => BucketSource::Constant(b), + (None, Some(a)) => BucketSource::Attr(a), + _ => bail!("Must set `bucket` or `bucket_attr`, but not both"), + }; + + if let BucketSource::Attr(a) = &bucket_source { + attrs_to_retrieve.push(a.clone()); + } + + StorageSpecific::Garage { from_config: grgconf, bucket_source } + }, + }; + + Ok(Self { - k2v_region, - s3_region, ldap_server: config.ldap_server, pre_bind_on_login: config.pre_bind_on_login, bind_dn_and_pw, @@ -76,29 +85,36 @@ impl LdapLoginProvider { attrs_to_retrieve, username_attr: config.username_attr, mail_attr: config.mail_attr, - aws_access_key_id_attr: config.aws_access_key_id_attr, - aws_secret_access_key_attr: 
config.aws_secret_access_key_attr, + storage_specific: specific, user_secret_attr: config.user_secret_attr, alternate_user_secrets_attr: config.alternate_user_secrets_attr, - bucket_source, }) } - fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { - let aws_access_key_id = get_attr(user, &self.aws_access_key_id_attr)?; - let aws_secret_access_key = get_attr(user, &self.aws_secret_access_key_attr)?; - let bucket = match &self.bucket_source { - BucketSource::Constant(b) => b.clone(), - BucketSource::Attr(a) => get_attr(user, a)?, + fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { + let storage: Builders = match &self.storage_specific { + StorageSpecific::InMemory => Box::new(storage::in_memory::FullMem {}), + StorageSpecific::Garage { from_config, bucket_source } => { + let aws_access_key_id = get_attr(user, &from_config.aws_access_key_id_attr)?; + let aws_secret_access_key = get_attr(user, &from_config.aws_secret_access_key_attr)?; + let bucket = match bucket_source { + BucketSource::Constant(b) => b.clone(), + BucketSource::Attr(a) => get_attr(user, &a)?, + }; + + + Box::new(storage::garage::GrgCreds { + region: from_config.aws_region.clone(), + s3_endpoint: from_config.s3_endpoint.clone(), + k2v_endpoint: from_config.k2v_endpoint.clone(), + aws_access_key_id, + aws_secret_access_key, + bucket + }) + }, }; - Ok(StorageCredentials { - k2v_region: self.k2v_region.clone(), - s3_region: self.s3_region.clone(), - aws_access_key_id, - aws_secret_access_key, - bucket, - }) + Ok(storage) } } @@ -204,7 +220,7 @@ impl LoginProvider for LdapLoginProvider { let storage = self.storage_creds_from_ldap_user(&user)?; drop(ldap); - let k2v_client = storage.k2v_client()?; + let k2v_client = storage.row_store()?; let (_, public_key) = CryptoKeys::load_salt_and_public(&k2v_client).await?; Ok(PublicCredentials { diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index bd22060..0b726cb 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -52,9 +52,16 @@ impl LoginProvider for StaticLoginProvider { } tracing::debug!(user=%username, "fetch keys"); - let storage: storage::Builders = match user.storage { + let storage: storage::Builders = match &user.storage { StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), - StaticStorage::Garage(c) => Box::new(storage::garage::GrgCreds {}), + StaticStorage::Garage(grgconf) => Box::new(storage::garage::GrgCreds { + region: grgconf.aws_region.clone(), + k2v_endpoint: grgconf.k2v_endpoint.clone(), + s3_endpoint: grgconf.s3_endpoint.clone(), + aws_access_key_id: grgconf.aws_access_key_id.clone(), + aws_secret_access_key: grgconf.aws_secret_access_key.clone(), + bucket: grgconf.bucket.clone(), + }), }; let keys = match (&user.master_key, &user.secret_key) { @@ -87,25 +94,16 @@ impl LoginProvider for StaticLoginProvider { Some(u) => u, }; - /* - let bucket = user - .bucket - .clone() - .or_else(|| self.default_bucket.clone()) - .ok_or(anyhow!( - "No bucket configured and no default bucket specieid" - ))?; - - let storage = StorageCredentials { - k2v_region: self.k2v_region.clone(), - s3_region: self.s3_region.clone(), - aws_access_key_id: user.aws_access_key_id.clone(), - aws_secret_access_key: user.aws_secret_access_key.clone(), - bucket, - };*/ - let storage: storage::Builders = match user.storage { + let storage: storage::Builders = match &user.storage { StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), - StaticStorage::Garage(c) => 
Box::new(storage::garage::GrgCreds {}), + StaticStorage::Garage(grgconf) => Box::new(storage::garage::GrgCreds { + region: grgconf.aws_region.clone(), + k2v_endpoint: grgconf.k2v_endpoint.clone(), + s3_endpoint: grgconf.s3_endpoint.clone(), + aws_access_key_id: grgconf.aws_access_key_id.clone(), + aws_secret_access_key: grgconf.aws_secret_access_key.clone(), + bucket: grgconf.bucket.clone(), + }), }; let k2v_client = storage.row_store()?; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index d9c768f..052e812 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,7 +1,14 @@ use crate::storage::*; #[derive(Clone, Debug, Hash)] -pub struct GrgCreds {} +pub struct GrgCreds { + pub region: String, + pub s3_endpoint: String, + pub k2v_endpoint: String, + pub aws_access_key_id: String, + pub aws_secret_access_key: String, + pub bucket: String, +} pub struct GrgStore {} pub struct GrgRef {} pub struct GrgValue {} -- cgit v1.2.3 From e2581c0dfb95a3fca86bb4801f425ed519257ff9 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 24 Nov 2023 11:44:42 +0100 Subject: reworked configuration file --- src/config.rs | 4 ++++ src/main.rs | 30 +++++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 34940f2..cacc5c8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -18,6 +18,7 @@ pub struct Config { pub type LoginStaticConfig = HashMap; #[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(tag = "storage_driver")] pub enum StaticStorage { Garage(StaticGarageConfig), InMemory, @@ -47,10 +48,12 @@ pub struct LoginStaticUser { pub master_key: Option, pub secret_key: Option, + #[serde(flatten)] pub storage: StaticStorage, } #[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(tag = "storage_driver")] pub enum LdapStorage { Garage(LdapGarageConfig), InMemory, @@ -86,6 +89,7 @@ pub struct LoginLdapConfig { pub alternate_user_secrets_attr: Option, // Storage related thing + #[serde(flatten)] pub storage: LdapStorage, } diff --git a/src/main.rs b/src/main.rs index 9efd9a5..1055650 100644 --- a/src/main.rs +++ b/src/main.rs @@ -33,7 +33,8 @@ enum Command { Server { #[clap(short, long, env = "CONFIG_FILE", default_value = "aerogramme.toml")] config_file: PathBuf, - } + }, + Test, } #[derive(Parser, Debug)] @@ -70,6 +71,33 @@ async fn main() -> Result<()> { let server = Server::new(config).await?; server.run().await?; } + Command::Test => { + use std::collections::HashMap; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + println!("--- toml ---\n{}\n--- end ---\n", toml::to_string(&Config { + lmtp: None, + imap: Some(ImapConfig { bind_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) }), + login_ldap: None, + login_static: Some(HashMap::from([ + ("alice".into(), LoginStaticUser { + password: "hash".into(), + user_secret: "hello".into(), + alternate_user_secrets: vec![], + email_addresses: vec![], + master_key: None, + secret_key: None, + storage: StaticStorage::Garage(StaticGarageConfig { + s3_endpoint: "http://".into(), + k2v_endpoint: "http://".into(), + aws_region: "garage".into(), + aws_access_key_id: "GK...".into(), + aws_secret_access_key: "xxx".into(), + bucket: "aerogramme".into(), + }), + }) + ])), + }).unwrap()); + } } Ok(()) -- cgit v1.2.3 From 2779837a3750c3b2964c3a4d5d43de25218bc605 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Mon, 4 Dec 2023 16:51:27 +0100 Subject: WIP config rework --- src/config.rs | 87 
+++++++++++++++++++++++++++++++++++------------------------ src/main.rs | 2 +- 2 files changed, 53 insertions(+), 36 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index cacc5c8..286ef65 100644 --- a/src/config.rs +++ b/src/config.rs @@ -7,49 +7,43 @@ use anyhow::Result; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Config { - pub login_static: Option, - pub login_ldap: Option, +pub struct CompanionConfig { + pub pid: Option, + pub imap: ImapConfig, - pub lmtp: Option, - pub imap: Option, + #[serde(flatten)] + pub users: LoginStaticUser, } -pub type LoginStaticConfig = HashMap; +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ProviderConfig { + pub pid: Option, + pub imap: ImapConfig, + pub lmtp: LmtpConfig, + pub users: UserManagement, +} #[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(tag = "storage_driver")] -pub enum StaticStorage { - Garage(StaticGarageConfig), - InMemory, +#[serde(tag = "user_driver")] +pub enum UserManagement { + Static(LoginStaticUser), + Ldap(LoginLdapConfig), } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct StaticGarageConfig { - pub s3_endpoint: String, - pub k2v_endpoint: String, - pub aws_region: String, +pub struct LmtpConfig { + pub bind_addr: SocketAddr, + pub hostname: String, +} - pub aws_access_key_id: String, - pub aws_secret_access_key: String, - pub bucket: String, +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ImapConfig { + pub bind_addr: SocketAddr, } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct LoginStaticUser { - #[serde(default)] - pub email_addresses: Vec, - pub password: String, - - pub user_secret: String, - #[serde(default)] - pub alternate_user_secrets: Vec, - - pub master_key: Option, - pub secret_key: Option, - - #[serde(flatten)] - pub storage: StaticStorage, + pub user_list: String, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -93,17 +87,40 @@ pub struct LoginLdapConfig { pub storage: LdapStorage, } +// ---- + #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct LmtpConfig { - pub bind_addr: SocketAddr, - pub hostname: String, +#[serde(tag = "storage_driver")] +pub enum StaticStorage { + Garage(StaticGarageConfig), + InMemory, } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ImapConfig { - pub bind_addr: SocketAddr, +pub struct StaticGarageConfig { + pub s3_endpoint: String, + pub k2v_endpoint: String, + pub aws_region: String, + + pub aws_access_key_id: String, + pub aws_secret_access_key: String, + pub bucket: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct UserEntry { + #[serde(default)] + pub email_addresses: Vec, + pub password: String, + + pub master_key: Option, + pub secret_key: Option, + + #[serde(flatten)] + pub storage: StaticStorage, } +// --- pub fn read_config(config_file: PathBuf) -> Result { let mut file = std::fs::OpenOptions::new() .read(true) diff --git a/src/main.rs b/src/main.rs index 1055650..a566ec6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -74,7 +74,7 @@ async fn main() -> Result<()> { Command::Test => { use std::collections::HashMap; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - println!("--- toml ---\n{}\n--- end ---\n", toml::to_string(&Config { + println!("--- message pack ---\n{:?}\n--- end ---\n", rmp_serde::to_vec(&Config { lmtp: None, imap: Some(ImapConfig { bind_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) }), login_ldap: None, -- cgit v1.2.3 From 
3ddbce4529225fe004acde3ab2e95261673bafc3 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 6 Dec 2023 20:57:25 +0100 Subject: WIP refactor --- src/config.rs | 33 +++++++--- src/login/mod.rs | 43 ++++--------- src/login/static_provider.rs | 44 +++++++------ src/main.rs | 146 +++++++++++++++++++++++++++++-------------- src/server.rs | 40 +++++------- 5 files changed, 177 insertions(+), 129 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 286ef65..5bd7380 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,7 +12,7 @@ pub struct CompanionConfig { pub imap: ImapConfig, #[serde(flatten)] - pub users: LoginStaticUser, + pub users: LoginStaticConfig, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -26,7 +26,7 @@ pub struct ProviderConfig { #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(tag = "user_driver")] pub enum UserManagement { - Static(LoginStaticUser), + Static(LoginStaticConfig), Ldap(LoginLdapConfig), } @@ -42,8 +42,8 @@ pub struct ImapConfig { } #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct LoginStaticUser { - pub user_list: String, +pub struct LoginStaticConfig { + pub user_list: PathBuf, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -107,21 +107,40 @@ pub struct StaticGarageConfig { pub bucket: String, } +pub type UserList = HashMap; + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(tag = "crypto_root")] +pub enum CryptographyRoot { + PasswordProtected, + Keyring, + InPlace { + master_key: String, + secret_key: String, + } +} + #[derive(Serialize, Deserialize, Debug, Clone)] pub struct UserEntry { #[serde(default)] pub email_addresses: Vec, pub password: String, - pub master_key: Option, - pub secret_key: Option, + pub crypto_root: CryptographyRoot, #[serde(flatten)] pub storage: StaticStorage, } +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(tag = "role")] +pub enum AnyConfig { + Companion(CompanionConfig), + Provider(ProviderConfig), +} + // --- -pub fn read_config(config_file: PathBuf) -> Result { +pub fn read_config<'a, T: Deserialize<'a>>(config_file: PathBuf) -> Result { let mut file = std::fs::OpenOptions::new() .read(true) .open(config_file.as_path())?; diff --git a/src/login/mod.rs b/src/login/mod.rs index 216c340..a9b9efe 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -45,6 +45,7 @@ pub struct PublicCredentials { pub public_key: PublicKey, } +/* /// The struct UserSecrets represents intermediary secrets that are mixed in with the user's /// password when decrypting the cryptographic keys that are stored in their bucket. /// These secrets should be stored somewhere else (e.g. in the LDAP server or in the @@ -57,6 +58,7 @@ pub struct UserSecrets { /// with old passwords pub alternate_user_secrets: Vec, } +*/ /// The struct CryptoKeys contains the cryptographic keys used to encrypt and decrypt /// data in a user's mailbox. 
@@ -85,7 +87,6 @@ impl Credentials { impl CryptoKeys { pub async fn init( storage: &Builders, - user_secrets: &UserSecrets, password: &str, ) -> Result { // Check that salt and public don't exist already @@ -113,7 +114,7 @@ impl CryptoKeys { thread_rng().fill(&mut kdf_salt); // Calculate key for password secret box - let password_key = user_secrets.derive_password_key(&kdf_salt, password)?; + let password_key = derive_password_key(&kdf_salt, password)?; // Seal a secret box that contains our crypto keys let password_sealed = seal(&keys.serialize(), &password_key)?; @@ -169,7 +170,6 @@ impl CryptoKeys { pub async fn open( storage: &Builders, - user_secrets: &UserSecrets, password: &str, ) -> Result { let k2v = storage.row_store()?; @@ -200,8 +200,7 @@ impl CryptoKeys { // Try to open blob let kdf_salt = &password_blob[..32]; - let password_openned = - user_secrets.try_open_encrypted_keys(kdf_salt, password, &password_blob[32..])?; + let password_openned = try_open_encrypted_keys(kdf_salt, password, &password_blob[32..])?; let keys = Self::deserialize(&password_openned)?; if keys.public != expected_public { @@ -238,7 +237,6 @@ impl CryptoKeys { pub async fn add_password( &self, storage: &Builders, - user_secrets: &UserSecrets, password: &str, ) -> Result<()> { let k2v = storage.row_store()?; @@ -252,7 +250,7 @@ impl CryptoKeys { thread_rng().fill(&mut kdf_salt); // Calculate key for password secret box - let password_key = user_secrets.derive_password_key(&kdf_salt, password)?; + let password_key = derive_password_key(&kdf_salt, password)?; // Seal a secret box that contains our crypto keys let password_sealed = seal(&self.serialize(), &password_key)?; @@ -418,32 +416,13 @@ impl CryptoKeys { } } -impl UserSecrets { - fn derive_password_key_with(user_secret: &str, kdf_salt: &[u8], password: &str) -> Result { - let tmp = format!("{}\n\n{}", user_secret, password); - Ok(Key::from_slice(&argon2_kdf(kdf_salt, tmp.as_bytes(), 32)?).unwrap()) - } - - fn derive_password_key(&self, kdf_salt: &[u8], password: &str) -> Result { - Self::derive_password_key_with(&self.user_secret, kdf_salt, password) - } +fn derive_password_key(kdf_salt: &[u8], password: &str) -> Result { + Ok(Key::from_slice(&argon2_kdf(kdf_salt, password.as_bytes(), 32)?).unwrap()) +} - fn try_open_encrypted_keys( - &self, - kdf_salt: &[u8], - password: &str, - encrypted_keys: &[u8], - ) -> Result> { - let secrets_to_try = - std::iter::once(&self.user_secret).chain(self.alternate_user_secrets.iter()); - for user_secret in secrets_to_try { - let password_key = Self::derive_password_key_with(user_secret, kdf_salt, password)?; - if let Ok(res) = open(encrypted_keys, &password_key) { - return Ok(res); - } - } - bail!("Unable to decrypt password blob."); - } +fn try_open_encrypted_keys(kdf_salt: &[u8], password: &str, encrypted_keys: &[u8]) -> Result> { + let password_key = derive_password_key(kdf_salt, password)?; + open(encrypted_keys, &password_key) } // ---- UTIL ---- diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 0b726cb..3f6a840 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::sync::Arc; +use std::path::PathBuf; use anyhow::{anyhow, bail, Result}; use async_trait::async_trait; @@ -10,13 +11,28 @@ use crate::login::*; use crate::storage; pub struct StaticLoginProvider { - users: HashMap>, - users_by_email: HashMap>, + user_list: PathBuf, + users: HashMap>, + users_by_email: HashMap>, } impl StaticLoginProvider { pub fn 
new(config: LoginStaticConfig) -> Result { - let users = config + let mut lp = Self { + user_list: config.user_list, + users: HashMap::new(), + users_by_email: HashMap::new(), + }; + + lp.update_user_list(); + + Ok(lp) + } + + pub fn update_user_list(&mut self) -> Result<()> { + let ulist: UserList = read_config(self.user_list)?; + + let users = ulist .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect::>(); @@ -29,11 +45,7 @@ impl StaticLoginProvider { users_by_email.insert(m.clone(), u.clone()); } } - - Ok(Self { - users, - users_by_email, - }) + Ok(()) } } @@ -64,24 +76,18 @@ impl LoginProvider for StaticLoginProvider { }), }; - let keys = match (&user.master_key, &user.secret_key) { - (Some(m), Some(s)) => { + let keys = match user.crypto_root { /*(&user.master_key, &user.secret_key) {*/ + CryptographyRoot::InPlace { master_key: m, secret_key: s } => { let master_key = Key::from_slice(&base64::decode(m)?).ok_or(anyhow!("Invalid master key"))?; let secret_key = SecretKey::from_slice(&base64::decode(s)?) .ok_or(anyhow!("Invalid secret key"))?; CryptoKeys::open_without_password(&storage, &master_key, &secret_key).await? } - (None, None) => { - let user_secrets = UserSecrets { - user_secret: user.user_secret.clone(), - alternate_user_secrets: user.alternate_user_secrets.clone(), - }; - CryptoKeys::open(&storage, &user_secrets, password).await? + CryptographyRoot::PasswordProtected => { + CryptoKeys::open(&storage, password).await? } - _ => bail!( - "Either both master and secret key or none of them must be specified for user" - ), + CryptographyRoot::Keyring => unimplemented!(), }; tracing::debug!(user=%username, "logged"); diff --git a/src/main.rs b/src/main.rs index a566ec6..7b567bc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -25,26 +25,59 @@ use server::Server; struct Args { #[clap(subcommand)] command: Command, + + #[clap(short, long, env = "CONFIG_FILE", default_value = "aerogramme.toml")] + config_file: PathBuf, } #[derive(Subcommand, Debug)] enum Command { - /// Runs the IMAP+LMTP server daemon - Server { - #[clap(short, long, env = "CONFIG_FILE", default_value = "aerogramme.toml")] - config_file: PathBuf, + #[clap(subcommand)] + Companion(CompanionCommand), + + #[clap(subcommand)] + Provider(ProviderCommand), + //Test, +} + +#[derive(Subcommand, Debug)] +enum CompanionCommand { + /// Runs the IMAP proxy + Daemon, + Reload { + #[clap(short, long, env = "AEROGRAMME_PID")] + pid: Option, }, - Test, + Wizard, + #[clap(subcommand)] + Account(AccountManagement), } -#[derive(Parser, Debug)] -struct UserSecretsArgs { - /// User secret - #[clap(short = 'U', long, env = "USER_SECRET")] - user_secret: String, - /// Alternate user secrets (comma-separated list of strings) - #[clap(long, env = "ALTERNATE_USER_SECRETS", default_value = "")] - alternate_user_secrets: String, +#[derive(Subcommand, Debug)] +enum ProviderCommand { + /// Runs the IMAP+LMTP server daemon + Daemon, + Reload, + #[clap(subcommand)] + Account(AccountManagement), +} + +#[derive(Subcommand, Debug)] +enum AccountManagement { + Add { + #[clap(short, long)] + login: String, + #[clap(short, long)] + setup: PathBuf, + }, + Delete { + #[clap(short, long)] + login: String, + }, + ChangePassword { + #[clap(short, long)] + login: String + }, } #[tokio::main] @@ -63,43 +96,62 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let args = Args::parse(); + let any_config = read_config(args.config_file)?; - match args.command { - Command::Server { config_file } => { - let config = read_config(config_file)?; - - let 
server = Server::new(config).await?; - server.run().await?; - } - Command::Test => { - use std::collections::HashMap; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - println!("--- message pack ---\n{:?}\n--- end ---\n", rmp_serde::to_vec(&Config { - lmtp: None, - imap: Some(ImapConfig { bind_addr: SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) }), - login_ldap: None, - login_static: Some(HashMap::from([ - ("alice".into(), LoginStaticUser { - password: "hash".into(), - user_secret: "hello".into(), - alternate_user_secrets: vec![], - email_addresses: vec![], - master_key: None, - secret_key: None, - storage: StaticStorage::Garage(StaticGarageConfig { - s3_endpoint: "http://".into(), - k2v_endpoint: "http://".into(), - aws_region: "garage".into(), - aws_access_key_id: "GK...".into(), - aws_secret_access_key: "xxx".into(), - bucket: "aerogramme".into(), - }), - }) - ])), - }).unwrap()); - } + match (args.command, any_config) { + (Command::Companion(subcommand), AnyConfig::Companion(config)) => match subcommand { + CompanionCommand::Daemon => { + let server = Server::from_companion_config(config).await?; + server.run().await?; + }, + CompanionCommand::Reload { pid } => { + unimplemented!(); + }, + CompanionCommand::Wizard => { + unimplemented!(); + }, + CompanionCommand::Account(cmd) => { + let user_file = config.users.user_list; + account_management(cmd, user_file); + } + }, + (Command::Provider(subcommand), AnyConfig::Provider(config)) => match subcommand { + ProviderCommand::Daemon => { + let server = Server::from_provider_config(config).await?; + server.run().await?; + }, + ProviderCommand::Reload => { + unimplemented!(); + }, + ProviderCommand::Account(cmd) => { + let user_file = match config.users { + UserManagement::Static(conf) => conf.user_list, + UserManagement::Ldap(_) => panic!("LDAP account management is not supported from Aerogramme.") + }; + account_management(cmd, user_file); + } + }, + (Command::Provider(_), AnyConfig::Companion(_)) => { + panic!("Your want to run a 'Provider' command but your configuration file has role 'Companion'."); + }, + (Command::Companion(_), AnyConfig::Provider(_)) => { + panic!("Your want to run a 'Companion' command but your configuration file has role 'Provider'."); + }, } Ok(()) } +fn account_management(cmd: AccountManagement, users: PathBuf) { + match cmd { + Add => { + unimplemented!(); + }, + Delete => { + unimplemented!(); + }, + ChangePassword => { + unimplemented!(); + }, + } +} diff --git a/src/server.rs b/src/server.rs index eca11ad..2321da8 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use anyhow::{bail, Result}; +use anyhow::Result; use futures::try_join; use log::*; use tokio::sync::watch; @@ -17,19 +17,24 @@ pub struct Server { } impl Server { - pub async fn new(config: Config) -> Result { - let (login, lmtp_conf, imap_conf) = build(config)?; + pub async fn from_companion_config(config: CompanionConfig) -> Result { + let login = Arc::new(StaticLoginProvider::new(config.users)?); - let lmtp_server = lmtp_conf.map(|cfg| LmtpServer::new(cfg, login.clone())); - let imap_server = match imap_conf { - Some(cfg) => Some(imap::new(cfg, login.clone()).await?), - None => None, + let lmtp_server = None; + let imap_server = Some(imap::new(config.imap, login).await?); + Ok(Self { lmtp_server, imap_server }) + } + + pub async fn from_provider_config(config: ProviderConfig) -> Result { + let login: ArcLoginProvider = match config.users { + UserManagement::Static(x) => 
Arc::new(StaticLoginProvider::new(x)?), + UserManagement::Ldap(x) => Arc::new(LdapLoginProvider::new(x)?), }; - Ok(Self { - lmtp_server, - imap_server, - }) + let lmtp_server = Some(LmtpServer::new(config.lmtp, login.clone())); + let imap_server = Some(imap::new(config.imap, login).await?); + + Ok(Self { lmtp_server, imap_server }) } pub async fn run(self) -> Result<()> { @@ -60,19 +65,6 @@ impl Server { } } -fn build(config: Config) -> Result<(ArcLoginProvider, Option, Option)> { - let lp: ArcLoginProvider = match (config.login_static, config.login_ldap) { - (Some(st), None) => Arc::new(StaticLoginProvider::new(st)?), - (None, Some(ld)) => Arc::new(LdapLoginProvider::new(ld)?), - (Some(_), Some(_)) => { - bail!("A single login provider must be set up in config file") - } - (None, None) => bail!("No login provider is set up in config file"), - }; - - Ok((lp, config.lmtp, config.imap)) -} - pub fn watch_ctrl_c() -> (watch::Receiver, Arc>) { let (send_cancel, watch_cancel) = watch::channel(false); let send_cancel = Arc::new(send_cancel); -- cgit v1.2.3 From cf18eb8afb76a25150c885c6cf525aedcc25facc Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 8 Dec 2023 15:23:50 +0100 Subject: now compile again --- src/config.rs | 4 +--- src/login/ldap_provider.rs | 22 +--------------------- src/login/static_provider.rs | 4 ++-- 3 files changed, 4 insertions(+), 26 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 5bd7380..85d38aa 100644 --- a/src/config.rs +++ b/src/config.rs @@ -79,8 +79,6 @@ pub struct LoginLdapConfig { pub username_attr: String, #[serde(default = "default_mail_attr")] pub mail_attr: String, - pub user_secret_attr: String, - pub alternate_user_secrets_attr: Option, // Storage related thing #[serde(flatten)] @@ -140,7 +138,7 @@ pub enum AnyConfig { } // --- -pub fn read_config<'a, T: Deserialize<'a>>(config_file: PathBuf) -> Result { +pub fn read_config(config_file: PathBuf) -> Result { let mut file = std::fs::OpenOptions::new() .read(true) .open(config_file.as_path())?; diff --git a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index 561b1c2..f72b289 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -19,8 +19,6 @@ pub struct LdapLoginProvider { mail_attr: String, storage_specific: StorageSpecific, - user_secret_attr: String, - alternate_user_secrets_attr: Option, } enum BucketSource { @@ -50,13 +48,8 @@ impl LdapLoginProvider { let mut attrs_to_retrieve = vec![ config.username_attr.clone(), config.mail_attr.clone(), - config.user_secret_attr.clone(), ]; - if let Some(a) = &config.alternate_user_secrets_attr { - attrs_to_retrieve.push(a.clone()); - } - // storage specific let specific = match config.storage { LdapStorage::InMemory => StorageSpecific::InMemory, @@ -86,8 +79,6 @@ impl LdapLoginProvider { username_attr: config.username_attr, mail_attr: config.mail_attr, storage_specific: specific, - user_secret_attr: config.user_secret_attr, - alternate_user_secrets_attr: config.alternate_user_secrets_attr, }) } @@ -165,20 +156,9 @@ impl LoginProvider for LdapLoginProvider { debug!("Ldap login with user name {} successfull", username); let storage = self.storage_creds_from_ldap_user(&user)?; - - let user_secret = get_attr(&user, &self.user_secret_attr)?; - let alternate_user_secrets = match &self.alternate_user_secrets_attr { - None => vec![], - Some(a) => user.attrs.get(a).cloned().unwrap_or_default(), - }; - let user_secrets = UserSecrets { - user_secret, - alternate_user_secrets, - }; - drop(ldap); - let keys = 
CryptoKeys::open(&storage, &user_secrets, password).await?; + let keys = CryptoKeys::open(&storage, password).await?; Ok(Credentials { storage, keys }) } diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 3f6a840..d0a4624 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -30,7 +30,7 @@ impl StaticLoginProvider { } pub fn update_user_list(&mut self) -> Result<()> { - let ulist: UserList = read_config(self.user_list)?; + let ulist: UserList = read_config(self.user_list.clone())?; let users = ulist .into_iter() @@ -76,7 +76,7 @@ impl LoginProvider for StaticLoginProvider { }), }; - let keys = match user.crypto_root { /*(&user.master_key, &user.secret_key) {*/ + let keys = match &user.crypto_root { /*(&user.master_key, &user.secret_key) {*/ CryptographyRoot::InPlace { master_key: m, secret_key: s } => { let master_key = Key::from_slice(&base64::decode(m)?).ok_or(anyhow!("Invalid master key"))?; -- cgit v1.2.3 From 532c99f3d30ab8adc0963f0814ce3151e1b61caf Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 8 Dec 2023 18:13:00 +0100 Subject: rework static login provider --- src/config.rs | 1 + src/login/static_provider.rs | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 85d38aa..876091f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -124,6 +124,7 @@ pub struct UserEntry { pub email_addresses: Vec, pub password: String, + #[serde(flatten)] pub crypto_root: CryptographyRoot, #[serde(flatten)] diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index d0a4624..0f6ab3a 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -19,12 +19,17 @@ pub struct StaticLoginProvider { impl StaticLoginProvider { pub fn new(config: LoginStaticConfig) -> Result { let mut lp = Self { - user_list: config.user_list, + user_list: config.user_list.clone(), users: HashMap::new(), users_by_email: HashMap::new(), }; - lp.update_user_list(); + lp + .update_user_list() + .context( + format!( + "failed to read {:?}, make sure it exists and it's correctly formatted", + config.user_list))?; Ok(lp) } @@ -32,17 +37,18 @@ impl StaticLoginProvider { pub fn update_user_list(&mut self) -> Result<()> { let ulist: UserList = read_config(self.user_list.clone())?; - let users = ulist + self.users = ulist .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect::>(); - let mut users_by_email = HashMap::new(); - for (_, u) in users.iter() { + + self.users_by_email.clear(); + for (_, u) in self.users.iter() { for m in u.email_addresses.iter() { - if users_by_email.contains_key(m) { + if self.users_by_email.contains_key(m) { bail!("Several users have same email address: {}", m); } - users_by_email.insert(m.clone(), u.clone()); + self.users_by_email.insert(m.clone(), u.clone()); } } Ok(()) -- cgit v1.2.3 From 23f918fd0edb224668fb775c770075eb4f44ce4d Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 8 Dec 2023 19:06:12 +0100 Subject: implement account create --- src/config.rs | 24 +++++++++++++++++++++- src/main.rs | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 78 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 876091f..506640f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,5 +1,5 @@ use std::collections::HashMap; -use std::io::Read; +use std::io::{Read, Write}; use std::net::SocketAddr; use std::path::PathBuf; @@ -131,6 +131,18 @@ pub struct 
UserEntry { pub storage: StaticStorage, } +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct SetupEntry { + #[serde(default)] + pub email_addresses: Vec, + + #[serde(default)] + pub clear_password: Option, + + #[serde(flatten)] + pub storage: StaticStorage, +} + #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(tag = "role")] pub enum AnyConfig { @@ -150,6 +162,16 @@ pub fn read_config(config_file: PathBuf) -> Resu Ok(toml::from_str(&config)?) } +pub fn write_config(config_file: PathBuf, config: &T) -> Result<()> { + let mut file = std::fs::OpenOptions::new() + .write(true) + .open(config_file.as_path())?; + + file.write_all(toml::to_string(config)?.as_bytes())?; + + Ok(()) +} + fn default_mail_attr() -> String { "mail".into() } diff --git a/src/main.rs b/src/main.rs index 7b567bc..679204d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,11 +14,12 @@ mod storage; use std::path::PathBuf; -use anyhow::Result; +use anyhow::{bail, Result, Context}; use clap::{Parser, Subcommand}; use config::*; use server::Server; +use login::{static_provider::*, *}; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] @@ -57,23 +58,28 @@ enum CompanionCommand { enum ProviderCommand { /// Runs the IMAP+LMTP server daemon Daemon, + /// Reload the daemon Reload, + /// Manage static accounts #[clap(subcommand)] Account(AccountManagement), } #[derive(Subcommand, Debug)] enum AccountManagement { + /// Add an account Add { #[clap(short, long)] login: String, #[clap(short, long)] setup: PathBuf, }, + /// Delete an account Delete { #[clap(short, long)] login: String, }, + /// Change password for a given account ChangePassword { #[clap(short, long)] login: String @@ -98,7 +104,7 @@ async fn main() -> Result<()> { let args = Args::parse(); let any_config = read_config(args.config_file)?; - match (args.command, any_config) { + match (&args.command, any_config) { (Command::Companion(subcommand), AnyConfig::Companion(config)) => match subcommand { CompanionCommand::Daemon => { let server = Server::from_companion_config(config).await?; @@ -112,7 +118,7 @@ async fn main() -> Result<()> { }, CompanionCommand::Account(cmd) => { let user_file = config.users.user_list; - account_management(cmd, user_file); + account_management(&args.command, cmd, user_file)?; } }, (Command::Provider(subcommand), AnyConfig::Provider(config)) => match subcommand { @@ -128,7 +134,7 @@ async fn main() -> Result<()> { UserManagement::Static(conf) => conf.user_list, UserManagement::Ldap(_) => panic!("LDAP account management is not supported from Aerogramme.") }; - account_management(cmd, user_file); + account_management(&args.command, cmd, user_file)?; } }, (Command::Provider(_), AnyConfig::Companion(_)) => { @@ -142,16 +148,55 @@ async fn main() -> Result<()> { Ok(()) } -fn account_management(cmd: AccountManagement, users: PathBuf) { +fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) -> Result<()> { + let mut ulist: UserList = read_config(users.clone())?; + match cmd { - Add => { - unimplemented!(); + AccountManagement::Add { login, setup } => { + tracing::debug!(user=login, "will-create"); + let stp: SetupEntry = read_config(setup.clone())?; + tracing::debug!(user=login, "loaded setup entry"); + let crypto_root = match root { + Command::Provider(_) => CryptographyRoot::PasswordProtected, + Command::Companion(_) => { + // @TODO use keyring by default instead of inplace in the future + // @TODO generate keys + CryptographyRoot::InPlace { + master_key: "".to_string(), + secret_key: 
"".to_string(), + } + } + }; + + let password = match stp.clear_password { + Some(pwd) => pwd, + None => { + let password = rpassword::prompt_password("Enter password: ")?; + let password_confirm = rpassword::prompt_password("Confirm password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); + } + password + } + }; + let hash = hash_password(password.as_str()).context("unable to hash password")?; + + ulist.insert(login.clone(), UserEntry { + email_addresses: stp.email_addresses, + password: hash, + crypto_root, + storage: stp.storage, + }); + + write_config(users.clone(), &ulist)?; }, - Delete => { + AccountManagement::Delete { login } => { unimplemented!(); }, - ChangePassword => { + AccountManagement::ChangePassword { login } => { unimplemented!(); }, - } + }; + + Ok(()) } -- cgit v1.2.3 From 47e25cd7f710fcd82356377cf48eccf9f65d31cc Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 12 Dec 2023 09:17:59 +0100 Subject: WIP --- src/config.rs | 25 ++++++++++++++++++++++--- src/login/mod.rs | 14 +++++++++++++- src/login/static_provider.rs | 6 +++--- src/main.rs | 4 +++- 4 files changed, 41 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index 506640f..cd3bff3 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,7 +4,7 @@ use std::net::SocketAddr; use std::path::PathBuf; use anyhow::Result; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize, Serializer, Deserializer}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CompanionConfig { @@ -79,6 +79,7 @@ pub struct LoginLdapConfig { pub username_attr: String, #[serde(default = "default_mail_attr")] pub mail_attr: String, + pub crypto_root_attr: String, // Storage related thing #[serde(flatten)] @@ -110,9 +111,11 @@ pub type UserList = HashMap; #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(tag = "crypto_root")] pub enum CryptographyRoot { - PasswordProtected, + PasswordProtected { + root_blob: String, + }, Keyring, - InPlace { + ClearText { master_key: String, secret_key: String, } @@ -175,3 +178,19 @@ pub fn write_config(config_file: PathBuf, config: &T) -> Result<() fn default_mail_attr() -> String { "mail".into() } + +fn as_base64(val: &T, serializer: &mut S) -> Result<(), S::Error> + where T: AsRef<[u8]>, + S: Serializer +{ + serializer.serialize_str(&base64::encode(val.as_ref())) +} + +fn from_base64(deserializer: &mut D) -> Result, D::Error> + where D: Deserializer +{ + use serde::de::Error; + String::deserialize(deserializer) + .and_then(|string| base64::decode(&string).map_err(|err| Error::custom(err.to_string()))) +} + diff --git a/src/login/mod.rs b/src/login/mod.rs index a9b9efe..f7a81c2 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -169,9 +169,20 @@ impl CryptoKeys { } pub async fn open( - storage: &Builders, password: &str, + root_blob: &str, ) -> Result { + let kdf_salt = &password_blob[..32]; + let password_openned = try_open_encrypted_keys(kdf_salt, password, &password_blob[32..])?; + + let keys = Self::deserialize(&password_openned)?; + if keys.public != expected_public { + bail!("Password public key doesn't match stored public key"); + } + + Ok(keys) + + /* let k2v = storage.row_store()?; let (ident_salt, expected_public) = Self::load_salt_and_public(&k2v).await?; @@ -208,6 +219,7 @@ impl CryptoKeys { } Ok(keys) + */ } pub async fn open_without_password( diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 0f6ab3a..7fadf2f 100644 --- a/src/login/static_provider.rs +++ 
b/src/login/static_provider.rs @@ -83,15 +83,15 @@ impl LoginProvider for StaticLoginProvider { }; let keys = match &user.crypto_root { /*(&user.master_key, &user.secret_key) {*/ - CryptographyRoot::InPlace { master_key: m, secret_key: s } => { + CryptographyRoot::ClearText { master_key: m, secret_key: s } => { let master_key = Key::from_slice(&base64::decode(m)?).ok_or(anyhow!("Invalid master key"))?; let secret_key = SecretKey::from_slice(&base64::decode(s)?) .ok_or(anyhow!("Invalid secret key"))?; CryptoKeys::open_without_password(&storage, &master_key, &secret_key).await? } - CryptographyRoot::PasswordProtected => { - CryptoKeys::open(&storage, password).await? + CryptographyRoot::PasswordProtected { root_blob } => { + CryptoKeys::open(password, root_blob).await? } CryptographyRoot::Keyring => unimplemented!(), }; diff --git a/src/main.rs b/src/main.rs index 679204d..c252623 100644 --- a/src/main.rs +++ b/src/main.rs @@ -191,7 +191,9 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - write_config(users.clone(), &ulist)?; }, AccountManagement::Delete { login } => { - unimplemented!(); + tracing::debug!(user=login, "will-delete"); + ulist.remove(&login); + write_config(users.clone(), &ulist)?; }, AccountManagement::ChangePassword { login } => { unimplemented!(); -- cgit v1.2.3 From 064a1077c8c66fe8d3ee71f831c930e1ddfbc34a Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 13 Dec 2023 16:09:01 +0100 Subject: it compiles again! --- src/config.rs | 37 +--- src/login/ldap_provider.rs | 20 +- src/login/mod.rs | 452 +++++++++++-------------------------------- src/login/static_provider.rs | 20 +- src/main.rs | 22 +-- 5 files changed, 149 insertions(+), 402 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index cd3bff3..eae50f5 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,7 +4,7 @@ use std::net::SocketAddr; use std::path::PathBuf; use anyhow::Result; -use serde::{Deserialize, Serialize, Serializer, Deserializer}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CompanionConfig { @@ -79,6 +79,8 @@ pub struct LoginLdapConfig { pub username_attr: String, #[serde(default = "default_mail_attr")] pub mail_attr: String, + + // The field that will contain the crypto root thingy pub crypto_root_attr: String, // Storage related thing @@ -108,27 +110,12 @@ pub struct StaticGarageConfig { pub type UserList = HashMap; -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(tag = "crypto_root")] -pub enum CryptographyRoot { - PasswordProtected { - root_blob: String, - }, - Keyring, - ClearText { - master_key: String, - secret_key: String, - } -} - #[derive(Serialize, Deserialize, Debug, Clone)] pub struct UserEntry { #[serde(default)] pub email_addresses: Vec, pub password: String, - - #[serde(flatten)] - pub crypto_root: CryptographyRoot, + pub crypto_root: String, #[serde(flatten)] pub storage: StaticStorage, @@ -178,19 +165,3 @@ pub fn write_config(config_file: PathBuf, config: &T) -> Result<() fn default_mail_attr() -> String { "mail".into() } - -fn as_base64(val: &T, serializer: &mut S) -> Result<(), S::Error> - where T: AsRef<[u8]>, - S: Serializer -{ - serializer.serialize_str(&base64::encode(val.as_ref())) -} - -fn from_base64(deserializer: &mut D) -> Result, D::Error> - where D: Deserializer -{ - use serde::de::Error; - String::deserialize(deserializer) - .and_then(|string| base64::decode(&string).map_err(|err| Error::custom(err.to_string()))) -} - diff --git 
a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index f72b289..6e94061 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -17,6 +17,7 @@ pub struct LdapLoginProvider { attrs_to_retrieve: Vec, username_attr: String, mail_attr: String, + crypto_root_attr: String, storage_specific: StorageSpecific, } @@ -48,6 +49,7 @@ impl LdapLoginProvider { let mut attrs_to_retrieve = vec![ config.username_attr.clone(), config.mail_attr.clone(), + config.crypto_root_attr.clone(), ]; // storage specific @@ -78,6 +80,7 @@ impl LdapLoginProvider { attrs_to_retrieve, username_attr: config.username_attr, mail_attr: config.mail_attr, + crypto_root_attr: config.crypto_root_attr, storage_specific: specific, }) } @@ -155,10 +158,16 @@ impl LoginProvider for LdapLoginProvider { .context("Invalid password")?; debug!("Ldap login with user name {} successfull", username); + // cryptography + let crstr = get_attr(&user, &self.crypto_root_attr)?; + let cr = CryptoRoot(crstr); + let keys = cr.crypto_keys(password)?; + + // storage let storage = self.storage_creds_from_ldap_user(&user)?; + drop(ldap); - let keys = CryptoKeys::open(&storage, password).await?; Ok(Credentials { storage, keys }) } @@ -197,12 +206,15 @@ impl LoginProvider for LdapLoginProvider { let user = SearchEntry::construct(matches.into_iter().next().unwrap()); debug!("Found matching LDAP user for email {}: {}", email, user.dn); + // cryptography + let crstr = get_attr(&user, &self.crypto_root_attr)?; + let cr = CryptoRoot(crstr); + let public_key = cr.public_key()?; + + // storage let storage = self.storage_creds_from_ldap_user(&user)?; drop(ldap); - let k2v_client = storage.row_store()?; - let (_, public_key) = CryptoKeys::load_salt_and_public(&k2v_client).await?; - Ok(PublicCredentials { storage, public_key, diff --git a/src/login/mod.rs b/src/login/mod.rs index f7a81c2..3d7a49f 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -2,7 +2,7 @@ pub mod ldap_provider; pub mod static_provider; use std::sync::Arc; -use futures::try_join; +use base64::Engine; use anyhow::{anyhow, bail, Context, Result}; use async_trait::async_trait; @@ -37,6 +37,14 @@ pub struct Credentials { /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V pub keys: CryptoKeys, } +impl Credentials { + pub fn row_client(&self) -> Result { + Ok(self.storage.row_store()?) + } + pub fn blob_client(&self) -> Result { + Ok(self.storage.blob_store()?) + } +} #[derive(Clone, Debug)] pub struct PublicCredentials { @@ -45,20 +53,81 @@ pub struct PublicCredentials { pub public_key: PublicKey, } -/* -/// The struct UserSecrets represents intermediary secrets that are mixed in with the user's -/// password when decrypting the cryptographic keys that are stored in their bucket. -/// These secrets should be stored somewhere else (e.g. in the LDAP server or in the -/// local config file), as an additionnal authentification factor so that the password -/// isn't enough just alone to decrypt the content of a user's bucket. 
-pub struct UserSecrets { - /// The main user secret that will be used to encrypt keys when a new password is added - pub user_secret: String, - /// Alternative user secrets that will be tried when decrypting keys that were encrypted - /// with old passwords - pub alternate_user_secrets: Vec, +use serde::{Serialize, Deserialize}; +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CryptoRoot(pub String); + +impl CryptoRoot { + pub fn create_pass(password: &str, k: &CryptoKeys) -> Result { + let bytes = k.password_seal(password)?; + let b64 = base64::engine::general_purpose::STANDARD_NO_PAD.encode(bytes); + let cr = format!("aero:cryptoroot:pass:{}", b64); + Ok(Self(cr)) + } + + pub fn create_cleartext(k: &CryptoKeys) -> Self { + let bytes = k.serialize(); + let b64 = base64::engine::general_purpose::STANDARD_NO_PAD.encode(bytes); + let cr = format!("aero:cryptoroot:cleartext:{}", b64); + Self(cr) + } + + pub fn create_incoming(pk: &PublicKey) -> Self { + let bytes: &[u8] = &pk[..]; + let b64 = base64::engine::general_purpose::STANDARD_NO_PAD.encode(bytes); + let cr = format!("aero:cryptoroot:incoming:{}", b64); + Self(cr) + } + + pub fn public_key(&self) -> Result { + match self.0.splitn(4, ':').collect::>()[..] { + [ "aero", "cryptoroot", "pass", b64blob ] => { + let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; + if blob.len() < 32 { + bail!("Decoded data is {} bytes long, expect at least 32 bytes", blob.len()); + } + PublicKey::from_slice(&blob[..32]).context("must be a valid public key") + }, + [ "aero", "cryptoroot", "cleartext", b64blob ] => { + let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; + Ok(CryptoKeys::deserialize(&blob)?.public) + }, + [ "aero", "cryptoroot", "incoming", b64blob ] => { + let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; + if blob.len() < 32 { + bail!("Decoded data is {} bytes long, expect at least 32 bytes", blob.len()); + } + PublicKey::from_slice(&blob[..32]).context("must be a valid public key") + }, + [ "aero", "cryptoroot", "keyring", _ ] => { + bail!("keyring is not yet implemented!") + }, + _ => bail!(format!("passed string '{}' is not a valid cryptoroot", self.0)), + } + } + pub fn crypto_keys(&self, password: &str) -> Result { + match self.0.splitn(4, ':').collect::>()[..] { + [ "aero", "cryptoroot", "pass", b64blob ] => { + let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; + if blob.len() < 32 { + bail!("Decoded data is {} bytes long, expect at least 32 bytes", blob.len()); + } + CryptoKeys::password_open(password, &blob[32..]) + }, + [ "aero", "cryptoroot", "cleartext", b64blob ] => { + let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; + CryptoKeys::deserialize(&blob) + }, + [ "aero", "cryptoroot", "incoming", b64blob ] => { + bail!("incoming cryptoroot does not contain a crypto key!") + }, + [ "aero", "cryptoroot", "keyring", _ ] =>{ + bail!("keyring is not yet implemented!") + }, + _ => bail!(format!("passed string '{}' is not a valid cryptoroot", self.0)), + } + } } -*/ /// The struct CryptoKeys contains the cryptographic keys used to encrypt and decrypt /// data in a user's mailbox. @@ -75,337 +144,22 @@ pub struct CryptoKeys { // ---- -impl Credentials { - pub fn row_client(&self) -> Result { - Ok(self.storage.row_store()?) - } - pub fn blob_client(&self) -> Result { - Ok(self.storage.blob_store()?) 
- } -} + impl CryptoKeys { - pub async fn init( - storage: &Builders, - password: &str, - ) -> Result { - // Check that salt and public don't exist already - let k2v = storage.row_store()?; - let (salt_ct, public_ct) = Self::check_uninitialized(&k2v).await?; - - // Generate salt for password identifiers - let mut ident_salt = [0u8; 32]; - thread_rng().fill(&mut ident_salt); - - // Generate (public, private) key pair and master key + /// Initialize a new cryptography root + pub fn init() -> Self { let (public, secret) = gen_keypair(); let master = gen_key(); - let keys = CryptoKeys { + CryptoKeys { master, secret, public, - }; - - // Generate short password digest (= password identity) - let ident = argon2_kdf(&ident_salt, password.as_bytes(), 16)?; - - // Generate salt for KDF - let mut kdf_salt = [0u8; 32]; - thread_rng().fill(&mut kdf_salt); - - // Calculate key for password secret box - let password_key = derive_password_key(&kdf_salt, password)?; - - // Seal a secret box that contains our crypto keys - let password_sealed = seal(&keys.serialize(), &password_key)?; - - let password_sortkey = format!("password:{}", hex::encode(&ident)); - let password_blob = [&kdf_salt[..], &password_sealed].concat(); - - // Write values to storage - // @FIXME Implement insert batch in the storage API - let (salt, public, passwd) = ( - salt_ct.set_value(&ident_salt), - public_ct.set_value(keys.public.as_ref()), - k2v.row("keys", &password_sortkey).set_value(&password_blob) - ); - try_join!(salt.push(), public.push(), passwd.push()) - .context("InsertBatch for salt, public, and password")?; - - Ok(keys) - } - - pub async fn init_without_password( - storage: &Builders, - master: &Key, - secret: &SecretKey, - ) -> Result { - // Check that salt and public don't exist already - let k2v = storage.row_store()?; - let (salt_ct, public_ct) = Self::check_uninitialized(&k2v).await?; - - // Generate salt for password identifiers - let mut ident_salt = [0u8; 32]; - thread_rng().fill(&mut ident_salt); - - // Create CryptoKeys struct from given keys - let public = secret.public_key(); - let keys = CryptoKeys { - master: master.clone(), - secret: secret.clone(), - public, - }; - - // Write values to storage - // @FIXME implement insert batch in the storage API - let (salt, public) = ( - salt_ct.set_value(&ident_salt), - public_ct.set_value(keys.public.as_ref()), - ); - - try_join!(salt.push(), public.push()).context("InsertBatch for salt and public")?; - - Ok(keys) - } - - pub async fn open( - password: &str, - root_blob: &str, - ) -> Result { - let kdf_salt = &password_blob[..32]; - let password_openned = try_open_encrypted_keys(kdf_salt, password, &password_blob[32..])?; - - let keys = Self::deserialize(&password_openned)?; - if keys.public != expected_public { - bail!("Password public key doesn't match stored public key"); - } - - Ok(keys) - - /* - let k2v = storage.row_store()?; - let (ident_salt, expected_public) = Self::load_salt_and_public(&k2v).await?; - - // Generate short password digest (= password identity) - let ident = argon2_kdf(&ident_salt, password.as_bytes(), 16)?; - - // Lookup password blob - let password_sortkey = format!("password:{}", hex::encode(&ident)); - let password_ref = k2v.row("keys", &password_sortkey); - - let password_blob = { - let val = match password_ref.fetch().await { - Err(StorageError::NotFound) => { - bail!("invalid password") - } - x => x?, - }; - if val.content().len() != 1 { - bail!("multiple values for password in storage"); - } - match val.content().pop().unwrap() { - 
Alternative::Value(v) => v, - Alternative::Tombstone => bail!("invalid password"), - } - }; - - // Try to open blob - let kdf_salt = &password_blob[..32]; - let password_openned = try_open_encrypted_keys(kdf_salt, password, &password_blob[32..])?; - - let keys = Self::deserialize(&password_openned)?; - if keys.public != expected_public { - bail!("Password public key doesn't match stored public key"); - } - - Ok(keys) - */ - } - - pub async fn open_without_password( - storage: &Builders, - master: &Key, - secret: &SecretKey, - ) -> Result { - let k2v = storage.row_store()?; - let (_ident_salt, expected_public) = Self::load_salt_and_public(&k2v).await?; - - // Create CryptoKeys struct from given keys - let public = secret.public_key(); - let keys = CryptoKeys { - master: master.clone(), - secret: secret.clone(), - public, - }; - - // Check public key matches - if keys.public != expected_public { - bail!("Given public key doesn't match stored public key"); } - - Ok(keys) - } - - pub async fn add_password( - &self, - storage: &Builders, - password: &str, - ) -> Result<()> { - let k2v = storage.row_store()?; - let (ident_salt, _public) = Self::load_salt_and_public(&k2v).await?; - - // Generate short password digest (= password identity) - let ident = argon2_kdf(&ident_salt, password.as_bytes(), 16)?; - - // Generate salt for KDF - let mut kdf_salt = [0u8; 32]; - thread_rng().fill(&mut kdf_salt); - - // Calculate key for password secret box - let password_key = derive_password_key(&kdf_salt, password)?; - - // Seal a secret box that contains our crypto keys - let password_sealed = seal(&self.serialize(), &password_key)?; - - let password_sortkey = format!("password:{}", hex::encode(&ident)); - let password_blob = [&kdf_salt[..], &password_sealed].concat(); - - // List existing passwords to overwrite existing entry if necessary - let pass_key = k2v.row("keys", &password_sortkey); - let passwd = match pass_key.fetch().await { - Err(StorageError::NotFound) => pass_key, - v => { - let entry = v?; - if entry.content().iter().any(|x| matches!(x, Alternative::Value(_))) { - bail!("password already exists"); - } - entry.to_ref() - } - }; - - // Write values to storage - passwd - .set_value(&password_blob) - .push() - .await - .context("InsertBatch for new password")?; - - Ok(()) - } - - pub async fn delete_password( - storage: &Builders, - password: &str, - allow_delete_all: bool, - ) -> Result<()> { - let k2v = storage.row_store()?; - let (ident_salt, _public) = Self::load_salt_and_public(&k2v).await?; - - // Generate short password digest (= password identity) - let ident = argon2_kdf(&ident_salt, password.as_bytes(), 16)?; - let password_sortkey = format!("password:{}", hex::encode(&ident)); - - // List existing passwords - let existing_passwords = Self::list_existing_passwords(&k2v).await?; - - // Check password is there - let pw = existing_passwords - .iter() - .map(|x| x.to_ref()) - .find(|x| x.key().1 == &password_sortkey) - //.get(&password_sortkey) - .ok_or(anyhow!("password does not exist"))?; - - if !allow_delete_all && existing_passwords.len() < 2 { - bail!("No other password exists, not deleting last password."); - } - - pw.rm().await.context("DeleteItem for password")?; - - Ok(()) - } - - // ---- STORAGE UTIL ---- - // - async fn check_uninitialized( - k2v: &RowStore, - ) -> Result<(RowRef, RowRef)> { - let params = k2v - .select(Selector::List(vec![ - ("keys", "salt"), - ("keys", "public"), - ])) - .await - .context("ReadBatch for salt and public in check_uninitialized")?; - - if 
params.len() != 2 { - bail!( - "Invalid response from k2v storage: {:?} (expected two items)", - params - ); - } - - let salt_ct = params[0].to_ref(); - if params[0].content().iter().any(|x| matches!(x, Alternative::Value(_))) { - bail!("key storage already initialized"); - } - - let public_ct = params[1].to_ref(); - if params[1].content().iter().any(|x| matches!(x, Alternative::Value(_))) { - bail!("key storage already initialized"); - } - - Ok((salt_ct, public_ct)) - } - - pub async fn load_salt_and_public(k2v: &RowStore) -> Result<([u8; 32], PublicKey)> { - let params = k2v - .select(Selector::List(vec![ - ("keys", "salt"), - ("keys", "public"), - ])) - .await - .context("ReadBatch for salt and public in load_salt_and_public")?; - - if params.len() != 2 { - bail!( - "Invalid response from k2v storage: {:?} (expected two items)", - params - ); - } - if params[0].content().len() != 1 || params[1].content().len() != 1 { - bail!("cryptographic keys not initialized for user"); - } - - // Retrieve salt from given response - let salt: Vec = match &mut params[0].content().iter_mut().next().unwrap() { - Alternative::Value(v) => std::mem::take(v), - Alternative::Tombstone => bail!("salt is a tombstone"), - }; - if salt.len() != 32 { - bail!("`salt` is not 32 bytes long"); - } - let mut salt_constlen = [0u8; 32]; - salt_constlen.copy_from_slice(&salt); - - // Retrieve public from given response - let public: Vec = match &mut params[1].content().iter_mut().next().unwrap() { - Alternative::Value(v) => std::mem::take(v), - Alternative::Tombstone => bail!("public is a tombstone"), - }; - let public = PublicKey::from_slice(&public).ok_or(anyhow!("Invalid public key length"))?; - - Ok((salt_constlen, public)) - } - - async fn list_existing_passwords(k2v: &RowStore) -> Result> { - let res = k2v.select(Selector::Prefix { shard_key: "keys", prefix: "password:" }) - .await - .context("ReadBatch for prefix password: in list_existing_passwords")?; - - Ok(res) } + // Clear text serialize/deserialize + /// Serialize the root as bytes without encryption fn serialize(&self) -> [u8; 64] { let mut res = [0u8; 64]; res[..32].copy_from_slice(self.master.as_ref()); @@ -413,6 +167,7 @@ impl CryptoKeys { res } + /// Deserialize a clear text crypto root without encryption fn deserialize(bytes: &[u8]) -> Result { if bytes.len() != 64 { bail!("Invalid length: {}, expected 64", bytes.len()); @@ -426,6 +181,31 @@ impl CryptoKeys { public, }) } + + // Password sealed keys serialize/deserialize + pub fn password_open(password: &str, blob: &[u8]) -> Result { + let kdf_salt = &blob[0..32]; + let password_openned = try_open_encrypted_keys(kdf_salt, password, &blob[32..])?; + + let keys = Self::deserialize(&password_openned)?; + Ok(keys) + } + + pub fn password_seal(&self, password: &str) -> Result> { + let mut kdf_salt = [0u8; 32]; + thread_rng().fill(&mut kdf_salt); + + // Calculate key for password secret box + let password_key = derive_password_key(&kdf_salt, password)?; + + // Seal a secret box that contains our crypto keys + let password_sealed = seal(&self.serialize(), &password_key)?; + + // Create blob + let password_blob = [&self.public[..], &kdf_salt[..], &password_sealed].concat(); + + Ok(password_blob) + } } fn derive_password_key(kdf_salt: &[u8], password: &str) -> Result { @@ -452,7 +232,7 @@ pub fn argon2_kdf(salt: &[u8], password: &[u8], output_len: usize) -> Result { - let master_key = - Key::from_slice(&base64::decode(m)?).ok_or(anyhow!("Invalid master key"))?; - let secret_key = 
SecretKey::from_slice(&base64::decode(s)?) - .ok_or(anyhow!("Invalid secret key"))?; - CryptoKeys::open_without_password(&storage, &master_key, &secret_key).await? - } - CryptographyRoot::PasswordProtected { root_blob } => { - CryptoKeys::open(password, root_blob).await? - } - CryptographyRoot::Keyring => unimplemented!(), - }; + let cr = CryptoRoot(user.crypto_root); + let keys = cr.crypto_keys(password)?; tracing::debug!(user=%username, "logged"); Ok(Credentials { storage, keys }) @@ -118,8 +106,8 @@ impl LoginProvider for StaticLoginProvider { }), }; - let k2v_client = storage.row_store()?; - let (_, public_key) = CryptoKeys::load_salt_and_public(&k2v_client).await?; + let cr = CryptoRoot(user.crypto_root); + let public_key = cr.public_key()?; Ok(PublicCredentials { storage, diff --git a/src/main.rs b/src/main.rs index c252623..3b5f474 100644 --- a/src/main.rs +++ b/src/main.rs @@ -156,17 +156,6 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - tracing::debug!(user=login, "will-create"); let stp: SetupEntry = read_config(setup.clone())?; tracing::debug!(user=login, "loaded setup entry"); - let crypto_root = match root { - Command::Provider(_) => CryptographyRoot::PasswordProtected, - Command::Companion(_) => { - // @TODO use keyring by default instead of inplace in the future - // @TODO generate keys - CryptographyRoot::InPlace { - master_key: "".to_string(), - secret_key: "".to_string(), - } - } - }; let password = match stp.clear_password { Some(pwd) => pwd, @@ -179,12 +168,19 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - password } }; + + let crypto_keys = CryptoKeys::init(); + let crypto_root = match root { + Command::Provider(_) => CryptoRoot::create_pass(&password, &crypto_keys)?, + Command::Companion(_) => CryptoRoot::create_cleartext(&crypto_keys), + }; + let hash = hash_password(password.as_str()).context("unable to hash password")?; ulist.insert(login.clone(), UserEntry { email_addresses: stp.email_addresses, password: hash, - crypto_root, + crypto_root: crypto_root.0, storage: stp.storage, }); @@ -192,7 +188,7 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - }, AccountManagement::Delete { login } => { tracing::debug!(user=login, "will-delete"); - ulist.remove(&login); + ulist.remove(login); write_config(users.clone(), &ulist)?; }, AccountManagement::ChangePassword { login } => { -- cgit v1.2.3 From 29561dde41b402362f8baa3d9cd87a07f743b9fd Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 13 Dec 2023 18:04:04 +0100 Subject: CLI tools --- src/config.rs | 2 + src/login/mod.rs | 12 ++-- src/login/static_provider.rs | 4 +- src/main.rs | 142 +++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 146 insertions(+), 14 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index eae50f5..e50cd68 100644 --- a/src/config.rs +++ b/src/config.rs @@ -155,6 +155,8 @@ pub fn read_config(config_file: PathBuf) -> Resu pub fn write_config(config_file: PathBuf, config: &T) -> Result<()> { let mut file = std::fs::OpenOptions::new() .write(true) + .create(true) + .truncate(true) .open(config_file.as_path())?; file.write_all(toml::to_string(config)?.as_bytes())?; diff --git a/src/login/mod.rs b/src/login/mod.rs index 3d7a49f..9e0c437 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -109,16 +109,13 @@ impl CryptoRoot { match self.0.splitn(4, ':').collect::>()[..] 
{ [ "aero", "cryptoroot", "pass", b64blob ] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; - if blob.len() < 32 { - bail!("Decoded data is {} bytes long, expect at least 32 bytes", blob.len()); - } - CryptoKeys::password_open(password, &blob[32..]) + CryptoKeys::password_open(password, &blob) }, [ "aero", "cryptoroot", "cleartext", b64blob ] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; CryptoKeys::deserialize(&blob) }, - [ "aero", "cryptoroot", "incoming", b64blob ] => { + [ "aero", "cryptoroot", "incoming", _ ] => { bail!("incoming cryptoroot does not contain a crypto key!") }, [ "aero", "cryptoroot", "keyring", _ ] =>{ @@ -184,8 +181,9 @@ impl CryptoKeys { // Password sealed keys serialize/deserialize pub fn password_open(password: &str, blob: &[u8]) -> Result { - let kdf_salt = &blob[0..32]; - let password_openned = try_open_encrypted_keys(kdf_salt, password, &blob[32..])?; + let _pubkey = &blob[0..32]; + let kdf_salt = &blob[32..64]; + let password_openned = try_open_encrypted_keys(kdf_salt, password, &blob[64..])?; let keys = Self::deserialize(&password_openned)?; Ok(keys) diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 178d97e..85d55ef 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -81,7 +81,7 @@ impl LoginProvider for StaticLoginProvider { }), }; - let cr = CryptoRoot(user.crypto_root); + let cr = CryptoRoot(user.crypto_root.clone()); let keys = cr.crypto_keys(password)?; tracing::debug!(user=%username, "logged"); @@ -106,7 +106,7 @@ impl LoginProvider for StaticLoginProvider { }), }; - let cr = CryptoRoot(user.crypto_root); + let cr = CryptoRoot(user.crypto_root.clone()); let public_key = cr.public_key()?; Ok(PublicCredentials { diff --git a/src/main.rs b/src/main.rs index 3b5f474..2beaf21 100644 --- a/src/main.rs +++ b/src/main.rs @@ -34,13 +34,53 @@ struct Args { #[derive(Subcommand, Debug)] enum Command { #[clap(subcommand)] + /// A daemon to be run by the end user, on a personal device Companion(CompanionCommand), #[clap(subcommand)] + /// A daemon to be run by the service provider, on a server Provider(ProviderCommand), + + #[clap(subcommand)] + /// Specific tooling, should not be part of a normal workflow, for debug & experimenting only + Tools(ToolsCommand), //Test, } +#[derive(Subcommand, Debug)] +enum ToolsCommand { + /// Manage crypto roots + #[clap(subcommand)] + CryptoRoot(CryptoRootCommand), +} + +#[derive(Subcommand, Debug)] +enum CryptoRootCommand { + /// Generate a new crypto-root protected with a password + New { + #[clap(env = "AEROGRAMME_PASSWORD")] + maybe_password: Option, + }, + /// Generate a new clear text crypto-root, store it securely! 
+ NewClearText, + /// Change the password of a crypto key + ChangePassword { + #[clap(env = "AEROGRAMME_OLD_PASSWORD")] + maybe_old_password: Option, + + #[clap(env = "AEROGRAMME_NEW_PASSWORD")] + maybe_new_password: Option, + + #[clap(short, long, env = "AEROGRAMME_CRYPTO_ROOT")] + crypto_root: String, + }, + /// From a given crypto-key, derive one containing only the public key + DeriveIncoming { + #[clap(short, long, env = "AEROGRAMME_CRYPTO_ROOT")] + crypto_root: String, + }, +} + #[derive(Subcommand, Debug)] enum CompanionCommand { /// Runs the IMAP proxy @@ -81,6 +121,12 @@ enum AccountManagement { }, /// Change password for a given account ChangePassword { + #[clap(env = "AEROGRAMME_OLD_PASSWORD")] + maybe_old_password: Option, + + #[clap(env = "AEROGRAMME_NEW_PASSWORD")] + maybe_new_password: Option, + #[clap(short, long)] login: String }, @@ -110,7 +156,7 @@ async fn main() -> Result<()> { let server = Server::from_companion_config(config).await?; server.run().await?; }, - CompanionCommand::Reload { pid } => { + CompanionCommand::Reload { pid: _pid } => { unimplemented!(); }, CompanionCommand::Wizard => { @@ -143,18 +189,72 @@ async fn main() -> Result<()> { (Command::Companion(_), AnyConfig::Provider(_)) => { panic!("Your want to run a 'Companion' command but your configuration file has role 'Provider'."); }, + (Command::Tools(subcommand), _) => match subcommand { + ToolsCommand::CryptoRoot(crcommand) => { + match crcommand { + CryptoRootCommand::New { maybe_password } => { + let password = match maybe_password { + Some(pwd) => pwd.clone(), + None => { + let password = rpassword::prompt_password("Enter password: ")?; + let password_confirm = rpassword::prompt_password("Confirm password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); + } + password + } + }; + let crypto_keys = CryptoKeys::init(); + let cr = CryptoRoot::create_pass(&password, &crypto_keys)?; + println!("{}", cr.0); + }, + CryptoRootCommand::NewClearText => { + let crypto_keys = CryptoKeys::init(); + let cr = CryptoRoot::create_cleartext(&crypto_keys); + println!("{}", cr.0); + }, + CryptoRootCommand::ChangePassword { maybe_old_password, maybe_new_password, crypto_root } => { + let old_password = match maybe_old_password { + Some(pwd) => pwd.to_string(), + None => rpassword::prompt_password("Enter old password: ")?, + }; + + let new_password = match maybe_new_password { + Some(pwd) => pwd.to_string(), + None => { + let password = rpassword::prompt_password("Enter new password: ")?; + let password_confirm = rpassword::prompt_password("Confirm new password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); + } + password + } + }; + + let keys = CryptoRoot(crypto_root.to_string()).crypto_keys(&old_password)?; + let cr = CryptoRoot::create_pass(&new_password, &keys)?; + println!("{}", cr.0); + }, + CryptoRootCommand::DeriveIncoming { crypto_root } => { + let pubkey = CryptoRoot(crypto_root.to_string()).public_key()?; + let cr = CryptoRoot::create_incoming(&pubkey); + println!("{}", cr.0); + }, + } + }, + } } Ok(()) } fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) -> Result<()> { - let mut ulist: UserList = read_config(users.clone())?; + let mut ulist: UserList = read_config(users.clone()).context(format!("'{:?}' must be a user database", users))?; match cmd { AccountManagement::Add { login, setup } => { tracing::debug!(user=login, "will-create"); - let stp: SetupEntry = read_config(setup.clone())?; + let stp: SetupEntry = 
read_config(setup.clone()).context(format!("'{:?}' must be a setup file", setup))?; tracing::debug!(user=login, "loaded setup entry"); let password = match stp.clear_password { @@ -173,6 +273,7 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - let crypto_root = match root { Command::Provider(_) => CryptoRoot::create_pass(&password, &crypto_keys)?, Command::Companion(_) => CryptoRoot::create_cleartext(&crypto_keys), + _ => unreachable!(), }; let hash = hash_password(password.as_str()).context("unable to hash password")?; @@ -191,8 +292,39 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - ulist.remove(login); write_config(users.clone(), &ulist)?; }, - AccountManagement::ChangePassword { login } => { - unimplemented!(); + AccountManagement::ChangePassword { maybe_old_password, maybe_new_password, login } => { + let mut user = ulist.remove(login).context("user must exist first")?; + + let old_password = match maybe_old_password { + Some(pwd) => pwd.to_string(), + None => rpassword::prompt_password("Enter old password: ")?, + }; + + if !verify_password(&old_password, &user.password)? { + bail!(format!("invalid password for login {}", login)); + } + + let crypto_keys = CryptoRoot(user.crypto_root).crypto_keys(&old_password)?; + + let new_password = match maybe_new_password { + Some(pwd) => pwd.to_string(), + None => { + let password = rpassword::prompt_password("Enter new password: ")?; + let password_confirm = rpassword::prompt_password("Confirm new password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); + } + password + } + }; + let new_hash = hash_password(&new_password)?; + let new_crypto_root = CryptoRoot::create_pass(&new_password, &crypto_keys)?; + + user.password = new_hash; + user.crypto_root = new_crypto_root.0; + + ulist.insert(login.clone(), user); + write_config(users.clone(), &ulist)?; }, }; -- cgit v1.2.3 From 02626865bf0f076e416db2121b44d7fcad067e29 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 13 Dec 2023 18:06:18 +0100 Subject: use bail! instead of panic! 
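
Context for this change: anyhow's bail! macro returns early with an Err value, so the caller (here, main() -> Result<()>) decides how the failure is reported and the process exits with a non-zero status, whereas panic! unwinds and aborts with a backtrace. A minimal sketch of the difference, using a hypothetical check_role helper that is not part of this patch:

use anyhow::{bail, Result};

// Hypothetical helper mirroring the role check in main(); illustration only.
fn check_role(cmd_role: &str, cfg_role: &str) -> Result<()> {
    if cmd_role != cfg_role {
        // bail!(..) is shorthand for `return Err(anyhow!(..))`: control goes
        // back to the caller instead of unwinding the whole process.
        bail!(
            "You want to run a '{}' command but your configuration file has role '{}'.",
            cmd_role,
            cfg_role
        );
    }
    Ok(())
}

fn main() -> Result<()> {
    // With panic! this call would abort immediately; with bail! the error is
    // propagated by ? and reported by the Result-returning main().
    check_role("Provider", "Companion")?;
    Ok(())
}
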
--- src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/main.rs b/src/main.rs index 2beaf21..c854340 100644 --- a/src/main.rs +++ b/src/main.rs @@ -184,10 +184,10 @@ async fn main() -> Result<()> { } }, (Command::Provider(_), AnyConfig::Companion(_)) => { - panic!("Your want to run a 'Provider' command but your configuration file has role 'Companion'."); + bail!("Your want to run a 'Provider' command but your configuration file has role 'Companion'."); }, (Command::Companion(_), AnyConfig::Provider(_)) => { - panic!("Your want to run a 'Companion' command but your configuration file has role 'Provider'."); + bail!("Your want to run a 'Companion' command but your configuration file has role 'Provider'."); }, (Command::Tools(subcommand), _) => match subcommand { ToolsCommand::CryptoRoot(crcommand) => { -- cgit v1.2.3 From 65f4ceae7835a1bed8ff65ecbc4933b4b3553c84 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 14 Dec 2023 11:30:11 +0100 Subject: add a password hash tool --- src/main.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'src') diff --git a/src/main.rs b/src/main.rs index c854340..3d87d11 100644 --- a/src/main.rs +++ b/src/main.rs @@ -52,6 +52,11 @@ enum ToolsCommand { /// Manage crypto roots #[clap(subcommand)] CryptoRoot(CryptoRootCommand), + + PasswordHash { + #[clap(env = "AEROGRAMME_PASSWORD")] + maybe_password: Option, + } } #[derive(Subcommand, Debug)] @@ -190,6 +195,13 @@ async fn main() -> Result<()> { bail!("Your want to run a 'Companion' command but your configuration file has role 'Provider'."); }, (Command::Tools(subcommand), _) => match subcommand { + ToolsCommand::PasswordHash { maybe_password } => { + let password = match maybe_password { + Some(pwd) => pwd.clone(), + None => rpassword::prompt_password("Enter password: ")?, + }; + println!("{}", hash_password(&password)?); + }, ToolsCommand::CryptoRoot(crcommand) => { match crcommand { CryptoRootCommand::New { maybe_password } => { -- cgit v1.2.3 From 1f6e64d34e44b8b7bc7247af38bccf3ade86cf0b Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 14 Dec 2023 13:03:04 +0100 Subject: add support for hot reloading --- src/login/static_provider.rs | 73 ++++++++++++++++++++++++++------------------ src/main.rs | 2 +- src/server.rs | 8 ++--- 3 files changed, 49 insertions(+), 34 deletions(-) (limited to 'src') diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 85d55ef..4a8d484 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -1,6 +1,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::path::PathBuf; +use tokio::sync::watch; +use tokio::signal::unix::{signal, SignalKind}; use anyhow::{anyhow, bail, Result}; use async_trait::async_trait; @@ -9,48 +11,59 @@ use crate::config::*; use crate::login::*; use crate::storage; -pub struct StaticLoginProvider { - user_list: PathBuf, +#[derive(Default)] +pub struct UserDatabase { users: HashMap>, users_by_email: HashMap>, } -impl StaticLoginProvider { - pub fn new(config: LoginStaticConfig) -> Result { - let mut lp = Self { - user_list: config.user_list.clone(), - users: HashMap::new(), - users_by_email: HashMap::new(), - }; - - lp - .update_user_list() - .context( - format!( - "failed to read {:?}, make sure it exists and it's correctly formatted", - config.user_list))?; +pub struct StaticLoginProvider { + user_db: watch::Receiver, +} - Ok(lp) - } +pub async fn update_user_list(config: PathBuf, up: watch::Sender) -> Result<()> { + let 
mut stream = signal(SignalKind::user_defined1()).expect("failed to install SIGUSR1 signal hander for reload"); - pub fn update_user_list(&mut self) -> Result<()> { - let ulist: UserList = read_config(self.user_list.clone())?; + loop { + let ulist: UserList = match read_config(config.clone()) { + Ok(x) => x, + Err(e) => { + tracing::warn!(path=%config.as_path().to_string_lossy(), error=%e, "Unable to load config"); + continue; + } + }; - self.users = ulist + let users = ulist .into_iter() .map(|(k, v)| (k, Arc::new(v))) .collect::>(); - self.users_by_email.clear(); - for (_, u) in self.users.iter() { + let mut users_by_email = HashMap::new(); + for (_, u) in users.iter() { for m in u.email_addresses.iter() { - if self.users_by_email.contains_key(m) { - bail!("Several users have same email address: {}", m); + if users_by_email.contains_key(m) { + tracing::warn!("Several users have the same email address: {}", m); + continue } - self.users_by_email.insert(m.clone(), u.clone()); + users_by_email.insert(m.clone(), u.clone()); } } - Ok(()) + + tracing::info!("{} users loaded", users.len()); + up.send(UserDatabase { users, users_by_email }).context("update user db config")?; + stream.recv().await; + tracing::info!("Received SIGUSR1, reloading"); + } +} + +impl StaticLoginProvider { + pub async fn new(config: LoginStaticConfig) -> Result { + let (tx, mut rx) = watch::channel(UserDatabase::default()); + + tokio::spawn(update_user_list(config.user_list, tx)); + rx.changed().await?; + + Ok(Self { user_db: rx }) } } @@ -58,7 +71,8 @@ impl StaticLoginProvider { impl LoginProvider for StaticLoginProvider { async fn login(&self, username: &str, password: &str) -> Result { tracing::debug!(user=%username, "login"); - let user = match self.users.get(username) { + let user_db = self.user_db.borrow(); + let user = match user_db.users.get(username) { None => bail!("User {} does not exist", username), Some(u) => u, }; @@ -89,7 +103,8 @@ impl LoginProvider for StaticLoginProvider { } async fn public_login(&self, email: &str) -> Result { - let user = match self.users_by_email.get(email) { + let user_db = self.user_db.borrow(); + let user = match user_db.users_by_email.get(email) { None => bail!("No user for email address {}", email), Some(u) => u, }; diff --git a/src/main.rs b/src/main.rs index 3d87d11..02ba5e4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -42,7 +42,7 @@ enum Command { Provider(ProviderCommand), #[clap(subcommand)] - /// Specific tooling, should not be part of a normal workflow, for debug & experimenting only + /// Specific tooling, should not be part of a normal workflow, for debug & experimentation only Tools(ToolsCommand), //Test, } diff --git a/src/server.rs b/src/server.rs index 2321da8..8abdb86 100644 --- a/src/server.rs +++ b/src/server.rs @@ -18,21 +18,21 @@ pub struct Server { impl Server { pub async fn from_companion_config(config: CompanionConfig) -> Result { - let login = Arc::new(StaticLoginProvider::new(config.users)?); + let login = Arc::new(StaticLoginProvider::new(config.users).await?); let lmtp_server = None; - let imap_server = Some(imap::new(config.imap, login).await?); + let imap_server = Some(imap::new(config.imap, login.clone()).await?); Ok(Self { lmtp_server, imap_server }) } pub async fn from_provider_config(config: ProviderConfig) -> Result { let login: ArcLoginProvider = match config.users { - UserManagement::Static(x) => Arc::new(StaticLoginProvider::new(x)?), + UserManagement::Static(x) => Arc::new(StaticLoginProvider::new(x).await?), UserManagement::Ldap(x) => 
Arc::new(LdapLoginProvider::new(x)?), }; let lmtp_server = Some(LmtpServer::new(config.lmtp, login.clone())); - let imap_server = Some(imap::new(config.imap, login).await?); + let imap_server = Some(imap::new(config.imap, login.clone()).await?); Ok(Self { lmtp_server, imap_server }) } -- cgit v1.2.3 From 1b5f2eb695d658c57ba9c4264e76ca13bd82a958 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 14 Dec 2023 15:36:54 +0100 Subject: implement the reload feature --- src/config.rs | 4 ++-- src/main.rs | 33 +++++++++++++++++++++++++-------- src/server.rs | 24 +++++++++++++++++++++--- 3 files changed, 48 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/config.rs b/src/config.rs index e50cd68..1438910 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CompanionConfig { - pub pid: Option, + pub pid: Option, pub imap: ImapConfig, #[serde(flatten)] @@ -17,7 +17,7 @@ pub struct CompanionConfig { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ProviderConfig { - pub pid: Option, + pub pid: Option, pub imap: ImapConfig, pub lmtp: LmtpConfig, pub users: UserManagement, diff --git a/src/main.rs b/src/main.rs index 02ba5e4..f08f1a3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -13,9 +13,11 @@ mod server; mod storage; use std::path::PathBuf; +use std::io::Read; use anyhow::{bail, Result, Context}; use clap::{Parser, Subcommand}; +use nix::{unistd::Pid, sys::signal}; use config::*; use server::Server; @@ -92,7 +94,7 @@ enum CompanionCommand { Daemon, Reload { #[clap(short, long, env = "AEROGRAMME_PID")] - pid: Option, + pid: Option, }, Wizard, #[clap(subcommand)] @@ -104,7 +106,10 @@ enum ProviderCommand { /// Runs the IMAP+LMTP server daemon Daemon, /// Reload the daemon - Reload, + Reload { + #[clap(short, long, env = "AEROGRAMME_PID")] + pid: Option, + }, /// Manage static accounts #[clap(subcommand)] Account(AccountManagement), @@ -161,9 +166,7 @@ async fn main() -> Result<()> { let server = Server::from_companion_config(config).await?; server.run().await?; }, - CompanionCommand::Reload { pid: _pid } => { - unimplemented!(); - }, + CompanionCommand::Reload { pid } => reload(*pid, config.pid)?, CompanionCommand::Wizard => { unimplemented!(); }, @@ -177,9 +180,7 @@ async fn main() -> Result<()> { let server = Server::from_provider_config(config).await?; server.run().await?; }, - ProviderCommand::Reload => { - unimplemented!(); - }, + ProviderCommand::Reload { pid } => reload(*pid, config.pid)?, ProviderCommand::Account(cmd) => { let user_file = match config.users { UserManagement::Static(conf) => conf.user_list, @@ -260,6 +261,22 @@ async fn main() -> Result<()> { Ok(()) } +fn reload(pid: Option, pid_path: Option) -> Result<()> { + let final_pid = match (pid, pid_path) { + (Some(pid), _) => pid, + (_, Some(path)) => { + let mut f = std::fs::OpenOptions::new().read(true).open(path)?; + let mut pidstr = String::new(); + f.read_to_string(&mut pidstr)?; + pidstr.parse::()? 
+ }, + _ => bail!("Unable to infer your daemon's PID"), + }; + let pid = Pid::from_raw(final_pid); + signal::kill(pid, signal::Signal::SIGUSR1)?; + Ok(()) +} + fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) -> Result<()> { let mut ulist: UserList = read_config(users.clone()).context(format!("'{:?}' must be a user database", users))?; diff --git a/src/server.rs b/src/server.rs index 8abdb86..552a0e6 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,4 +1,6 @@ use std::sync::Arc; +use std::path::PathBuf; +use std::io::Write; use anyhow::Result; use futures::try_join; @@ -14,18 +16,21 @@ use crate::login::{ldap_provider::*, static_provider::*}; pub struct Server { lmtp_server: Option>, imap_server: Option, + pid_file: Option, } impl Server { pub async fn from_companion_config(config: CompanionConfig) -> Result { + tracing::info!("Init as companion"); let login = Arc::new(StaticLoginProvider::new(config.users).await?); let lmtp_server = None; let imap_server = Some(imap::new(config.imap, login.clone()).await?); - Ok(Self { lmtp_server, imap_server }) + Ok(Self { lmtp_server, imap_server, pid_file: config.pid }) } pub async fn from_provider_config(config: ProviderConfig) -> Result { + tracing::info!("Init as provider"); let login: ArcLoginProvider = match config.users { UserManagement::Static(x) => Arc::new(StaticLoginProvider::new(x).await?), UserManagement::Ldap(x) => Arc::new(LdapLoginProvider::new(x)?), @@ -34,11 +39,24 @@ impl Server { let lmtp_server = Some(LmtpServer::new(config.lmtp, login.clone())); let imap_server = Some(imap::new(config.imap, login.clone()).await?); - Ok(Self { lmtp_server, imap_server }) + Ok(Self { lmtp_server, imap_server, pid_file: config.pid }) } pub async fn run(self) -> Result<()> { - tracing::info!("Starting Aerogramme..."); + let pid = std::process::id(); + tracing::info!(pid=pid, "Starting main loops"); + + // write the pid file + if let Some(pid_file) = self.pid_file { + let mut file = std::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(pid_file)?; + file.write_all(pid.to_string().as_bytes())?; + drop(file); + } + let (exit_signal, provoke_exit) = watch_ctrl_c(); let _exit_on_err = move |err: anyhow::Error| { -- cgit v1.2.3 From 684f4de225c44464abcb6a9cb2ef6dcae90537a8 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Sat, 16 Dec 2023 11:13:32 +0100 Subject: new new new storage interface --- src/login/ldap_provider.rs | 4 +- src/login/static_provider.rs | 27 ++++--- src/storage/garage.rs | 118 ++++------------------------- src/storage/in_memory.rs | 177 ++++++++++++++++++++++++++----------------- src/storage/mod.rs | 95 ++++++++++++++++++----- 5 files changed, 219 insertions(+), 202 deletions(-) (limited to 'src') diff --git a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index 6e94061..4e3af3c 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -87,7 +87,9 @@ impl LdapLoginProvider { fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { let storage: Builders = match &self.storage_specific { - StorageSpecific::InMemory => Box::new(storage::in_memory::FullMem {}), + StorageSpecific::InMemory => Box::new(storage::in_memory::FullMem::new( + &get_attr(user, &self.username_attr)? 
+ )), StorageSpecific::Garage { from_config, bucket_source } => { let aws_access_key_id = get_attr(user, &from_config.aws_access_key_id_attr)?; let aws_secret_access_key = get_attr(user, &from_config.aws_secret_access_key_attr)?; diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 4a8d484..788a4c5 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -11,10 +11,15 @@ use crate::config::*; use crate::login::*; use crate::storage; +pub struct ContextualUserEntry { + pub username: String, + pub config: UserEntry, +} + #[derive(Default)] pub struct UserDatabase { - users: HashMap>, - users_by_email: HashMap>, + users: HashMap>, + users_by_email: HashMap>, } pub struct StaticLoginProvider { @@ -35,12 +40,12 @@ pub async fn update_user_list(config: PathBuf, up: watch::Sender) let users = ulist .into_iter() - .map(|(k, v)| (k, Arc::new(v))) + .map(|(username, config)| (username.clone() , Arc::new(ContextualUserEntry { username, config }))) .collect::>(); let mut users_by_email = HashMap::new(); for (_, u) in users.iter() { - for m in u.email_addresses.iter() { + for m in u.config.email_addresses.iter() { if users_by_email.contains_key(m) { tracing::warn!("Several users have the same email address: {}", m); continue @@ -78,13 +83,13 @@ impl LoginProvider for StaticLoginProvider { }; tracing::debug!(user=%username, "verify password"); - if !verify_password(password, &user.password)? { + if !verify_password(password, &user.config.password)? { bail!("Wrong password"); } tracing::debug!(user=%username, "fetch keys"); - let storage: storage::Builders = match &user.storage { - StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), + let storage: storage::Builders = match &user.config.storage { + StaticStorage::InMemory => Box::new(storage::in_memory::FullMem::new(username)), StaticStorage::Garage(grgconf) => Box::new(storage::garage::GrgCreds { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), @@ -95,7 +100,7 @@ impl LoginProvider for StaticLoginProvider { }), }; - let cr = CryptoRoot(user.crypto_root.clone()); + let cr = CryptoRoot(user.config.crypto_root.clone()); let keys = cr.crypto_keys(password)?; tracing::debug!(user=%username, "logged"); @@ -109,8 +114,8 @@ impl LoginProvider for StaticLoginProvider { Some(u) => u, }; - let storage: storage::Builders = match &user.storage { - StaticStorage::InMemory => Box::new(storage::in_memory::FullMem {}), + let storage: storage::Builders = match &user.config.storage { + StaticStorage::InMemory => Box::new(storage::in_memory::FullMem::new(&user.username)), StaticStorage::Garage(grgconf) => Box::new(storage::garage::GrgCreds { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), @@ -121,7 +126,7 @@ impl LoginProvider for StaticLoginProvider { }), }; - let cr = CryptoRoot(user.crypto_root.clone()); + let cr = CryptoRoot(user.config.crypto_root.clone()); let public_key = cr.public_key()?; Ok(PublicCredentials { diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 052e812..8276f70 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,7 +1,7 @@ use crate::storage::*; #[derive(Clone, Debug, Hash)] -pub struct GrgCreds { +pub struct GarageBuilder { pub region: String, pub s3_endpoint: String, pub k2v_endpoint: String, @@ -9,133 +9,47 @@ pub struct GrgCreds { pub aws_secret_access_key: String, pub bucket: String, } -pub struct GrgStore {} -pub struct GrgRef {} -pub struct GrgValue {} -#[derive(Clone, Debug, 
PartialEq)] -pub struct GrgOrphanRowRef {} - -impl IBuilders for GrgCreds { - fn row_store(&self) -> Result { - unimplemented!(); - } - - fn blob_store(&self) -> Result { +impl IBuilder for GarageBuilder { + fn build(&self) -> Box { unimplemented!(); } +} - fn url(&self) -> &str { - return "grg://unimplemented;" - } +pub struct GarageStore { + dummy: String, } -impl IRowStore for GrgStore { - fn row(&self, partition: &str, sort: &str) -> RowRef { +#[async_trait] +impl IStore for GarageStore { + async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { unimplemented!(); } - - fn select(&self, selector: Selector) -> AsyncResult> { + async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { unimplemented!(); } - fn rm(&self, selector: Selector) -> AsyncResult<()> { + async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { unimplemented!(); - } - fn from_orphan(&self, orphan: OrphanRowRef) -> Result { - unimplemented!(); } -} - -impl IRowRef for GrgRef { - /*fn clone_boxed(&self) -> RowRef { + async fn row_poll(&self, value: RowRef) -> Result { unimplemented!(); - }*/ - fn to_orphan(&self) -> OrphanRowRef { - unimplemented!() } - fn key(&self) -> (&str, &str) { + async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { unimplemented!(); - } - fn set_value(&self, content: &[u8]) -> RowValue { - unimplemented!(); - } - fn fetch(&self) -> AsyncResult { - unimplemented!(); } - fn rm(&self) -> AsyncResult<()> { + async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result { unimplemented!(); - } - fn poll(&self) -> AsyncResult { - unimplemented!(); - } -} -impl std::fmt::Debug for GrgRef { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - unimplemented!(); } -} - -impl IRowValue for GrgValue { - fn to_ref(&self) -> RowRef { + async fn blob_list(&self, prefix: &str) -> Result, StorageError> { unimplemented!(); } - fn content(&self) -> ConcurrentValues { + async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { unimplemented!(); } - fn push(&self) -> AsyncResult<()> { - unimplemented!(); - } -} - -impl std::fmt::Debug for GrgValue { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - unimplemented!(); - } -} - - -/* -/// A custom S3 region, composed of a region name and endpoint. 
-/// We use this instead of rusoto_signature::Region so that we can -/// derive Hash and Eq - - -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct Region { - pub name: String, - pub endpoint: String, -} - -impl Region { - pub fn as_rusoto_region(&self) -> rusoto_signature::Region { - rusoto_signature::Region::Custom { - name: self.name.clone(), - endpoint: self.endpoint.clone(), - } - } } -*/ - -/* -pub struct Garage { - pub s3_region: Region, - pub k2v_region: Region, - - pub aws_access_key_id: String, - pub aws_secret_access_key: String, - pub bucket: String, -} - -impl StoreBuilder<> for Garage { - fn row_store(&self) -> -} - -pub struct K2V {} -impl RowStore for K2V { -}*/ diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 5ba9461..09c6763 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -1,97 +1,134 @@ -use futures::FutureExt; use crate::storage::*; - -#[derive(Clone, Debug, Hash)] -pub struct FullMem {} -pub struct MemStore {} -pub struct MemRef {} -pub struct MemValue {} - -#[derive(Clone, Debug, PartialEq)] -pub struct MemOrphanRowRef {} - -impl IBuilders for FullMem { - fn row_store(&self) -> Result { - unimplemented!(); - } - - fn blob_store(&self) -> Result { - unimplemented!(); - } - - fn url(&self) -> &str { - return "mem://unimplemented;" - } +use std::collections::{HashMap, BTreeMap}; +use std::ops::Bound::{Included, Unbounded, Excluded}; +use std::sync::{Arc, RwLock}; + +/// This implementation is very inneficient, and not completely correct +/// Indeed, when the connector is dropped, the memory is freed. +/// It means that when a user disconnects, its data are lost. +/// It's intended only for basic debugging, do not use it for advanced tests... + +pub type ArcRow = Arc>>>>; +pub type ArcBlob = Arc>>>; + +#[derive(Clone, Debug)] +pub struct MemBuilder { + user: String, + url: String, + row: ArcRow, + blob: ArcBlob, } -impl IRowStore for MemStore { - fn row(&self, partition: &str, sort: &str) -> RowRef { - unimplemented!(); - } - - fn select(&self, selector: Selector) -> AsyncResult> { - unimplemented!() - } - - fn rm(&self, selector: Selector) -> AsyncResult<()> { - unimplemented!(); - } +impl IBuilder for MemBuilder { + fn build(&self) -> Box { + Box::new(MemStore { + row: self.row.clone(), + blob: self.blob.clone(), + }) + } +} - fn from_orphan(&self, orphan: OrphanRowRef) -> Result { - unimplemented!(); - } +pub struct MemStore { + row: ArcRow, + blob: ArcBlob, } -impl IRowRef for MemRef { - fn to_orphan(&self) -> OrphanRowRef { - unimplemented!() +impl MemStore { + fn inner_fetch(&self, row_ref: &RowRef) -> Result, StorageError> { + Ok(self.row + .read() + .or(Err(StorageError::Internal))? + .get(&row_ref.uid.shard) + .ok_or(StorageError::NotFound)? + .get(&row_ref.uid.sort) + .ok_or(StorageError::NotFound)? + .clone()) } +} - fn key(&self) -> (&str, &str) { +#[async_trait] +impl IStore for MemStore { + async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { + match select { + Selector::Range { shard, sort_begin, sort_end } => { + Ok(self.row + .read() + .or(Err(StorageError::Internal))? + .get(*shard) + .ok_or(StorageError::NotFound)? 
+ .range((Included(sort_begin.to_string()), Included(sort_end.to_string()))) + .map(|(k, v)| RowVal { + row_ref: RowRef { uid: RowUid { shard: shard.to_string(), sort: k.to_string() }, causality: Some("c".to_string()) }, + value: vec![Alternative::Value(v.clone())], + }) + .collect::>()) + }, + Selector::List(rlist) => { + let mut acc = vec![]; + for row_ref in rlist { + let bytes = self.inner_fetch(row_ref)?; + let row_val = RowVal { + row_ref: row_ref.clone(), + value: vec![Alternative::Value(bytes)] + }; + acc.push(row_val); + } + Ok(acc) + }, + Selector::Prefix { shard, sort_prefix } => { + let mut sort_end = sort_prefix.to_string(); + let last_bound = match sort_end.pop() { + None => Unbounded, + Some(ch) => { + let nc = char::from_u32(ch as u32 + 1).unwrap(); + sort_end.push(nc); + Excluded(sort_end) + } + }; + + Ok(self.row + .read() + .or(Err(StorageError::Internal))? + .get(*shard) + .ok_or(StorageError::NotFound)? + .range((Included(sort_prefix.to_string()), last_bound)) + .map(|(k, v)| RowVal { + row_ref: RowRef { uid: RowUid { shard: shard.to_string(), sort: k.to_string() }, causality: Some("c".to_string()) }, + value: vec![Alternative::Value(v.clone())], + }) + .collect::>()) + }, + Selector::Single(row_ref) => { + let bytes = self.inner_fetch(row_ref)?; + Ok(vec![RowVal{ row_ref: row_ref.clone(), value: vec![Alternative::Value(bytes)]}]) + } + } + } + + async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { unimplemented!(); } - /*fn clone_boxed(&self) -> RowRef { + async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { unimplemented!(); - }*/ - fn set_value(&self, content: &[u8]) -> RowValue { - unimplemented!(); - } - fn fetch(&self) -> AsyncResult { - unimplemented!(); } - fn rm(&self) -> AsyncResult<()> { + async fn row_poll(&self, value: RowRef) -> Result { unimplemented!(); } - fn poll(&self) -> AsyncResult { - async { - let rv: RowValue = Box::new(MemValue{}); - Ok(rv) - }.boxed() - } -} -impl std::fmt::Debug for MemRef { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { unimplemented!(); - } -} -impl IRowValue for MemValue { - fn to_ref(&self) -> RowRef { - unimplemented!(); } - fn content(&self) -> ConcurrentValues { + async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result { unimplemented!(); + } - fn push(&self) -> AsyncResult<()> { + async fn blob_list(&self, prefix: &str) -> Result, StorageError> { unimplemented!(); } -} - -impl std::fmt::Debug for MemValue { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { unimplemented!(); } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c002278..cb66d58 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -8,39 +8,97 @@ * into the object system so it is not exposed. 
*/ -use std::hash::{Hash, Hasher}; -use futures::future::BoxFuture; - pub mod in_memory; pub mod garage; +use std::hash::{Hash, Hasher}; +use std::collections::HashMap; +use futures::future::BoxFuture; +use async_trait::async_trait; + +#[derive(Debug, Clone)] pub enum Alternative { Tombstone, Value(Vec), } type ConcurrentValues = Vec; +#[derive(Debug)] +pub enum StorageError { + NotFound, + Internal, +} + +#[derive(Debug, Clone)] +pub struct RowUid { + shard: String, + sort: String, +} + +#[derive(Debug, Clone)] +pub struct RowRef { + uid: RowUid, + causality: Option, +} + +#[derive(Debug, Clone)] +pub struct RowVal { + row_ref: RowRef, + value: ConcurrentValues, +} + +#[derive(Debug, Clone)] +pub struct BlobRef(String); + +#[derive(Debug, Clone)] +pub struct BlobVal { + blob_ref: BlobRef, + meta: HashMap, + value: Vec, +} + +pub enum Selector<'a> { + Range { shard: &'a str, sort_begin: &'a str, sort_end: &'a str }, + List (Vec), // list of (shard_key, sort_key) + Prefix { shard: &'a str, sort_prefix: &'a str }, + Single(RowRef), +} + +#[async_trait] +pub trait IStore { + async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError>; + async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError>; + async fn row_insert(&self, values: Vec) -> Result<(), StorageError>; + async fn row_poll(&self, value: RowRef) -> Result; + + async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result; + async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result; + async fn blob_list(&self, prefix: &str) -> Result, StorageError>; + async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError>; +} + +pub trait IBuilder { + fn build(&self) -> Box; +} + + + + + + +/* #[derive(Clone, Debug, PartialEq)] pub enum OrphanRowRef { Garage(garage::GrgOrphanRowRef), Memory(in_memory::MemOrphanRowRef), } -pub enum Selector<'a> { - Range { shard_key: &'a str, begin: &'a str, end: &'a str }, - List (Vec<(&'a str, &'a str)>), // list of (shard_key, sort_key) - Prefix { shard_key: &'a str, prefix: &'a str }, -} -#[derive(Debug)] -pub enum StorageError { - NotFound, - Internal, - IncompatibleOrphan, -} + + impl std::fmt::Display for StorageError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("Storage Error: "); + f.write_str("Storage Error: ")?; match self { Self::NotFound => f.write_str("Item not found"), Self::Internal => f.write_str("An internal error occured"), @@ -55,6 +113,7 @@ pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; // ----- Builders pub trait IBuilders { + fn box_clone(&self) -> Builders; fn row_store(&self) -> Result; fn blob_store(&self) -> Result; fn url(&self) -> &str; @@ -62,8 +121,7 @@ pub trait IBuilders { pub type Builders = Box; impl Clone for Builders { fn clone(&self) -> Self { - // @FIXME write a real implementation with a box_clone function - Box::new(in_memory::FullMem{}) + self.box_clone() } } impl std::fmt::Debug for Builders { @@ -102,7 +160,7 @@ pub trait IRowRef: std::fmt::Debug fn rm(&self) -> AsyncResult<()>; fn poll(&self) -> AsyncResult; } -pub type RowRef = Box; +pub type RowRef<'a> = Box; pub trait IRowValue: std::fmt::Debug { @@ -138,3 +196,4 @@ pub trait IBlobValue { fn push(&self) -> AsyncResult<()>; } pub type BlobValue = Box; +*/ -- cgit v1.2.3 From 3d41f40dc8cd6bdfa7a9279ab1959564d06eefaf Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Mon, 18 Dec 2023 17:09:44 +0100 Subject: Storage trait new implementation --- src/bayou.rs | 134 +++++++++++---------------- src/login/ldap_provider.rs | 14 
+-- src/login/mod.rs | 12 +-- src/login/static_provider.rs | 16 ++-- src/mail/incoming.rs | 103 ++++++++++----------- src/mail/mailbox.rs | 59 ++++++------ src/mail/user.rs | 68 +++++--------- src/storage/garage.rs | 30 ++++++- src/storage/in_memory.rs | 35 ++++++-- src/storage/mod.rs | 209 ++++++++++++++++--------------------------- 10 files changed, 311 insertions(+), 369 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index 3042f94..afe3c75 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -45,8 +45,7 @@ pub struct Bayou { path: String, key: Key, - k2v: storage::RowStore, - s3: storage::BlobStore, + storage: storage::Store, checkpoint: (Timestamp, S), history: Vec<(Timestamp, S::Op, Option)>, @@ -60,17 +59,16 @@ pub struct Bayou { impl Bayou { pub fn new(creds: &Credentials, path: String) -> Result { - let k2v_client = creds.row_client()?; - let s3_client = creds.blob_client()?; + let storage = creds.storage.build()?; - let target = k2v_client.row(&path, WATCH_SK); - let watch = K2vWatch::new(creds, path.clone(), WATCH_SK.to_string())?; + //let target = k2v_client.row(&path, WATCH_SK); + let target = storage::RowRef::new(&path, WATCH_SK); + let watch = K2vWatch::new(creds, target.clone())?; Ok(Self { path, + storage, key: creds.keys.master.clone(), - k2v: k2v_client, - s3: s3_client, checkpoint: (Timestamp::zero(), S::default()), history: vec![], last_sync: None, @@ -96,9 +94,7 @@ impl Bayou { } else { debug!("(sync) loading checkpoint: {}", key); - let obj_res = self.s3.blob(key).fetch().await?; - let buf = obj_res.content().ok_or(anyhow!("object can't be empty"))?; - + let buf = self.storage.blob_fetch(&storage::BlobRef(key.to_string())).await?.value; debug!("(sync) checkpoint body length: {}", buf.len()); let ck = open_deserialize::(&buf, &self.key)?; @@ -129,42 +125,26 @@ impl Bayou { // 3. List all operations starting from checkpoint let ts_ser = self.checkpoint.0.to_string(); debug!("(sync) looking up operations starting at {}", ts_ser); - let ops_map = self.k2v.select(storage::Selector::Range { shard_key: &self.path, begin: &ts_ser, end: WATCH_SK }).await?; - /*let ops_map = self - .k2v - .read_batch(&[BatchReadOp { - partition_key: &self.path, - filter: Filter { - start: Some(&ts_ser), - end: Some(WATCH_SK), - prefix: None, - limit: None, - reverse: false, - }, - single_item: false, - conflicts_only: false, - tombstones: false, - }]) - .await? - .into_iter() - .next() - .ok_or(anyhow!("Missing K2V result"))? 
- .items;*/ + let ops_map = self.storage.row_fetch(&storage::Selector::Range { + shard: &self.path, + sort_begin: &ts_ser, + sort_end: WATCH_SK + }).await?; let mut ops = vec![]; for row_value in ops_map { - let row = row_value.to_ref(); - let sort_key = row.key().1; + let row = row_value.row_ref; + let sort_key = row.uid.sort; let ts = sort_key.parse::().map_err(|_| anyhow!("Invalid operation timestamp: {}", sort_key))?; - let val = row_value.content(); + let val = row_value.value; if val.len() != 1 { - bail!("Invalid operation, has {} values", row_value.content().len()); + bail!("Invalid operation, has {} values", val.len()); } match &val[0] { storage::Alternative::Value(v) => { let op = open_deserialize::(v, &self.key)?; - debug!("(sync) operation {}: {} {:?}", sort_key, base64::encode(v), op); + debug!("(sync) operation {}: {:?}", sort_key, op); ops.push((ts, op)); } storage::Alternative::Tombstone => { @@ -231,7 +211,7 @@ impl Bayou { // Save info that sync has been done self.last_sync = new_last_sync; - self.last_sync_watch_ct = self.k2v.from_orphan(new_last_sync_watch_ct).expect("Source & target storage must be compatible"); + self.last_sync_watch_ct = new_last_sync_watch_ct; Ok(()) } @@ -243,7 +223,7 @@ impl Bayou { Some(t) => Instant::now() > t + (CHECKPOINT_INTERVAL / 5), _ => true, }; - let changed = self.last_sync_watch_ct.to_orphan() != *self.watch.rx.borrow(); + let changed = self.last_sync_watch_ct != *self.watch.rx.borrow(); if too_old || changed { self.sync().await?; } @@ -263,12 +243,12 @@ impl Bayou { .map(|(ts, _, _)| ts) .unwrap_or(&self.checkpoint.0), ); - self.k2v - .row(&self.path, &ts.to_string()) - .set_value(&seal_serialize(&op, &self.key)?) - .push() - .await?; + let row_val = storage::RowVal::new( + storage::RowRef::new(&self.path, &ts.to_string()), + seal_serialize(&op, &self.key)?, + ); + self.storage.row_insert(vec![row_val]).await?; self.watch.notify.notify_one(); let new_state = self.state().apply(&op); @@ -368,12 +348,11 @@ impl Bayou { let cryptoblob = seal_serialize(&state_cp, &self.key)?; debug!("(cp) checkpoint body length: {}", cryptoblob.len()); - self.s3 - .blob(format!("{}/checkpoint/{}", self.path, ts_cp.to_string()).as_str()) - .set_value(cryptoblob.into()) - .push() - .await?; - + let blob_val = storage::BlobVal::new( + storage::BlobRef(format!("{}/checkpoint/{}", self.path, ts_cp.to_string())), + cryptoblob.into(), + ); + self.storage.blob_insert(&blob_val).await?; // Drop old checkpoints (but keep at least CHECKPOINTS_TO_KEEP of them) let ecp_len = existing_checkpoints.len(); @@ -383,22 +362,16 @@ impl Bayou { // Delete blobs for (_ts, key) in existing_checkpoints[..last_to_keep].iter() { debug!("(cp) drop old checkpoint {}", key); - self.s3 - .blob(key) - .rm() - .await?; + self.storage.blob_rm(&storage::BlobRef(key.to_string())).await?; } // Delete corresponding range of operations let ts_ser = existing_checkpoints[last_to_keep].0.to_string(); - self.k2v - .rm(storage::Selector::Range{ - shard_key: &self.path, - begin: "", - end: &ts_ser - }) - .await?; - + self.storage.row_rm(&storage::Selector::Range { + shard: &self.path, + sort_begin: "", + sort_end: &ts_ser + }).await? 
} Ok(()) @@ -417,11 +390,11 @@ impl Bayou { async fn list_checkpoints(&self) -> Result> { let prefix = format!("{}/checkpoint/", self.path); - let checkpoints_res = self.s3.list(&prefix).await?; + let checkpoints_res = self.storage.blob_list(&prefix).await?; let mut checkpoints = vec![]; for object in checkpoints_res { - let key = object.key(); + let key = object.0; if let Some(ckid) = key.strip_prefix(&prefix) { if let Ok(ts) = ckid.parse::() { checkpoints.push((ts, key.into())); @@ -436,9 +409,8 @@ impl Bayou { // ---- Bayou watch in K2V ---- struct K2vWatch { - pk: String, - sk: String, - rx: watch::Receiver, + target: storage::RowRef, + rx: watch::Receiver, notify: Notify, } @@ -446,17 +418,17 @@ impl K2vWatch { /// Creates a new watch and launches subordinate threads. /// These threads hold Weak pointers to the struct; /// they exit when the Arc is dropped. - fn new(creds: &Credentials, pk: String, sk: String) -> Result> { - let row_client = creds.row_client()?; + fn new(creds: &Credentials, target: storage::RowRef) -> Result> { + let storage = creds.storage.build()?; - let (tx, rx) = watch::channel::(row_client.row(&pk, &sk).to_orphan()); + let (tx, rx) = watch::channel::(target.clone()); let notify = Notify::new(); - let watch = Arc::new(K2vWatch { pk, sk, rx, notify }); + let watch = Arc::new(K2vWatch { target, rx, notify }); tokio::spawn(Self::background_task( Arc::downgrade(&watch), - row_client, + storage, tx, )); @@ -465,11 +437,11 @@ impl K2vWatch { async fn background_task( self_weak: Weak, - k2v: storage::RowStore, - tx: watch::Sender, + storage: storage::Store, + tx: watch::Sender, ) { let mut row = match Weak::upgrade(&self_weak) { - Some(this) => k2v.row(&this.pk, &this.sk), + Some(this) => this.target.clone(), None => { error!("can't start loop"); return @@ -479,20 +451,19 @@ impl K2vWatch { while let Some(this) = Weak::upgrade(&self_weak) { debug!( "bayou k2v watch bg loop iter ({}, {})", - this.pk, this.sk + this.target.uid.shard, this.target.uid.sort ); tokio::select!( _ = tokio::time::sleep(Duration::from_secs(60)) => continue, - update = row.poll() => { - //update = k2v_wait_value_changed(&k2v, &this.pk, &this.sk, &ct) => { + update = storage.row_poll(&row) => { match update { Err(e) => { error!("Error in bayou k2v wait value changed: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; } Ok(new_value) => { - row = new_value.to_ref(); - if tx.send(row.to_orphan()).is_err() { + row = new_value.row_ref; + if tx.send(row.clone()).is_err() { break; } } @@ -500,7 +471,8 @@ impl K2vWatch { } _ = this.notify.notified() => { let rand = u128::to_be_bytes(thread_rng().gen()).to_vec(); - if let Err(e) = row.set_value(&rand).push().await + let row_val = storage::RowVal::new(row.clone(), rand); + if let Err(e) = storage.row_insert(vec![row_val]).await { error!("Error in bayou k2v watch updater loop: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; diff --git a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index 4e3af3c..009605d 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -85,11 +85,11 @@ impl LdapLoginProvider { }) } - fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { - let storage: Builders = match &self.storage_specific { - StorageSpecific::InMemory => Box::new(storage::in_memory::FullMem::new( + fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { + let storage: Builder = match &self.storage_specific { + StorageSpecific::InMemory => storage::in_memory::MemBuilder::new( 
&get_attr(user, &self.username_attr)? - )), + ), StorageSpecific::Garage { from_config, bucket_source } => { let aws_access_key_id = get_attr(user, &from_config.aws_access_key_id_attr)?; let aws_secret_access_key = get_attr(user, &from_config.aws_secret_access_key_attr)?; @@ -99,14 +99,14 @@ impl LdapLoginProvider { }; - Box::new(storage::garage::GrgCreds { + storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: from_config.aws_region.clone(), s3_endpoint: from_config.s3_endpoint.clone(), k2v_endpoint: from_config.k2v_endpoint.clone(), aws_access_key_id, aws_secret_access_key, - bucket - }) + bucket, + })? }, }; diff --git a/src/login/mod.rs b/src/login/mod.rs index 9e0c437..d331522 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -33,23 +33,15 @@ pub type ArcLoginProvider = Arc; #[derive(Clone, Debug)] pub struct Credentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: Builders, + pub storage: Builder, /// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V pub keys: CryptoKeys, } -impl Credentials { - pub fn row_client(&self) -> Result { - Ok(self.storage.row_store()?) - } - pub fn blob_client(&self) -> Result { - Ok(self.storage.blob_store()?) - } -} #[derive(Clone, Debug)] pub struct PublicCredentials { /// The storage credentials are used to authenticate access to the underlying storage (S3, K2V) - pub storage: Builders, + pub storage: Builder, pub public_key: PublicKey, } diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 788a4c5..5896f16 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -88,16 +88,16 @@ impl LoginProvider for StaticLoginProvider { } tracing::debug!(user=%username, "fetch keys"); - let storage: storage::Builders = match &user.config.storage { - StaticStorage::InMemory => Box::new(storage::in_memory::FullMem::new(username)), - StaticStorage::Garage(grgconf) => Box::new(storage::garage::GrgCreds { + let storage: storage::Builder = match &user.config.storage { + StaticStorage::InMemory => storage::in_memory::MemBuilder::new(username), + StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), s3_endpoint: grgconf.s3_endpoint.clone(), aws_access_key_id: grgconf.aws_access_key_id.clone(), aws_secret_access_key: grgconf.aws_secret_access_key.clone(), bucket: grgconf.bucket.clone(), - }), + })?, }; let cr = CryptoRoot(user.config.crypto_root.clone()); @@ -114,16 +114,16 @@ impl LoginProvider for StaticLoginProvider { Some(u) => u, }; - let storage: storage::Builders = match &user.config.storage { - StaticStorage::InMemory => Box::new(storage::in_memory::FullMem::new(&user.username)), - StaticStorage::Garage(grgconf) => Box::new(storage::garage::GrgCreds { + let storage: storage::Builder = match &user.config.storage { + StaticStorage::InMemory => storage::in_memory::MemBuilder::new(&user.username), + StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), s3_endpoint: grgconf.s3_endpoint.clone(), aws_access_key_id: grgconf.aws_access_key_id.clone(), aws_secret_access_key: grgconf.aws_secret_access_key.clone(), bucket: grgconf.bucket.clone(), - }), + })?, }; let cr = CryptoRoot(user.config.crypto_root.clone()); diff --git a/src/mail/incoming.rs 
b/src/mail/incoming.rs index e3c729f..f6b831d 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -5,6 +5,7 @@ use std::sync::{Arc, Weak}; use std::time::Duration; use anyhow::{anyhow, bail, Result}; +use base64::Engine; use futures::{future::BoxFuture, FutureExt}; //use tokio::io::AsyncReadExt; use tokio::sync::watch; @@ -50,13 +51,11 @@ async fn incoming_mail_watch_process_internal( creds: Credentials, mut rx_inbox_id: watch::Receiver>, ) -> Result<()> { - let mut lock_held = k2v_lock_loop(creds.row_client()?, INCOMING_PK, INCOMING_LOCK_SK); - - let k2v = creds.row_client()?; - let s3 = creds.blob_client()?; + let mut lock_held = k2v_lock_loop(creds.storage.build()?, storage::RowRef::new(INCOMING_PK, INCOMING_LOCK_SK)); + let storage = creds.storage.build()?; let mut inbox: Option> = None; - let mut incoming_key = k2v.row(INCOMING_PK, INCOMING_WATCH_SK); + let mut incoming_key = storage::RowRef::new(INCOMING_PK, INCOMING_WATCH_SK); loop { let maybe_updated_incoming_key = if *lock_held.borrow() { @@ -64,9 +63,9 @@ async fn incoming_mail_watch_process_internal( let wait_new_mail = async { loop { - match incoming_key.poll().await + match storage.row_poll(&incoming_key).await { - Ok(row_val) => break row_val.to_ref(), + Ok(row_val) => break row_val.row_ref, Err(e) => { error!("Error in wait_new_mail: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; @@ -77,7 +76,7 @@ async fn incoming_mail_watch_process_internal( tokio::select! { inc_k = wait_new_mail => Some(inc_k), - _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(k2v.from_orphan(incoming_key.to_orphan()).expect("Incompatible source & target storage")), + _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(incoming_key.clone()), _ = lock_held.changed() => None, _ = rx_inbox_id.changed() => None, } @@ -119,7 +118,7 @@ async fn incoming_mail_watch_process_internal( // If we were able to open INBOX, and we have mail, // fetch new mail if let (Some(inbox), Some(updated_incoming_key)) = (&inbox, maybe_updated_incoming_key) { - match handle_incoming_mail(&user, &s3, inbox, &lock_held).await { + match handle_incoming_mail(&user, &storage, inbox, &lock_held).await { Ok(()) => { incoming_key = updated_incoming_key; } @@ -136,20 +135,20 @@ async fn incoming_mail_watch_process_internal( async fn handle_incoming_mail( user: &Arc, - blobs: &storage::BlobStore, + storage: &storage::Store, inbox: &Arc, lock_held: &watch::Receiver, ) -> Result<()> { - let mails_res = blobs.list("incoming/").await?; + let mails_res = storage.blob_list("incoming/").await?; for object in mails_res { if !*lock_held.borrow() { break; } - let key = object.key(); + let key = object.0; if let Some(mail_id) = key.strip_prefix("incoming/") { if let Ok(mail_id) = mail_id.parse::() { - move_incoming_message(user, blobs, inbox, mail_id).await?; + move_incoming_message(user, storage, inbox, mail_id).await?; } } } @@ -159,7 +158,7 @@ async fn handle_incoming_mail( async fn move_incoming_message( user: &Arc, - s3: &storage::BlobStore, + storage: &storage::Store, inbox: &Arc, id: UniqueIdent, ) -> Result<()> { @@ -168,14 +167,15 @@ async fn move_incoming_message( let object_key = format!("incoming/{}", id); // 1. 
Fetch message from S3 - let object = s3.blob(&object_key).fetch().await?; + let object = storage.blob_fetch(&storage::BlobRef(object_key)).await?; // 1.a decrypt message key from headers //info!("Object metadata: {:?}", get_result.metadata); let key_encrypted_b64 = object - .get_meta(MESSAGE_KEY) + .meta + .get(MESSAGE_KEY) .ok_or(anyhow!("Missing key in metadata"))?; - let key_encrypted = base64::decode(key_encrypted_b64)?; + let key_encrypted = base64::engine::general_purpose::STANDARD.decode(key_encrypted_b64)?; let message_key = sodiumoxide::crypto::sealedbox::open( &key_encrypted, &user.creds.keys.public, @@ -186,28 +186,28 @@ async fn move_incoming_message( cryptoblob::Key::from_slice(&message_key).ok_or(anyhow!("Invalid message key"))?; // 1.b retrieve message body - let obj_body = object.content().ok_or(anyhow!("Missing object body"))?; + let obj_body = object.value; let plain_mail = cryptoblob::open(&obj_body, &message_key) .map_err(|_| anyhow!("Cannot decrypt email content"))?; // 2 parse mail and add to inbox let msg = IMF::try_from(&plain_mail[..]).map_err(|_| anyhow!("Invalid email body"))?; inbox - .append_from_s3(msg, id, object.to_ref(), message_key) + .append_from_s3(msg, id, object.blob_ref.clone(), message_key) .await?; // 3 delete from incoming - object.to_ref().rm().await?; + storage.blob_rm(&object.blob_ref).await?; Ok(()) } // ---- UTIL: K2V locking loop, use this to try to grab a lock using a K2V entry as a signal ---- -fn k2v_lock_loop(k2v: storage::RowStore, pk: &'static str, sk: &'static str) -> watch::Receiver { +fn k2v_lock_loop(storage: storage::Store, row_ref: storage::RowRef) -> watch::Receiver { let (held_tx, held_rx) = watch::channel(false); - tokio::spawn(k2v_lock_loop_internal(k2v, pk, sk, held_tx)); + tokio::spawn(k2v_lock_loop_internal(storage, row_ref, held_tx)); held_rx } @@ -216,13 +216,12 @@ fn k2v_lock_loop(k2v: storage::RowStore, pk: &'static str, sk: &'static str) -> enum LockState { Unknown, Empty, - Held(UniqueIdent, u64, storage::OrphanRowRef), + Held(UniqueIdent, u64, storage::RowRef), } async fn k2v_lock_loop_internal( - k2v: storage::RowStore, - pk: &'static str, - sk: &'static str, + storage: storage::Store, + row_ref: storage::RowRef, held_tx: watch::Sender, ) { let (state_tx, mut state_rx) = watch::channel::(LockState::Unknown); @@ -232,10 +231,10 @@ async fn k2v_lock_loop_internal( // Loop 1: watch state of lock in K2V, save that in corresponding watch channel let watch_lock_loop: BoxFuture> = async { - let mut ct = k2v.row(pk, sk); + let mut ct = row_ref.clone(); loop { info!("k2v watch lock loop iter: ct = {:?}", ct); - match ct.poll().await { + match storage.row_poll(&ct).await { Err(e) => { error!( "Error in k2v wait value changed: {} ; assuming we no longer hold lock.", @@ -246,7 +245,7 @@ async fn k2v_lock_loop_internal( } Ok(cv) => { let mut lock_state = None; - for v in cv.content().iter() { + for v in cv.value.iter() { if let storage::Alternative::Value(vbytes) = v { if vbytes.len() == 32 { let ts = u64::from_be_bytes(vbytes[..8].try_into().unwrap()); @@ -260,7 +259,7 @@ async fn k2v_lock_loop_internal( } } } - let new_ct = cv.to_ref(); + let new_ct = cv.row_ref; info!( "k2v watch lock loop: changed, old ct = {:?}, new ct = {:?}, v = {:?}", @@ -268,7 +267,7 @@ async fn k2v_lock_loop_internal( ); state_tx.send( lock_state - .map(|(pid, ts)| LockState::Held(pid, ts, new_ct.to_orphan())) + .map(|(pid, ts)| LockState::Held(pid, ts, new_ct.clone())) .unwrap_or(LockState::Empty), )?; ct = new_ct; @@ -358,10 +357,10 @@ async fn 
k2v_lock_loop_internal( )); lock[8..].copy_from_slice(&our_pid.0); let row = match ct { - Some(orphan) => k2v.from_orphan(orphan).expect("Source & target must be storage compatible"), - None => k2v.row(pk, sk), + Some(existing) => existing, + None => row_ref.clone(), }; - if let Err(e) = row.set_value(&lock).push().await { + if let Err(e) = storage.row_insert(vec![storage::RowVal::new(row, lock)]).await { error!("Could not take lock: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; } @@ -377,7 +376,7 @@ async fn k2v_lock_loop_internal( info!("lock loop exited, releasing"); if !held_tx.is_closed() { - warn!("wierd..."); + warn!("weird..."); let _ = held_tx.send(false); } @@ -387,8 +386,10 @@ async fn k2v_lock_loop_internal( _ => None, }; if let Some(ct) = release { - let row = k2v.from_orphan(ct).expect("Incompatible source & target storage"); - let _ = row.rm().await; + match storage.row_rm(&storage::Selector::Single(&ct)).await { + Err(e) => warn!("Unable to release lock {:?}: {}", ct, e), + Ok(_) => (), + }; } } @@ -410,30 +411,32 @@ impl EncryptedMessage { } pub async fn deliver_to(self: Arc, creds: PublicCredentials) -> Result<()> { - let s3_client = creds.storage.blob_store()?; - let k2v_client = creds.storage.row_store()?; + let storage = creds.storage.build()?; // Get causality token of previous watch key - let query = k2v_client.row(INCOMING_PK, INCOMING_WATCH_SK); - let watch_ct = match query.fetch().await { + let query = storage::RowRef::new(INCOMING_PK, INCOMING_WATCH_SK); + let watch_ct = match storage.row_fetch(&storage::Selector::Single(&query)).await { Err(_) => query, - Ok(cv) => cv.to_ref(), + Ok(cv) => cv.into_iter().next().map(|v| v.row_ref).unwrap_or(query), }; // Write mail to encrypted storage let encrypted_key = sodiumoxide::crypto::sealedbox::seal(self.key.as_ref(), &creds.public_key); - let key_header = base64::encode(&encrypted_key); + let key_header = base64::engine::general_purpose::STANDARD.encode(&encrypted_key); - let mut send = s3_client - .blob(&format!("incoming/{}", gen_ident())) - .set_value(self.encrypted_body.clone().into()); - send.set_meta(MESSAGE_KEY, &key_header); - send.push().await?; + let blob_val = storage::BlobVal::new( + storage::BlobRef(format!("incoming/{}", gen_ident())), + self.encrypted_body.clone().into(), + ).with_meta(MESSAGE_KEY.to_string(), key_header); + storage.blob_insert(&blob_val).await?; // Update watch key to signal new mail - watch_ct.set_value(gen_ident().0.as_ref()).push().await?; - + let watch_val = storage::RowVal::new( + watch_ct.clone(), + gen_ident().0.to_vec(), + ); + storage.row_insert(vec![watch_val]).await?; Ok(()) } } diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 060267a..b4afd5e 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -8,7 +8,7 @@ use crate::login::Credentials; use crate::mail::uidindex::*; use crate::mail::unique_ident::*; use crate::mail::IMF; -use crate::storage::{RowStore, BlobStore, self}; +use crate::storage::{Store, RowRef, RowVal, BlobRef, BlobVal, Selector, self}; use crate::timestamp::now_msec; pub struct Mailbox { @@ -44,8 +44,7 @@ impl Mailbox { let mbox = RwLock::new(MailboxInternal { id, encryption_key: creds.keys.master.clone(), - k2v: creds.storage.row_store()?, - s3: creds.storage.blob_store()?, + storage: creds.storage.build()?, uid_index, mail_path, }); @@ -178,10 +177,7 @@ struct MailboxInternal { id: UniqueIdent, mail_path: String, encryption_key: Key, - - k2v: RowStore, - s3: BlobStore, - + storage: Store, uid_index: Bayou, } @@ -200,15 
+196,15 @@ impl MailboxInternal { async fn fetch_meta(&self, ids: &[UniqueIdent]) -> Result> { let ids = ids.iter().map(|x| x.to_string()).collect::>(); - let ops = ids.iter().map(|id| (self.mail_path.as_str(), id.as_str())).collect::>(); - let res_vec = self.k2v.select(storage::Selector::List(ops)).await?; + let ops = ids.iter().map(|id| RowRef::new(self.mail_path.as_str(), id.as_str())).collect::>(); + let res_vec = self.storage.row_fetch(&Selector::List(ops)).await?; let mut meta_vec = vec![]; for res in res_vec.into_iter() { let mut meta_opt = None; // Resolve conflicts - for v in res.content().iter() { + for v in res.value.iter() { match v { storage::Alternative::Tombstone => (), storage::Alternative::Value(v) => { @@ -227,7 +223,7 @@ impl MailboxInternal { if let Some(meta) = meta_opt { meta_vec.push(meta); } else { - bail!("No valid meta value in k2v for {:?}", res.to_ref().key()); + bail!("No valid meta value in k2v for {:?}", res.row_ref); } } @@ -235,9 +231,9 @@ impl MailboxInternal { } async fn fetch_full(&self, id: UniqueIdent, message_key: &Key) -> Result> { - let obj_res = self.s3.blob(&format!("{}/{}", self.mail_path, id)).fetch().await?; - let body = obj_res.content().ok_or(anyhow!("missing body"))?; - cryptoblob::open(body, message_key) + let obj_res = self.storage.blob_fetch(&BlobRef(format!("{}/{}", self.mail_path, id))).await?; + let body = obj_res.value; + cryptoblob::open(&body, message_key) } // ---- Functions for changing the mailbox ---- @@ -270,7 +266,10 @@ impl MailboxInternal { async { // Encrypt and save mail body let message_blob = cryptoblob::seal(mail.raw, &message_key)?; - self.s3.blob(&format!("{}/{}", self.mail_path, ident)).set_value(message_blob).push().await?; + self.storage.blob_insert(&BlobVal::new( + BlobRef(format!("{}/{}", self.mail_path, ident)), + message_blob, + )).await?; Ok::<_, anyhow::Error>(()) }, async { @@ -282,7 +281,10 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.k2v.row(&self.mail_path, &ident.to_string()).set_value(&meta_blob).push().await?; + self.storage.row_insert(vec![RowVal::new( + RowRef::new(&self.mail_path, &ident.to_string()), + meta_blob, + )]).await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -307,14 +309,14 @@ impl MailboxInternal { &mut self, mail: IMF<'a>, ident: UniqueIdent, - blob_ref: storage::BlobRef, + blob_src: storage::BlobRef, message_key: Key, ) -> Result<()> { futures::try_join!( async { // Copy mail body from previous location - let dst = self.s3.blob(&format!("{}/{}", self.mail_path, ident)); - blob_ref.copy(&dst).await?; + let blob_dst = BlobRef(format!("{}/{}", self.mail_path, ident)); + self.storage.blob_copy(&blob_src, &blob_dst).await?; Ok::<_, anyhow::Error>(()) }, async { @@ -326,7 +328,10 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.k2v.row(&self.mail_path, &ident.to_string()).set_value(&meta_blob).push().await?; + self.storage.row_insert(vec![RowVal::new( + RowRef::new(&self.mail_path, &ident.to_string()), + meta_blob, + )]).await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -350,13 +355,13 @@ impl MailboxInternal { futures::try_join!( async { // Delete mail body from S3 - self.s3.blob(&format!("{}/{}", self.mail_path, ident)).rm().await?; + self.storage.blob_rm(&BlobRef(format!("{}/{}", self.mail_path, ident))).await?; Ok::<_, anyhow::Error>(()) }, async { // Delete mail meta from K2V 
let sk = ident.to_string(); - self.k2v.row(&self.mail_path, &sk).fetch().await?.to_ref().rm().await?; + self.storage.row_rm(&Selector::Single(&RowRef::new(&self.mail_path, &sk))).await?; Ok::<_, anyhow::Error>(()) } )?; @@ -402,15 +407,19 @@ impl MailboxInternal { futures::try_join!( async { - let dst = self.s3.blob(&format!("{}/{}", self.mail_path, new_id)); - self.s3.blob(&format!("{}/{}", from.mail_path, source_id)).copy(&dst).await?; + let dst = BlobRef(format!("{}/{}", self.mail_path, new_id)); + let src = BlobRef(format!("{}/{}", from.mail_path, source_id)); + self.storage.blob_copy(&src, &dst).await?; Ok::<_, anyhow::Error>(()) }, async { // Copy mail meta in K2V let meta = &from.fetch_meta(&[source_id]).await?[0]; let meta_blob = seal_serialize(meta, &self.encryption_key)?; - self.k2v.row(&self.mail_path, &new_id.to_string()).set_value(&meta_blob).push().await?; + self.storage.row_insert(vec![RowVal::new( + RowRef::new(&self.mail_path, &new_id.to_string()), + meta_blob, + )]).await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync(), diff --git a/src/mail/user.rs b/src/mail/user.rs index bdfb30c..8413cbf 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -33,7 +33,7 @@ const MAILBOX_LIST_SK: &str = "list"; pub struct User { pub username: String, pub creds: Credentials, - pub k2v: storage::RowStore, + pub storage: storage::Store, pub mailboxes: std::sync::Mutex>>, tx_inbox_id: watch::Sender>, @@ -41,7 +41,7 @@ pub struct User { impl User { pub async fn new(username: String, creds: Credentials) -> Result> { - let cache_key = (username.clone(), creds.storage.clone()); + let cache_key = (username.clone(), creds.storage.unique()); { let cache = USER_CACHE.lock().unwrap(); @@ -81,11 +81,7 @@ impl User { let mb_uidvalidity = mb.current_uid_index().await.uidvalidity; if mb_uidvalidity > uidvalidity { list.update_uidvalidity(name, mb_uidvalidity); - let orphan = match ct { - Some(x) => Some(x.to_orphan()), - None => None, - }; - self.save_mailbox_list(&list, orphan).await?; + self.save_mailbox_list(&list, ct).await?; } Ok(Some(mb)) } else { @@ -108,11 +104,7 @@ impl User { let (mut list, ct) = self.load_mailbox_list().await?; match list.create_mailbox(name) { CreatedMailbox::Created(_, _) => { - let orphan = match ct { - Some(x) => Some(x.to_orphan()), - None => None, - }; - self.save_mailbox_list(&list, orphan).await?; + self.save_mailbox_list(&list, ct).await?; Ok(()) } CreatedMailbox::Existed(_, _) => Err(anyhow!("Mailbox {} already exists", name)), @@ -129,11 +121,7 @@ impl User { if list.has_mailbox(name) { // TODO: actually delete mailbox contents list.set_mailbox(name, None); - let orphan = match ct { - Some(x) => Some(x.to_orphan()), - None => None, - }; - self.save_mailbox_list(&list, orphan).await?; + self.save_mailbox_list(&list, ct).await?; Ok(()) } else { bail!("Mailbox {} does not exist", name); @@ -154,11 +142,7 @@ impl User { if old_name == INBOX { list.rename_mailbox(old_name, new_name)?; if !self.ensure_inbox_exists(&mut list, &ct).await? 
{ - let orphan = match ct { - Some(x) => Some(x.to_orphan()), - None => None, - }; - self.save_mailbox_list(&list, orphan).await?; + self.save_mailbox_list(&list, ct).await?; } } else { let names = list.existing_mailbox_names(); @@ -182,11 +166,7 @@ impl User { } } - let orphan = match ct { - Some(x) => Some(x.to_orphan()), - None => None, - }; - self.save_mailbox_list(&list, orphan).await?; + self.save_mailbox_list(&list, ct).await?; } Ok(()) } @@ -194,14 +174,14 @@ impl User { // ---- Internal user & mailbox management ---- async fn open(username: String, creds: Credentials) -> Result> { - let k2v = creds.row_client()?; + let storage = creds.storage.build()?; let (tx_inbox_id, rx_inbox_id) = watch::channel(None); let user = Arc::new(Self { username, creds: creds.clone(), - k2v, + storage, tx_inbox_id, mailboxes: std::sync::Mutex::new(HashMap::new()), }); @@ -245,19 +225,25 @@ impl User { // ---- Mailbox list management ---- async fn load_mailbox_list(&self) -> Result<(MailboxList, Option)> { - let (mut list, row) = match self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK).fetch().await { + let row_ref = storage::RowRef::new(MAILBOX_LIST_PK, MAILBOX_LIST_SK); + let (mut list, row) = match self.storage.row_fetch(&storage::Selector::Single(&row_ref)).await { Err(storage::StorageError::NotFound) => (MailboxList::new(), None), Err(e) => return Err(e.into()), Ok(rv) => { let mut list = MailboxList::new(); - for v in rv.content() { + let (row_ref, row_vals) = match rv.into_iter().next() { + Some(row_val) => (row_val.row_ref, row_val.value), + None => (row_ref, vec![]), + }; + + for v in row_vals { if let storage::Alternative::Value(vbytes) = v { let list2 = open_deserialize::(&vbytes, &self.creds.keys.master)?; list.merge(list2); } } - (list, Some(rv.to_ref())) + (list, Some(row_ref)) } }; @@ -278,11 +264,7 @@ impl User { let saved; let (inbox_id, inbox_uidvalidity) = match list.create_mailbox(INBOX) { CreatedMailbox::Created(i, v) => { - let orphan = match ct { - Some(x) => Some(x.to_orphan()), - None => None, - }; - self.save_mailbox_list(list, orphan).await?; + self.save_mailbox_list(list, ct.clone()).await?; saved = true; (i, v) } @@ -302,14 +284,12 @@ impl User { async fn save_mailbox_list( &self, list: &MailboxList, - ct: Option, + ct: Option, ) -> Result<()> { let list_blob = seal_serialize(list, &self.creds.keys.master)?; - let rref = match ct { - Some(x) => self.k2v.from_orphan(x).expect("Source & target must be same storage"), - None => self.k2v.row(MAILBOX_LIST_PK, MAILBOX_LIST_SK), - }; - rref.set_value(&list_blob).push().await?; + let rref = ct.unwrap_or(storage::RowRef::new(MAILBOX_LIST_PK, MAILBOX_LIST_SK)); + let row_val = storage::RowVal::new(rref, list_blob); + self.storage.row_insert(vec![row_val]).await?; Ok(()) } } @@ -482,6 +462,6 @@ enum CreatedMailbox { // ---- User cache ---- lazy_static! 
{ - static ref USER_CACHE: std::sync::Mutex>> = + static ref USER_CACHE: std::sync::Mutex>> = std::sync::Mutex::new(HashMap::new()); } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 8276f70..ff37287 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,7 +1,8 @@ use crate::storage::*; +use serde::Serialize; -#[derive(Clone, Debug, Hash)] -pub struct GarageBuilder { +#[derive(Clone, Debug, Serialize)] +pub struct GarageConf { pub region: String, pub s3_endpoint: String, pub k2v_endpoint: String, @@ -10,10 +11,28 @@ pub struct GarageBuilder { pub bucket: String, } +#[derive(Clone, Debug)] +pub struct GarageBuilder { + conf: GarageConf, + unicity: Vec, +} + +impl GarageBuilder { + pub fn new(conf: GarageConf) -> anyhow::Result> { + let mut unicity: Vec = vec![]; + unicity.extend_from_slice(file!().as_bytes()); + unicity.append(&mut rmp_serde::to_vec(&conf)?); + Ok(Arc::new(Self { conf, unicity })) + } +} + impl IBuilder for GarageBuilder { - fn build(&self) -> Box { + fn build(&self) -> Result { unimplemented!(); } + fn unique(&self) -> UnicityBuffer { + UnicityBuffer(self.unicity.clone()) + } } pub struct GarageStore { @@ -33,7 +52,7 @@ impl IStore for GarageStore { unimplemented!(); } - async fn row_poll(&self, value: RowRef) -> Result { + async fn row_poll(&self, value: &RowRef) -> Result { unimplemented!(); } @@ -41,6 +60,9 @@ impl IStore for GarageStore { unimplemented!(); } + async fn blob_insert(&self, blob_val: &BlobVal) -> Result { + unimplemented!(); + } async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result { unimplemented!(); diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 09c6763..6d0460f 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -14,18 +14,36 @@ pub type ArcBlob = Arc>>>; #[derive(Clone, Debug)] pub struct MemBuilder { user: String, - url: String, + unicity: Vec, row: ArcRow, blob: ArcBlob, } +impl MemBuilder { + pub fn new(user: &str) -> Arc { + let mut unicity: Vec = vec![]; + unicity.extend_from_slice(file!().as_bytes()); + unicity.extend_from_slice(user.as_bytes()); + Arc::new(Self { + user: user.to_string(), + unicity, + row: Arc::new(RwLock::new(HashMap::new())), + blob: Arc::new(RwLock::new(HashMap::new())), + }) + } +} + impl IBuilder for MemBuilder { - fn build(&self) -> Box { - Box::new(MemStore { + fn build(&self) -> Result { + Ok(Box::new(MemStore { row: self.row.clone(), blob: self.blob.clone(), - }) + })) } + + fn unique(&self) -> UnicityBuffer { + UnicityBuffer(self.unicity.clone()) + } } pub struct MemStore { @@ -56,7 +74,7 @@ impl IStore for MemStore { .or(Err(StorageError::Internal))? .get(*shard) .ok_or(StorageError::NotFound)? 
- .range((Included(sort_begin.to_string()), Included(sort_end.to_string()))) + .range((Included(sort_begin.to_string()), Excluded(sort_end.to_string()))) .map(|(k, v)| RowVal { row_ref: RowRef { uid: RowUid { shard: shard.to_string(), sort: k.to_string() }, causality: Some("c".to_string()) }, value: vec![Alternative::Value(v.clone())], @@ -100,7 +118,7 @@ impl IStore for MemStore { }, Selector::Single(row_ref) => { let bytes = self.inner_fetch(row_ref)?; - Ok(vec![RowVal{ row_ref: row_ref.clone(), value: vec![Alternative::Value(bytes)]}]) + Ok(vec![RowVal{ row_ref: (*row_ref).clone(), value: vec![Alternative::Value(bytes)]}]) } } } @@ -113,7 +131,7 @@ impl IStore for MemStore { unimplemented!(); } - async fn row_poll(&self, value: RowRef) -> Result { + async fn row_poll(&self, value: &RowRef) -> Result { unimplemented!(); } @@ -121,6 +139,9 @@ impl IStore for MemStore { unimplemented!(); } + async fn blob_insert(&self, blob_val: &BlobVal) -> Result { + unimplemented!(); + } async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result { unimplemented!(); diff --git a/src/storage/mod.rs b/src/storage/mod.rs index cb66d58..8004ac5 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -11,9 +11,9 @@ pub mod in_memory; pub mod garage; -use std::hash::{Hash, Hasher}; +use std::sync::Arc; +use std::hash::Hash; use std::collections::HashMap; -use futures::future::BoxFuture; use async_trait::async_trait; #[derive(Debug, Clone)] @@ -23,45 +23,95 @@ pub enum Alternative { } type ConcurrentValues = Vec; -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum StorageError { NotFound, Internal, } +impl std::fmt::Display for StorageError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("Storage Error: ")?; + match self { + Self::NotFound => f.write_str("Item not found"), + Self::Internal => f.write_str("An internal error occured"), + } + } +} +impl std::error::Error for StorageError {} -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct RowUid { - shard: String, - sort: String, + pub shard: String, + pub sort: String, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct RowRef { - uid: RowUid, - causality: Option, + pub uid: RowUid, + pub causality: Option, +} + +impl RowRef { + pub fn new(shard: &str, sort: &str) -> Self { + Self { + uid: RowUid { + shard: shard.to_string(), + sort: sort.to_string(), + }, + causality: None, + } + } } #[derive(Debug, Clone)] pub struct RowVal { - row_ref: RowRef, - value: ConcurrentValues, + pub row_ref: RowRef, + pub value: ConcurrentValues, } +impl RowVal { + pub fn new(row_ref: RowRef, value: Vec) -> Self { + Self { + row_ref, + value: vec![Alternative::Value(value)], + } + } +} + + #[derive(Debug, Clone)] -pub struct BlobRef(String); +pub struct BlobRef(pub String); +impl BlobRef { + pub fn new(key: &str) -> Self { + Self(key.to_string()) + } +} #[derive(Debug, Clone)] pub struct BlobVal { - blob_ref: BlobRef, - meta: HashMap, - value: Vec, + pub blob_ref: BlobRef, + pub meta: HashMap, + pub value: Vec, +} +impl BlobVal { + pub fn new(blob_ref: BlobRef, value: Vec) -> Self { + Self { + blob_ref, value, + meta: HashMap::new(), + } + } + + pub fn with_meta(mut self, k: String, v: String) -> Self { + self.meta.insert(k, v); + self + } } pub enum Selector<'a> { Range { shard: &'a str, sort_begin: &'a str, sort_end: &'a str }, List (Vec), // list of (shard_key, sort_key) Prefix { shard: &'a str, sort_prefix: &'a str }, - Single(RowRef), + Single(&'a RowRef), } #[async_trait] @@ -69,131 
+119,24 @@ pub trait IStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError>; async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError>; async fn row_insert(&self, values: Vec) -> Result<(), StorageError>; - async fn row_poll(&self, value: RowRef) -> Result; + async fn row_poll(&self, value: &RowRef) -> Result; async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result; + async fn blob_insert(&self, blob_val: &BlobVal) -> Result; async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result; async fn blob_list(&self, prefix: &str) -> Result, StorageError>; async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError>; } -pub trait IBuilder { - fn build(&self) -> Box; -} - - - - - - -/* -#[derive(Clone, Debug, PartialEq)] -pub enum OrphanRowRef { - Garage(garage::GrgOrphanRowRef), - Memory(in_memory::MemOrphanRowRef), -} - +#[derive(Clone,Debug,PartialEq,Eq,Hash)] +pub struct UnicityBuffer(Vec); +pub trait IBuilder: std::fmt::Debug { + fn build(&self) -> Result; - -impl std::fmt::Display for StorageError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("Storage Error: ")?; - match self { - Self::NotFound => f.write_str("Item not found"), - Self::Internal => f.write_str("An internal error occured"), - Self::IncompatibleOrphan => f.write_str("Incompatible orphan"), - } - } + /// Returns an opaque buffer that uniquely identifies this builder + fn unique(&self) -> UnicityBuffer; } -impl std::error::Error for StorageError {} -// Utils -pub type AsyncResult<'a, T> = BoxFuture<'a, Result>; - -// ----- Builders -pub trait IBuilders { - fn box_clone(&self) -> Builders; - fn row_store(&self) -> Result; - fn blob_store(&self) -> Result; - fn url(&self) -> &str; -} -pub type Builders = Box; -impl Clone for Builders { - fn clone(&self) -> Self { - self.box_clone() - } -} -impl std::fmt::Debug for Builders { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("aerogramme::storage::Builder") - } -} -impl PartialEq for Builders { - fn eq(&self, other: &Self) -> bool { - self.url() == other.url() - } -} -impl Eq for Builders {} -impl Hash for Builders { - fn hash(&self, state: &mut H) { - self.url().hash(state); - } -} - -// ------ Row -pub trait IRowStore -{ - fn row(&self, partition: &str, sort: &str) -> RowRef; - fn select(&self, selector: Selector) -> AsyncResult>; - fn rm(&self, selector: Selector) -> AsyncResult<()>; - fn from_orphan(&self, orphan: OrphanRowRef) -> Result; -} -pub type RowStore = Box; - -pub trait IRowRef: std::fmt::Debug -{ - fn to_orphan(&self) -> OrphanRowRef; - fn key(&self) -> (&str, &str); - fn set_value(&self, content: &[u8]) -> RowValue; - fn fetch(&self) -> AsyncResult; - fn rm(&self) -> AsyncResult<()>; - fn poll(&self) -> AsyncResult; -} -pub type RowRef<'a> = Box; - -pub trait IRowValue: std::fmt::Debug -{ - fn to_ref(&self) -> RowRef; - fn content(&self) -> ConcurrentValues; - fn push(&self) -> AsyncResult<()>; -} -pub type RowValue = Box; - -// ------- Blob -pub trait IBlobStore -{ - fn blob(&self, key: &str) -> BlobRef; - fn list(&self, prefix: &str) -> AsyncResult>; -} -pub type BlobStore = Box; - -pub trait IBlobRef -{ - fn set_value(&self, content: Vec) -> BlobValue; - fn key(&self) -> &str; - fn fetch(&self) -> AsyncResult; - fn copy(&self, dst: &BlobRef) -> AsyncResult<()>; - fn rm(&self) -> AsyncResult<()>; -} -pub type BlobRef = Box; - -pub trait IBlobValue { - fn to_ref(&self) -> BlobRef; - fn get_meta(&self, key: &str) -> 
Option<&[u8]>; - fn set_meta(&mut self, key: &str, val: &str); - fn content(&self) -> Option<&[u8]>; - fn push(&self) -> AsyncResult<()>; -} -pub type BlobValue = Box; -*/ +pub type Builder = Arc; +pub type Store = Box; -- cgit v1.2.3 From c75f2d91ff969dd791cb476031ee80870c6ad61a Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 19 Dec 2023 19:02:22 +0100 Subject: implemented an in memory storage --- src/mail/incoming.rs | 2 +- src/mail/mailbox.rs | 7 +- src/storage/garage.rs | 8 +- src/storage/in_memory.rs | 242 +++++++++++++++++++++++++++++++++++------------ src/storage/mod.rs | 9 +- 5 files changed, 202 insertions(+), 66 deletions(-) (limited to 'src') diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index f6b831d..2a6c947 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -386,7 +386,7 @@ async fn k2v_lock_loop_internal( _ => None, }; if let Some(ct) = release { - match storage.row_rm(&storage::Selector::Single(&ct)).await { + match storage.row_rm_single(&ct).await { Err(e) => warn!("Unable to release lock {:?}: {}", ct, e), Ok(_) => (), }; diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index b4afd5e..65f44b1 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -361,7 +361,12 @@ impl MailboxInternal { async { // Delete mail meta from K2V let sk = ident.to_string(); - self.storage.row_rm(&Selector::Single(&RowRef::new(&self.mail_path, &sk))).await?; + let res = self.storage + .row_fetch(&storage::Selector::Single(&RowRef::new(&self.mail_path, &sk))) + .await?; + if let Some(row_val) = res.into_iter().next() { + self.storage.row_rm_single(&row_val.row_ref).await?; + } Ok::<_, anyhow::Error>(()) } )?; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index ff37287..f202067 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -56,14 +56,18 @@ impl IStore for GarageStore { unimplemented!(); } + async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { + unimplemented!(); + } + async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { unimplemented!(); } - async fn blob_insert(&self, blob_val: &BlobVal) -> Result { + async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError> { unimplemented!(); } - async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result { + async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> { unimplemented!(); } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 6d0460f..c18bec3 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -1,19 +1,67 @@ use crate::storage::*; use std::collections::{HashMap, BTreeMap}; -use std::ops::Bound::{Included, Unbounded, Excluded}; +use std::ops::Bound::{Included, Unbounded, Excluded, self}; use std::sync::{Arc, RwLock}; +use tokio::sync::Notify; /// This implementation is very inneficient, and not completely correct /// Indeed, when the connector is dropped, the memory is freed. /// It means that when a user disconnects, its data are lost. /// It's intended only for basic debugging, do not use it for advanced tests... 
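// A minimal usage sketch (illustration, not part of the patch): driving this
// in-memory backend through the generic storage traits. The mailbox path and
// sort key below are made up for the example; the code relies on the
// `use crate::storage::*;` import at the top of this file, and IBuilder::build()
// is still synchronous at this point in the series (it becomes async a few
// commits later).
async fn demo_mem_store() -> Result<(), StorageError> {
    let builder = MemBuilder::new("alice");
    let store = builder.build()?;

    // Insert one row, then read it back through a Single selector.
    let key = RowRef::new("mail/inbox", "uid-1");
    store
        .row_insert(vec![RowVal::new(key.clone(), b"meta".to_vec())])
        .await?;
    let rows = store.row_fetch(&Selector::Single(&key)).await?;
    assert_eq!(rows.len(), 1);
    Ok(())
}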
-pub type ArcRow = Arc>>>>; -pub type ArcBlob = Arc>>>; +#[derive(Debug, Clone)] +enum InternalData { + Tombstone, + Value(Vec), +} +impl InternalData { + fn to_alternative(&self) -> Alternative { + match self { + Self::Tombstone => Alternative::Tombstone, + Self::Value(x) => Alternative::Value(x.clone()), + } + } +} + +#[derive(Debug, Default)] +struct InternalRowVal { + data: Vec, + version: u64, + change: Arc, +} +impl InternalRowVal { + fn concurrent_values(&self) -> Vec { + self.data.iter().map(InternalData::to_alternative).collect() + } + + fn to_row_val(&self, row_ref: RowRef) -> RowVal { + RowVal{ + row_ref: row_ref.with_causality(self.version.to_string()), + value: self.concurrent_values(), + } + } +} + +#[derive(Debug, Default, Clone)] +struct InternalBlobVal { + data: Vec, + metadata: HashMap, +} +impl InternalBlobVal { + fn to_blob_val(&self, bref: &BlobRef) -> BlobVal { + BlobVal { + blob_ref: bref.clone(), + meta: self.metadata.clone(), + value: self.data.clone(), + } + } +} + +type ArcRow = Arc>>>; +type ArcBlob = Arc>>; #[derive(Clone, Debug)] pub struct MemBuilder { - user: String, unicity: Vec, row: ArcRow, blob: ArcBlob, @@ -25,10 +73,9 @@ impl MemBuilder { unicity.extend_from_slice(file!().as_bytes()); unicity.extend_from_slice(user.as_bytes()); Arc::new(Self { - user: user.to_string(), unicity, row: Arc::new(RwLock::new(HashMap::new())), - blob: Arc::new(RwLock::new(HashMap::new())), + blob: Arc::new(RwLock::new(BTreeMap::new())), }) } } @@ -51,105 +98,180 @@ pub struct MemStore { blob: ArcBlob, } -impl MemStore { - fn inner_fetch(&self, row_ref: &RowRef) -> Result, StorageError> { - Ok(self.row - .read() - .or(Err(StorageError::Internal))? - .get(&row_ref.uid.shard) - .ok_or(StorageError::NotFound)? - .get(&row_ref.uid.sort) - .ok_or(StorageError::NotFound)? - .clone()) +fn prefix_last_bound(prefix: &str) -> Bound { + let mut sort_end = prefix.to_string(); + match sort_end.pop() { + None => Unbounded, + Some(ch) => { + let nc = char::from_u32(ch as u32 + 1).unwrap(); + sort_end.push(nc); + Excluded(sort_end) + } } } #[async_trait] impl IStore for MemStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { + let store = self.row.read().or(Err(StorageError::Internal))?; + match select { Selector::Range { shard, sort_begin, sort_end } => { - Ok(self.row - .read() - .or(Err(StorageError::Internal))? + Ok(store .get(*shard) .ok_or(StorageError::NotFound)? .range((Included(sort_begin.to_string()), Excluded(sort_end.to_string()))) - .map(|(k, v)| RowVal { - row_ref: RowRef { uid: RowUid { shard: shard.to_string(), sort: k.to_string() }, causality: Some("c".to_string()) }, - value: vec![Alternative::Value(v.clone())], - }) + .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) .collect::>()) }, Selector::List(rlist) => { let mut acc = vec![]; for row_ref in rlist { - let bytes = self.inner_fetch(row_ref)?; - let row_val = RowVal { - row_ref: row_ref.clone(), - value: vec![Alternative::Value(bytes)] - }; - acc.push(row_val); + let intval = store + .get(&row_ref.uid.shard) + .ok_or(StorageError::NotFound)? 
+ .get(&row_ref.uid.sort) + .ok_or(StorageError::NotFound)?; + acc.push(intval.to_row_val(row_ref.clone())); } Ok(acc) }, Selector::Prefix { shard, sort_prefix } => { - let mut sort_end = sort_prefix.to_string(); - let last_bound = match sort_end.pop() { - None => Unbounded, - Some(ch) => { - let nc = char::from_u32(ch as u32 + 1).unwrap(); - sort_end.push(nc); - Excluded(sort_end) - } - }; - - Ok(self.row - .read() - .or(Err(StorageError::Internal))? + let last_bound = prefix_last_bound(sort_prefix); + + Ok(store .get(*shard) .ok_or(StorageError::NotFound)? .range((Included(sort_prefix.to_string()), last_bound)) - .map(|(k, v)| RowVal { - row_ref: RowRef { uid: RowUid { shard: shard.to_string(), sort: k.to_string() }, causality: Some("c".to_string()) }, - value: vec![Alternative::Value(v.clone())], - }) + .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) .collect::>()) }, Selector::Single(row_ref) => { - let bytes = self.inner_fetch(row_ref)?; - Ok(vec![RowVal{ row_ref: (*row_ref).clone(), value: vec![Alternative::Value(bytes)]}]) + let intval = store + .get(&row_ref.uid.shard) + .ok_or(StorageError::NotFound)? + .get(&row_ref.uid.sort) + .ok_or(StorageError::NotFound)?; + Ok(vec![intval.to_row_val((*row_ref).clone())]) } } } + async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { + let mut store = self.row.write().or(Err(StorageError::Internal))?; + let shard = &entry.uid.shard; + let sort = &entry.uid.sort; + + let cauz = match entry.causality.as_ref().map(|v| v.parse::()) { + Some(Ok(v)) => v, + _ => 0, + }; + + let bt = store.entry(shard.to_string()).or_default(); + let intval = bt.entry(sort.to_string()).or_default(); + + if cauz == intval.version { + intval.data.clear(); + } + intval.data.push(InternalData::Tombstone); + intval.version += 1; + intval.change.notify_waiters(); + + Ok(()) + } + async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { - unimplemented!(); + //@FIXME not efficient at all... + let values = self.row_fetch(select).await?; + + for v in values.into_iter() { + self.row_rm_single(&v.row_ref).await?; + } + Ok(()) } async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { - unimplemented!(); + let mut store = self.row.write().or(Err(StorageError::Internal))?; + for v in values.into_iter() { + let shard = v.row_ref.uid.shard; + let sort = v.row_ref.uid.sort; + + let val = match v.value.into_iter().next() { + Some(Alternative::Value(x)) => x, + _ => vec![], + }; + + let cauz = match v.row_ref.causality.map(|v| v.parse::()) { + Some(Ok(v)) => v, + _ => 0, + }; + let bt = store.entry(shard).or_default(); + let intval = bt.entry(sort).or_default(); + + if cauz == intval.version { + intval.data.clear(); + } + intval.data.push(InternalData::Value(val)); + intval.version += 1; + intval.change.notify_waiters(); + } + Ok(()) } async fn row_poll(&self, value: &RowRef) -> Result { - unimplemented!(); + let shard = &value.uid.shard; + let sort = &value.uid.sort; + let cauz = match value.causality.as_ref().map(|v| v.parse::()) { + Some(Ok(v)) => v, + _ => 0, + }; + + let notify_me = { + let store = self.row.read().or(Err(StorageError::Internal))?; + let intval = store + .get(shard) + .ok_or(StorageError::NotFound)? 
+ .get(sort) + .ok_or(StorageError::NotFound)?; + + if intval.version != cauz { + return Ok(intval.to_row_val(value.clone())); + } + intval.change.clone() + }; + + notify_me.notified().await; + + let res = self.row_fetch(&Selector::Single(value)).await?; + res.into_iter().next().ok_or(StorageError::NotFound) } async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { - unimplemented!(); - + let store = self.blob.read().or(Err(StorageError::Internal))?; + store.get(&blob_ref.0).ok_or(StorageError::NotFound).map(|v| v.to_blob_val(blob_ref)) } - async fn blob_insert(&self, blob_val: &BlobVal) -> Result { - unimplemented!(); + async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError> { + let mut store = self.blob.write().or(Err(StorageError::Internal))?; + let entry = store.entry(blob_val.blob_ref.0.clone()).or_default(); + entry.data = blob_val.value.clone(); + entry.metadata = blob_val.meta.clone(); + Ok(()) } - async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result { - unimplemented!(); - + async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> { + let mut store = self.blob.write().or(Err(StorageError::Internal))?; + let blob_src = store.entry(src.0.clone()).or_default().clone(); + store.insert(dst.0.clone(), blob_src); + Ok(()) } async fn blob_list(&self, prefix: &str) -> Result, StorageError> { - unimplemented!(); + let store = self.blob.read().or(Err(StorageError::Internal))?; + let last_bound = prefix_last_bound(prefix); + let blist = store.range((Included(prefix.to_string()), last_bound)).map(|(k, _)| BlobRef(k.to_string())).collect::>(); + Ok(blist) } async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { - unimplemented!(); + let mut store = self.blob.write().or(Err(StorageError::Internal))?; + store.remove(&blob_ref.0); + Ok(()) } } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 8004ac5..a21e07d 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -61,6 +61,10 @@ impl RowRef { causality: None, } } + pub fn with_causality(mut self, causality: String) -> Self { + self.causality = Some(causality); + self + } } #[derive(Debug, Clone)] @@ -118,12 +122,13 @@ pub enum Selector<'a> { pub trait IStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError>; async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError>; + async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError>; async fn row_insert(&self, values: Vec) -> Result<(), StorageError>; async fn row_poll(&self, value: &RowRef) -> Result; async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result; - async fn blob_insert(&self, blob_val: &BlobVal) -> Result; - async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result; + async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError>; + async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError>; async fn blob_list(&self, prefix: &str) -> Result, StorageError>; async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError>; } -- cgit v1.2.3 From 8bc40fa0877714aabbc6134357622ab47c628f0c Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 19 Dec 2023 19:21:36 +0100 Subject: wip in mem storage bug fixes --- src/storage/in_memory.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index c18bec3..7d8d108 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ 
-23,12 +23,21 @@ impl InternalData { } } -#[derive(Debug, Default)] +#[derive(Debug)] struct InternalRowVal { data: Vec, version: u64, change: Arc, } +impl std::default::Default for InternalRowVal { + fn default() -> Self { + Self { + data: vec![], + version: 1, + change: Arc::new(Notify::new()), + } + } +} impl InternalRowVal { fn concurrent_values(&self) -> Vec { self.data.iter().map(InternalData::to_alternative).collect() @@ -227,12 +236,9 @@ impl IStore for MemStore { }; let notify_me = { - let store = self.row.read().or(Err(StorageError::Internal))?; - let intval = store - .get(shard) - .ok_or(StorageError::NotFound)? - .get(sort) - .ok_or(StorageError::NotFound)?; + let mut store = self.row.write().or(Err(StorageError::Internal))?; + let bt = store.entry(shard.to_string()).or_default(); + let intval = bt.entry(sort.to_string()).or_default(); if intval.version != cauz { return Ok(intval.to_row_val(value.clone())); -- cgit v1.2.3 From 3a1f68c6bf56b572c1513a8358970536d4555078 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 19 Dec 2023 21:41:35 +0100 Subject: better handle non existing keys --- src/storage/in_memory.rs | 24 ++++++++++++++++-------- src/storage/mod.rs | 21 +++++++++++++++++++++ 2 files changed, 37 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 7d8d108..b1f0508 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -122,13 +122,14 @@ fn prefix_last_bound(prefix: &str) -> Bound { #[async_trait] impl IStore for MemStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { + tracing::trace!(select=%select, command="row_fetch"); let store = self.row.read().or(Err(StorageError::Internal))?; match select { Selector::Range { shard, sort_begin, sort_end } => { Ok(store .get(*shard) - .ok_or(StorageError::NotFound)? + .unwrap_or(&BTreeMap::new()) .range((Included(sort_begin.to_string()), Excluded(sort_end.to_string()))) .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) .collect::>()) @@ -136,12 +137,10 @@ impl IStore for MemStore { Selector::List(rlist) => { let mut acc = vec![]; for row_ref in rlist { - let intval = store - .get(&row_ref.uid.shard) - .ok_or(StorageError::NotFound)? - .get(&row_ref.uid.sort) - .ok_or(StorageError::NotFound)?; - acc.push(intval.to_row_val(row_ref.clone())); + let maybe_intval = store.get(&row_ref.uid.shard).map(|v| v.get(&row_ref.uid.sort)).flatten(); + if let Some(intval) = maybe_intval { + acc.push(intval.to_row_val(row_ref.clone())); + } } Ok(acc) }, @@ -150,7 +149,7 @@ impl IStore for MemStore { Ok(store .get(*shard) - .ok_or(StorageError::NotFound)? + .unwrap_or(&BTreeMap::new()) .range((Included(sort_prefix.to_string()), last_bound)) .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) .collect::>()) @@ -167,6 +166,7 @@ impl IStore for MemStore { } async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { + tracing::trace!(entry=%entry, command="row_rm_single"); let mut store = self.row.write().or(Err(StorageError::Internal))?; let shard = &entry.uid.shard; let sort = &entry.uid.sort; @@ -190,6 +190,7 @@ impl IStore for MemStore { } async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { + tracing::trace!(select=%select, command="row_rm"); //@FIXME not efficient at all... 
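// Naive but simple: materialise every row matched by the selector with
// row_fetch, then tombstone each match through row_rm_single, which bumps
// that row's version and wakes any task blocked in row_poll.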
let values = self.row_fetch(select).await?; @@ -200,6 +201,7 @@ impl IStore for MemStore { } async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { + tracing::trace!(entries=%values.iter().map(|v| v.row_ref.to_string()).collect::>().join(","), command="row_insert"); let mut store = self.row.write().or(Err(StorageError::Internal))?; for v in values.into_iter() { let shard = v.row_ref.uid.shard; @@ -228,6 +230,7 @@ impl IStore for MemStore { Ok(()) } async fn row_poll(&self, value: &RowRef) -> Result { + tracing::trace!(entry=%value, command="row_poll"); let shard = &value.uid.shard; let sort = &value.uid.sort; let cauz = match value.causality.as_ref().map(|v| v.parse::()) { @@ -253,10 +256,12 @@ impl IStore for MemStore { } async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { + tracing::trace!(entry=%blob_ref, command="blob_fetch"); let store = self.blob.read().or(Err(StorageError::Internal))?; store.get(&blob_ref.0).ok_or(StorageError::NotFound).map(|v| v.to_blob_val(blob_ref)) } async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError> { + tracing::trace!(entry=%blob_val.blob_ref, command="blob_insert"); let mut store = self.blob.write().or(Err(StorageError::Internal))?; let entry = store.entry(blob_val.blob_ref.0.clone()).or_default(); entry.data = blob_val.value.clone(); @@ -264,18 +269,21 @@ impl IStore for MemStore { Ok(()) } async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> { + tracing::trace!(src=%src, dst=%dst, command="blob_copy"); let mut store = self.blob.write().or(Err(StorageError::Internal))?; let blob_src = store.entry(src.0.clone()).or_default().clone(); store.insert(dst.0.clone(), blob_src); Ok(()) } async fn blob_list(&self, prefix: &str) -> Result, StorageError> { + tracing::trace!(prefix=prefix, command="blob_list"); let store = self.blob.read().or(Err(StorageError::Internal))?; let last_bound = prefix_last_bound(prefix); let blist = store.range((Included(prefix.to_string()), last_bound)).map(|(k, _)| BlobRef(k.to_string())).collect::>(); Ok(blist) } async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { + tracing::trace!(entry=%blob_ref, command="blob_rm"); let mut store = self.blob.write().or(Err(StorageError::Internal))?; store.remove(&blob_ref.0); Ok(()) diff --git a/src/storage/mod.rs b/src/storage/mod.rs index a21e07d..0fedfab 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -50,6 +50,11 @@ pub struct RowRef { pub uid: RowUid, pub causality: Option, } +impl std::fmt::Display for RowRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "RowRef({}, {}, {:?})", self.uid.shard, self.uid.sort, self.causality) + } +} impl RowRef { pub fn new(shard: &str, sort: &str) -> Self { @@ -90,6 +95,11 @@ impl BlobRef { Self(key.to_string()) } } +impl std::fmt::Display for BlobRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "BlobRef({})", self.0) + } +} #[derive(Debug, Clone)] pub struct BlobVal { @@ -111,12 +121,23 @@ impl BlobVal { } } +#[derive(Debug)] pub enum Selector<'a> { Range { shard: &'a str, sort_begin: &'a str, sort_end: &'a str }, List (Vec), // list of (shard_key, sort_key) Prefix { shard: &'a str, sort_prefix: &'a str }, Single(&'a RowRef), } +impl<'a> std::fmt::Display for Selector<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Range { shard, sort_begin, sort_end } => write!(f, "Range({}, [{}, {}[)", shard, sort_begin, sort_end), + 
Self::List(list) => write!(f, "List({:?})", list), + Self::Prefix { shard, sort_prefix } => write!(f, "Prefix({}, {})", shard, sort_prefix), + Self::Single(row_ref) => write!(f, "Single({})", row_ref), + } + } +} #[async_trait] pub trait IStore { -- cgit v1.2.3 From 2830e62df99dc236cd2ba63a909849cf973d2654 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 20 Dec 2023 13:55:23 +0100 Subject: working in memory storage --- src/login/static_provider.rs | 40 +++++++++++++++++++++++++++++----------- src/storage/in_memory.rs | 1 + 2 files changed, 30 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 5896f16..2e2e034 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -24,6 +24,7 @@ pub struct UserDatabase { pub struct StaticLoginProvider { user_db: watch::Receiver, + in_memory_store: tokio::sync::Mutex>>, } pub async fn update_user_list(config: PathBuf, up: watch::Sender) -> Result<()> { @@ -68,7 +69,7 @@ impl StaticLoginProvider { tokio::spawn(update_user_list(config.user_list, tx)); rx.changed().await?; - Ok(Self { user_db: rx }) + Ok(Self { user_db: rx, in_memory_store: tokio::sync::Mutex::new(HashMap::new()) }) } } @@ -76,10 +77,12 @@ impl StaticLoginProvider { impl LoginProvider for StaticLoginProvider { async fn login(&self, username: &str, password: &str) -> Result { tracing::debug!(user=%username, "login"); - let user_db = self.user_db.borrow(); - let user = match user_db.users.get(username) { - None => bail!("User {} does not exist", username), - Some(u) => u, + let user = { + let user_db = self.user_db.borrow(); + match user_db.users.get(username) { + None => bail!("User {} does not exist", username), + Some(u) => u.clone(), + } }; tracing::debug!(user=%username, "verify password"); @@ -89,7 +92,13 @@ impl LoginProvider for StaticLoginProvider { tracing::debug!(user=%username, "fetch keys"); let storage: storage::Builder = match &user.config.storage { - StaticStorage::InMemory => storage::in_memory::MemBuilder::new(username), + StaticStorage::InMemory => { + let mut global_storage = self.in_memory_store.lock().await; + global_storage + .entry(username.to_string()) + .or_insert(storage::in_memory::MemBuilder::new(username)) + .clone() + }, StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), @@ -108,14 +117,23 @@ impl LoginProvider for StaticLoginProvider { } async fn public_login(&self, email: &str) -> Result { - let user_db = self.user_db.borrow(); - let user = match user_db.users_by_email.get(email) { - None => bail!("No user for email address {}", email), - Some(u) => u, + let user = { + let user_db = self.user_db.borrow(); + match user_db.users_by_email.get(email) { + None => bail!("Email {} does not exist", email), + Some(u) => u.clone(), + } }; + tracing::debug!(user=%user.username, "public_login"); let storage: storage::Builder = match &user.config.storage { - StaticStorage::InMemory => storage::in_memory::MemBuilder::new(&user.username), + StaticStorage::InMemory => { + let mut global_storage = self.in_memory_store.lock().await; + global_storage + .entry(user.username.to_string()) + .or_insert(storage::in_memory::MemBuilder::new(&user.username)) + .clone() + }, StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), diff 
--git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index b1f0508..00eedab 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -78,6 +78,7 @@ pub struct MemBuilder { impl MemBuilder { pub fn new(user: &str) -> Arc { + tracing::debug!("initialize membuilder for {}", user); let mut unicity: Vec = vec![]; unicity.extend_from_slice(file!().as_bytes()); unicity.extend_from_slice(user.as_bytes()); -- cgit v1.2.3 From a3a9f87d2c1d2f1c01ecba3a00c592e477a6b22b Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Dec 2023 09:32:48 +0100 Subject: avoid infinite loop --- src/login/static_provider.rs | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src') diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 2e2e034..76ad6a6 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -35,6 +35,7 @@ pub async fn update_user_list(config: PathBuf, up: watch::Sender) Ok(x) => x, Err(e) => { tracing::warn!(path=%config.as_path().to_string_lossy(), error=%e, "Unable to load config"); + stream.recv().await; continue; } }; @@ -49,6 +50,7 @@ pub async fn update_user_list(config: PathBuf, up: watch::Sender) for m in u.config.email_addresses.iter() { if users_by_email.contains_key(m) { tracing::warn!("Several users have the same email address: {}", m); + stream.recv().await; continue } users_by_email.insert(m.clone(), u.clone()); -- cgit v1.2.3 From e9aabe8e82e3c3a8190c0224cd1fdf2fc4d2505a Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Dec 2023 15:36:05 +0100 Subject: move storage logic into the storage module --- src/login/static_provider.rs | 20 ++++---------------- src/storage/in_memory.rs | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/login/static_provider.rs b/src/login/static_provider.rs index 76ad6a6..b11123c 100644 --- a/src/login/static_provider.rs +++ b/src/login/static_provider.rs @@ -24,7 +24,7 @@ pub struct UserDatabase { pub struct StaticLoginProvider { user_db: watch::Receiver, - in_memory_store: tokio::sync::Mutex>>, + in_memory_store: storage::in_memory::MemDb, } pub async fn update_user_list(config: PathBuf, up: watch::Sender) -> Result<()> { @@ -71,7 +71,7 @@ impl StaticLoginProvider { tokio::spawn(update_user_list(config.user_list, tx)); rx.changed().await?; - Ok(Self { user_db: rx, in_memory_store: tokio::sync::Mutex::new(HashMap::new()) }) + Ok(Self { user_db: rx, in_memory_store: storage::in_memory::MemDb::new() }) } } @@ -94,13 +94,7 @@ impl LoginProvider for StaticLoginProvider { tracing::debug!(user=%username, "fetch keys"); let storage: storage::Builder = match &user.config.storage { - StaticStorage::InMemory => { - let mut global_storage = self.in_memory_store.lock().await; - global_storage - .entry(username.to_string()) - .or_insert(storage::in_memory::MemBuilder::new(username)) - .clone() - }, + StaticStorage::InMemory => self.in_memory_store.builder(username).await, StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), @@ -129,13 +123,7 @@ impl LoginProvider for StaticLoginProvider { tracing::debug!(user=%user.username, "public_login"); let storage: storage::Builder = match &user.config.storage { - StaticStorage::InMemory => { - let mut global_storage = self.in_memory_store.lock().await; - global_storage - .entry(user.username.to_string()) - .or_insert(storage::in_memory::MemBuilder::new(&user.username)) 
- .clone() - }, + StaticStorage::InMemory => self.in_memory_store.builder(&user.username).await, StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { region: grgconf.aws_region.clone(), k2v_endpoint: grgconf.k2v_endpoint.clone(), diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 00eedab..fb6e599 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -9,6 +9,22 @@ use tokio::sync::Notify; /// It means that when a user disconnects, its data are lost. /// It's intended only for basic debugging, do not use it for advanced tests... +#[derive(Debug, Default)] +pub struct MemDb(tokio::sync::Mutex>>); +impl MemDb { + pub fn new() -> Self { + Self(tokio::sync::Mutex::new(HashMap::new())) + } + + pub async fn builder(&self, username: &str) -> Arc { + let mut global_storage = self.0.lock().await; + global_storage + .entry(username.to_string()) + .or_insert(MemBuilder::new(username)) + .clone() + } +} + #[derive(Debug, Clone)] enum InternalData { Tombstone, -- cgit v1.2.3 From e3b11ad1d8249719045329d94f775402aa4ba302 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Dec 2023 16:38:15 +0100 Subject: fix how mem storage is created --- src/login/ldap_provider.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index 009605d..a7f56e4 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -20,6 +20,7 @@ pub struct LdapLoginProvider { crypto_root_attr: String, storage_specific: StorageSpecific, + in_memory_store: storage::in_memory::MemDb, } enum BucketSource { @@ -82,14 +83,15 @@ impl LdapLoginProvider { mail_attr: config.mail_attr, crypto_root_attr: config.crypto_root_attr, storage_specific: specific, + in_memory_store: storage::in_memory::MemDb::new(), }) } - fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { + async fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { let storage: Builder = match &self.storage_specific { - StorageSpecific::InMemory => storage::in_memory::MemBuilder::new( + StorageSpecific::InMemory => self.in_memory_store.builder( &get_attr(user, &self.username_attr)? 
- ), + ).await, StorageSpecific::Garage { from_config, bucket_source } => { let aws_access_key_id = get_attr(user, &from_config.aws_access_key_id_attr)?; let aws_secret_access_key = get_attr(user, &from_config.aws_secret_access_key_attr)?; @@ -166,7 +168,7 @@ impl LoginProvider for LdapLoginProvider { let keys = cr.crypto_keys(password)?; // storage - let storage = self.storage_creds_from_ldap_user(&user)?; + let storage = self.storage_creds_from_ldap_user(&user).await?; drop(ldap); @@ -214,7 +216,7 @@ impl LoginProvider for LdapLoginProvider { let public_key = cr.public_key()?; // storage - let storage = self.storage_creds_from_ldap_user(&user)?; + let storage = self.storage_creds_from_ldap_user(&user).await?; drop(ldap); Ok(PublicCredentials { -- cgit v1.2.3 From 4b8b48b48572115b943efdf6356a191871d46a55 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Dec 2023 20:23:43 +0100 Subject: upgrade argon2, add aws-sdk-s3 --- src/login/mod.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/login/mod.rs b/src/login/mod.rs index d331522..3369ac2 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -210,21 +210,18 @@ fn try_open_encrypted_keys(kdf_salt: &[u8], password: &str, encrypted_keys: &[u8 // ---- UTIL ---- pub fn argon2_kdf(salt: &[u8], password: &[u8], output_len: usize) -> Result> { - use argon2::{Algorithm, Argon2, ParamsBuilder, PasswordHasher, Version}; + use argon2::{Algorithm, Argon2, ParamsBuilder, PasswordHasher, Version, password_hash}; - let mut params = ParamsBuilder::new(); - params + let params = ParamsBuilder::new() .output_len(output_len) - .map_err(|e| anyhow!("Invalid output length: {}", e))?; - - let params = params - .params() + .build() .map_err(|e| anyhow!("Invalid argon2 params: {}", e))?; let argon2 = Argon2::new(Algorithm::default(), Version::default(), params); - let salt = base64::engine::general_purpose::STANDARD_NO_PAD.encode(salt); + let b64_salt = base64::engine::general_purpose::STANDARD_NO_PAD.encode(salt); + let valid_salt = password_hash::Salt::from_b64(&b64_salt).map_err(|e| anyhow!("Invalid salt, error {}", e))?; let hash = argon2 - .hash_password(password, &salt) + .hash_password(password, valid_salt) .map_err(|e| anyhow!("Unable to hash: {}", e))?; let hash = hash.hash.ok_or(anyhow!("Missing output"))?; -- cgit v1.2.3 From 012c6ad6724b6a6e155ee717e6d558e1fe199e43 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Dec 2023 21:54:36 +0100 Subject: initialize aws sdk with our info --- src/bayou.rs | 10 +++++----- src/mail/incoming.rs | 6 +++--- src/mail/mailbox.rs | 4 ++-- src/mail/user.rs | 2 +- src/storage/garage.rs | 24 +++++++++++++++++++++--- src/storage/in_memory.rs | 3 ++- src/storage/mod.rs | 3 ++- 7 files changed, 36 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index afe3c75..3c525b3 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -58,12 +58,12 @@ pub struct Bayou { } impl Bayou { - pub fn new(creds: &Credentials, path: String) -> Result { - let storage = creds.storage.build()?; + pub async fn new(creds: &Credentials, path: String) -> Result { + let storage = creds.storage.build().await?; //let target = k2v_client.row(&path, WATCH_SK); let target = storage::RowRef::new(&path, WATCH_SK); - let watch = K2vWatch::new(creds, target.clone())?; + let watch = K2vWatch::new(creds, target.clone()).await?; Ok(Self { path, @@ -418,8 +418,8 @@ impl K2vWatch { /// Creates a new watch and launches subordinate threads. 
/// These threads hold Weak pointers to the struct; /// they exit when the Arc is dropped. - fn new(creds: &Credentials, target: storage::RowRef) -> Result> { - let storage = creds.storage.build()?; + async fn new(creds: &Credentials, target: storage::RowRef) -> Result> { + let storage = creds.storage.build().await?; let (tx, rx) = watch::channel::(target.clone()); let notify = Notify::new(); diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 2a6c947..3eafac7 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -51,8 +51,8 @@ async fn incoming_mail_watch_process_internal( creds: Credentials, mut rx_inbox_id: watch::Receiver>, ) -> Result<()> { - let mut lock_held = k2v_lock_loop(creds.storage.build()?, storage::RowRef::new(INCOMING_PK, INCOMING_LOCK_SK)); - let storage = creds.storage.build()?; + let mut lock_held = k2v_lock_loop(creds.storage.build().await?, storage::RowRef::new(INCOMING_PK, INCOMING_LOCK_SK)); + let storage = creds.storage.build().await?; let mut inbox: Option> = None; let mut incoming_key = storage::RowRef::new(INCOMING_PK, INCOMING_WATCH_SK); @@ -411,7 +411,7 @@ impl EncryptedMessage { } pub async fn deliver_to(self: Arc, creds: PublicCredentials) -> Result<()> { - let storage = creds.storage.build()?; + let storage = creds.storage.build().await?; // Get causality token of previous watch key let query = storage::RowRef::new(INCOMING_PK, INCOMING_WATCH_SK); diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 65f44b1..60a91dd 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -25,7 +25,7 @@ impl Mailbox { let index_path = format!("index/{}", id); let mail_path = format!("mail/{}", id); - let mut uid_index = Bayou::::new(creds, index_path)?; + let mut uid_index = Bayou::::new(creds, index_path).await?; uid_index.sync().await?; let uidvalidity = uid_index.state().uidvalidity; @@ -44,7 +44,7 @@ impl Mailbox { let mbox = RwLock::new(MailboxInternal { id, encryption_key: creds.keys.master.clone(), - storage: creds.storage.build()?, + storage: creds.storage.build().await?, uid_index, mail_path, }); diff --git a/src/mail/user.rs b/src/mail/user.rs index 8413cbf..8d12c58 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -174,7 +174,7 @@ impl User { // ---- Internal user & mailbox management ---- async fn open(username: String, creds: Credentials) -> Result> { - let storage = creds.storage.build()?; + let storage = creds.storage.build().await?; let (tx_inbox_id, rx_inbox_id) = watch::channel(None); diff --git a/src/storage/garage.rs b/src/storage/garage.rs index f202067..5d00ed6 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,5 +1,6 @@ use crate::storage::*; use serde::Serialize; +use aws_sdk_s3 as s3; #[derive(Clone, Debug, Serialize)] pub struct GarageConf { @@ -26,9 +27,26 @@ impl GarageBuilder { } } +#[async_trait] impl IBuilder for GarageBuilder { - fn build(&self) -> Result { - unimplemented!(); + async fn build(&self) -> Result { + let creds = s3::config::Credentials::new( + self.conf.aws_access_key_id.clone(), + self.conf.aws_secret_access_key.clone(), + None, + None, + "aerogramme" + ); + + let config = aws_config::from_env() + .region(aws_config::Region::new(self.conf.region.clone())) + .credentials_provider(creds) + .endpoint_url(self.conf.s3_endpoint.clone()) + .load() + .await; + + let s3_client = aws_sdk_s3::Client::new(&config); + Ok(Box::new(GarageStore { s3: s3_client })) } fn unique(&self) -> UnicityBuffer { UnicityBuffer(self.unicity.clone()) @@ -36,7 +54,7 @@ impl IBuilder for GarageBuilder 
{ } pub struct GarageStore { - dummy: String, + s3: s3::Client, } #[async_trait] diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index fb6e599..723bca0 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -106,8 +106,9 @@ impl MemBuilder { } } +#[async_trait] impl IBuilder for MemBuilder { - fn build(&self) -> Result { + async fn build(&self) -> Result { Ok(Box::new(MemStore { row: self.row.clone(), blob: self.blob.clone(), diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 0fedfab..10149e9 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -157,8 +157,9 @@ pub trait IStore { #[derive(Clone,Debug,PartialEq,Eq,Hash)] pub struct UnicityBuffer(Vec); +#[async_trait] pub trait IBuilder: std::fmt::Debug { - fn build(&self) -> Result; + async fn build(&self) -> Result; /// Returns an opaque buffer that uniquely identifies this builder fn unique(&self) -> UnicityBuffer; -- cgit v1.2.3 From 1057661da77c3d94e5a1ff51ab7fc58ecdb6a53a Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Dec 2023 22:30:17 +0100 Subject: implemented blob_fetch --- src/storage/garage.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 5d00ed6..97494f6 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,6 +1,10 @@ use crate::storage::*; use serde::Serialize; -use aws_sdk_s3 as s3; +use aws_sdk_s3::{ + self as s3, + error::SdkError, + operation::get_object::GetObjectError, +}; #[derive(Clone, Debug, Serialize)] pub struct GarageConf { @@ -46,7 +50,10 @@ impl IBuilder for GarageBuilder { .await; let s3_client = aws_sdk_s3::Client::new(&config); - Ok(Box::new(GarageStore { s3: s3_client })) + Ok(Box::new(GarageStore { + s3_bucket: self.conf.bucket.clone(), + s3: s3_client + })) } fn unique(&self) -> UnicityBuffer { UnicityBuffer(self.unicity.clone()) @@ -54,6 +61,7 @@ impl IBuilder for GarageBuilder { } pub struct GarageStore { + s3_bucket: String, s3: s3::Client, } @@ -79,8 +87,37 @@ impl IStore for GarageStore { } async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { - unimplemented!(); + let maybe_out = self.s3 + .get_object() + .bucket(self.s3_bucket.to_string()) + .key(blob_ref.0.to_string()) + .send() + .await; + + let object_output = match maybe_out { + Ok(output) => output, + Err(SdkError::ServiceError(x)) => match x.err() { + GetObjectError::NoSuchKey(_) => return Err(StorageError::NotFound), + e => { + tracing::warn!("Blob Fetch Error, Service Error: {}", e); + return Err(StorageError::Internal); + }, + }, + Err(e) => { + tracing::warn!("Blob Fetch Error, {}", e); + return Err(StorageError::Internal); + }, + }; + + let buffer = match object_output.body.collect().await { + Ok(aggreg) => aggreg.to_vec(), + Err(e) => { + tracing::warn!("Fetching body failed with {}", e); + return Err(StorageError::Internal); + } + }; + Ok(BlobVal::new(blob_ref.clone(), buffer)) } async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError> { unimplemented!(); -- cgit v1.2.3 From 0f7764d9f05b3fccfa30ddebb52997200af13bf2 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 22 Dec 2023 19:32:07 +0100 Subject: s3 is now implemented --- src/bayou.rs | 2 +- src/mail/incoming.rs | 2 +- src/mail/mailbox.rs | 2 +- src/storage/garage.rs | 83 +++++++++++++++++++++++++++++++++++++++++++++--- src/storage/in_memory.rs | 2 +- src/storage/mod.rs | 2 +- 6 files changed, 83 insertions(+), 10 deletions(-) (limited to 'src') 
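A rough usage sketch, not part of the patch, of the blob half of the storage API as it lands with this change: blob_insert now takes its BlobVal by value while the other calls borrow their arguments. Only the method signatures come from src/storage/mod.rs; the key names, the metadata and the blob_roundtrip helper are made up for illustration.

use crate::storage::{BlobRef, BlobVal, StorageError, Store};

async fn blob_roundtrip(store: &Store) -> Result<(), StorageError> {
    // Write one blob and attach a piece of metadata to it.
    store
        .blob_insert(
            BlobVal::new(BlobRef("mail/inbox/uid-1".to_string()), b"encrypted body".to_vec())
                .with_meta("message-key".to_string(), "key-material".to_string()),
        )
        .await?;

    // Read it back, then duplicate it under another key.
    let val = store.blob_fetch(&BlobRef("mail/inbox/uid-1".to_string())).await?;
    assert_eq!(val.value, b"encrypted body".to_vec());
    store
        .blob_copy(
            &BlobRef("mail/inbox/uid-1".to_string()),
            &BlobRef("mail/archive/uid-1".to_string()),
        )
        .await?;

    // Enumerate everything under the prefix, then clean up.
    for blob_ref in store.blob_list("mail/").await? {
        store.blob_rm(&blob_ref).await?;
    }
    Ok(())
}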
diff --git a/src/bayou.rs b/src/bayou.rs index 3c525b3..1361e49 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -352,7 +352,7 @@ impl Bayou { storage::BlobRef(format!("{}/checkpoint/{}", self.path, ts_cp.to_string())), cryptoblob.into(), ); - self.storage.blob_insert(&blob_val).await?; + self.storage.blob_insert(blob_val).await?; // Drop old checkpoints (but keep at least CHECKPOINTS_TO_KEEP of them) let ecp_len = existing_checkpoints.len(); diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 3eafac7..b17959a 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -429,7 +429,7 @@ impl EncryptedMessage { storage::BlobRef(format!("incoming/{}", gen_ident())), self.encrypted_body.clone().into(), ).with_meta(MESSAGE_KEY.to_string(), key_header); - storage.blob_insert(&blob_val).await?; + storage.blob_insert(blob_val).await?; // Update watch key to signal new mail let watch_val = storage::RowVal::new( diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 60a91dd..c925f39 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -266,7 +266,7 @@ impl MailboxInternal { async { // Encrypt and save mail body let message_blob = cryptoblob::seal(mail.raw, &message_key)?; - self.storage.blob_insert(&BlobVal::new( + self.storage.blob_insert(BlobVal::new( BlobRef(format!("{}/{}", self.mail_path, ident)), message_blob, )).await?; diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 97494f6..ec26e80 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -117,20 +117,93 @@ impl IStore for GarageStore { } }; + tracing::debug!("Fetched {}/{}", self.s3_bucket, blob_ref.0); Ok(BlobVal::new(blob_ref.clone(), buffer)) } - async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError> { - unimplemented!(); + async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError> { + let streamable_value = s3::primitives::ByteStream::from(blob_val.value); + + let maybe_send = self.s3 + .put_object() + .bucket(self.s3_bucket.to_string()) + .key(blob_val.blob_ref.0.to_string()) + .body(streamable_value) + .send() + .await; + + match maybe_send { + Err(e) => { + tracing::error!("unable to send object: {}", e); + Err(StorageError::Internal) + } + Ok(_) => { + tracing::debug!("Inserted {}/{}", self.s3_bucket, blob_val.blob_ref.0); + Ok(()) + } + } } async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> { - unimplemented!(); + let maybe_copy = self.s3 + .copy_object() + .bucket(self.s3_bucket.to_string()) + .key(dst.0.clone()) + .copy_source(format!("/{}/{}", self.s3_bucket.to_string(), src.0.clone())) + .send() + .await; + + match maybe_copy { + Err(e) => { + tracing::error!("unable to copy object {} to {} (bucket: {}), error: {}", src.0, dst.0, self.s3_bucket, e); + Err(StorageError::Internal) + }, + Ok(_) => { + tracing::debug!("copied {} to {} (bucket: {})", src.0, dst.0, self.s3_bucket); + Ok(()) + } + } } async fn blob_list(&self, prefix: &str) -> Result, StorageError> { - unimplemented!(); + let maybe_list = self.s3 + .list_objects_v2() + .bucket(self.s3_bucket.to_string()) + .prefix(prefix) + .into_paginator() + .send() + .try_collect() + .await; + + match maybe_list { + Err(e) => { + tracing::error!("listing prefix {} on bucket {} failed: {}", prefix, self.s3_bucket, e); + Err(StorageError::Internal) + } + Ok(pagin_list_out) => Ok(pagin_list_out + .into_iter() + .map(|list_out| list_out.contents.unwrap_or(vec![])) + .flatten() + .map(|obj| BlobRef(obj.key.unwrap_or(String::new()))) + .collect::>()), + } } async 
fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { - unimplemented!(); + let maybe_delete = self.s3 + .delete_object() + .bucket(self.s3_bucket.to_string()) + .key(blob_ref.0.clone()) + .send() + .await; + + match maybe_delete { + Err(e) => { + tracing::error!("unable to delete {} (bucket: {}), error {}", blob_ref.0, self.s3_bucket, e); + Err(StorageError::Internal) + }, + Ok(_) => { + tracing::debug!("deleted {} (bucket: {})", blob_ref.0, self.s3_bucket); + Ok(()) + } + } } } diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index 723bca0..d764da1 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -278,7 +278,7 @@ impl IStore for MemStore { let store = self.blob.read().or(Err(StorageError::Internal))?; store.get(&blob_ref.0).ok_or(StorageError::NotFound).map(|v| v.to_blob_val(blob_ref)) } - async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError> { + async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError> { tracing::trace!(entry=%blob_val.blob_ref, command="blob_insert"); let mut store = self.blob.write().or(Err(StorageError::Internal))?; let entry = store.entry(blob_val.blob_ref.0.clone()).or_default(); diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 10149e9..1b1faad 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -148,7 +148,7 @@ pub trait IStore { async fn row_poll(&self, value: &RowRef) -> Result; async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result; - async fn blob_insert(&self, blob_val: &BlobVal) -> Result<(), StorageError>; + async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError>; async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError>; async fn blob_list(&self, prefix: &str) -> Result, StorageError>; async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError>; -- cgit v1.2.3 From 78f2d86fc8be8e923d91f069406acd48edcae180 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Fri, 22 Dec 2023 21:52:20 +0100 Subject: WIP k2v --- src/storage/garage.rs | 118 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 98 insertions(+), 20 deletions(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index ec26e80..bc123b5 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -34,7 +34,7 @@ impl GarageBuilder { #[async_trait] impl IBuilder for GarageBuilder { async fn build(&self) -> Result { - let creds = s3::config::Credentials::new( + let s3_creds = s3::config::Credentials::new( self.conf.aws_access_key_id.clone(), self.conf.aws_secret_access_key.clone(), None, @@ -42,17 +42,35 @@ impl IBuilder for GarageBuilder { "aerogramme" ); - let config = aws_config::from_env() + let s3_config = aws_config::from_env() .region(aws_config::Region::new(self.conf.region.clone())) - .credentials_provider(creds) + .credentials_provider(s3_creds) .endpoint_url(self.conf.s3_endpoint.clone()) .load() .await; + let s3_client = aws_sdk_s3::Client::new(&s3_config); + + let k2v_config = k2v_client::K2vClientConfig { + endpoint: self.conf.k2v_endpoint.clone(), + region: self.conf.region.clone(), + aws_access_key_id: self.conf.aws_access_key_id.clone(), + aws_secret_access_key: self.conf.aws_secret_access_key.clone(), + bucket: self.conf.bucket.clone(), + user_agent: None, + }; + + let k2v_client = match k2v_client::K2vClient::new(k2v_config) { + Err(e) => { + tracing::error!("unable to build k2v client: {}", e); + return Err(StorageError::Internal); + } + Ok(v) => v, + }; - let s3_client = 
aws_sdk_s3::Client::new(&config); Ok(Box::new(GarageStore { - s3_bucket: self.conf.bucket.clone(), - s3: s3_client + bucket: self.conf.bucket.clone(), + s3: s3_client, + k2v: k2v_client, })) } fn unique(&self) -> UnicityBuffer { @@ -61,13 +79,73 @@ impl IBuilder for GarageBuilder { } pub struct GarageStore { - s3_bucket: String, + bucket: String, s3: s3::Client, + k2v: k2v_client::K2vClient, +} + +fn causal_to_row_val(row_ref: RowRef, causal_value: k2v_client::CausalValue) -> RowVal { + let new_row_ref = row_ref.with_causality(causal_value.causality.into()); + let row_values = causal_value.value.into_iter().map(|k2v_value| match k2v_value { + k2v_client::K2vValue::Tombstone => Alternative::Tombstone, + k2v_client::K2vValue::Value(v) => Alternative::Value(v), + }).collect::>(); + + RowVal { row_ref: new_row_ref, value: row_values } } #[async_trait] impl IStore for GarageStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { + let batch_op = match select { + Selector::Range { shard, sort_begin, sort_end } => vec![k2v_client::BatchReadOp { + partition_key: shard, + filter: k2v_client::Filter { + start: Some(sort_begin), + end: Some(sort_end), + ..k2v_client::Filter::default() + }, + ..k2v_client::BatchReadOp::default() + }], + Selector::List(row_ref_list) => row_ref_list.iter().map(|row_ref| k2v_client::BatchReadOp { + partition_key: &row_ref.uid.shard, + filter: k2v_client::Filter { + start: Some(&row_ref.uid.sort), + ..k2v_client::Filter::default() + }, + single_item: true, + ..k2v_client::BatchReadOp::default() + }).collect::>(), + Selector::Prefix { shard, sort_prefix } => vec![k2v_client::BatchReadOp { + partition_key: shard, + filter: k2v_client::Filter { + prefix: Some(sort_prefix), + ..k2v_client::Filter::default() + }, + ..k2v_client::BatchReadOp::default() + }], + Selector::Single(row_ref) => { + let causal_value = match self.k2v.read_item(&row_ref.uid.shard, &row_ref.uid.sort).await { + Err(e) => { + tracing::error!("K2V read item shard={}, sort={}, bucket={} failed: {}", row_ref.uid.shard, row_ref.uid.sort, self.bucket, e); + return Err(StorageError::Internal); + }, + Ok(v) => v, + }; + + let row_val = causal_to_row_val((*row_ref).clone(), causal_value); + return Ok(vec![row_val]) + }, + }; + + let all_res = match self.k2v.read_batch(&batch_op).await { + Err(e) => { + tracing::error!("k2v read batch failed for {:?}, bucket {} with err: {}", select, self.bucket, e); + return Err(StorageError::Internal); + }, + Ok(v) => v, + }; + unimplemented!(); } async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { @@ -89,7 +167,7 @@ impl IStore for GarageStore { async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { let maybe_out = self.s3 .get_object() - .bucket(self.s3_bucket.to_string()) + .bucket(self.bucket.to_string()) .key(blob_ref.0.to_string()) .send() .await; @@ -117,7 +195,7 @@ impl IStore for GarageStore { } }; - tracing::debug!("Fetched {}/{}", self.s3_bucket, blob_ref.0); + tracing::debug!("Fetched {}/{}", self.bucket, blob_ref.0); Ok(BlobVal::new(blob_ref.clone(), buffer)) } async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError> { @@ -125,7 +203,7 @@ impl IStore for GarageStore { let maybe_send = self.s3 .put_object() - .bucket(self.s3_bucket.to_string()) + .bucket(self.bucket.to_string()) .key(blob_val.blob_ref.0.to_string()) .body(streamable_value) .send() @@ -137,7 +215,7 @@ impl IStore for GarageStore { Err(StorageError::Internal) } Ok(_) => { - tracing::debug!("Inserted {}/{}", self.s3_bucket, 
blob_val.blob_ref.0); + tracing::debug!("Inserted {}/{}", self.bucket, blob_val.blob_ref.0); Ok(()) } } @@ -145,19 +223,19 @@ impl IStore for GarageStore { async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> { let maybe_copy = self.s3 .copy_object() - .bucket(self.s3_bucket.to_string()) + .bucket(self.bucket.to_string()) .key(dst.0.clone()) - .copy_source(format!("/{}/{}", self.s3_bucket.to_string(), src.0.clone())) + .copy_source(format!("/{}/{}", self.bucket.to_string(), src.0.clone())) .send() .await; match maybe_copy { Err(e) => { - tracing::error!("unable to copy object {} to {} (bucket: {}), error: {}", src.0, dst.0, self.s3_bucket, e); + tracing::error!("unable to copy object {} to {} (bucket: {}), error: {}", src.0, dst.0, self.bucket, e); Err(StorageError::Internal) }, Ok(_) => { - tracing::debug!("copied {} to {} (bucket: {})", src.0, dst.0, self.s3_bucket); + tracing::debug!("copied {} to {} (bucket: {})", src.0, dst.0, self.bucket); Ok(()) } } @@ -166,7 +244,7 @@ impl IStore for GarageStore { async fn blob_list(&self, prefix: &str) -> Result, StorageError> { let maybe_list = self.s3 .list_objects_v2() - .bucket(self.s3_bucket.to_string()) + .bucket(self.bucket.to_string()) .prefix(prefix) .into_paginator() .send() @@ -175,7 +253,7 @@ impl IStore for GarageStore { match maybe_list { Err(e) => { - tracing::error!("listing prefix {} on bucket {} failed: {}", prefix, self.s3_bucket, e); + tracing::error!("listing prefix {} on bucket {} failed: {}", prefix, self.bucket, e); Err(StorageError::Internal) } Ok(pagin_list_out) => Ok(pagin_list_out @@ -189,18 +267,18 @@ impl IStore for GarageStore { async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { let maybe_delete = self.s3 .delete_object() - .bucket(self.s3_bucket.to_string()) + .bucket(self.bucket.to_string()) .key(blob_ref.0.clone()) .send() .await; match maybe_delete { Err(e) => { - tracing::error!("unable to delete {} (bucket: {}), error {}", blob_ref.0, self.s3_bucket, e); + tracing::error!("unable to delete {} (bucket: {}), error {}", blob_ref.0, self.bucket, e); Err(StorageError::Internal) }, Ok(_) => { - tracing::debug!("deleted {} (bucket: {})", blob_ref.0, self.s3_bucket); + tracing::debug!("deleted {} (bucket: {})", blob_ref.0, self.bucket); Ok(()) } } -- cgit v1.2.3 From 18bba784eee2331e7f27fe31b89e7c674e24ded0 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 26 Dec 2023 18:33:56 +0100 Subject: insert logic --- src/storage/garage.rs | 82 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 24 deletions(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index bc123b5..665a0c6 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -97,33 +97,41 @@ fn causal_to_row_val(row_ref: RowRef, causal_value: k2v_client::CausalValue) -> #[async_trait] impl IStore for GarageStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { - let batch_op = match select { - Selector::Range { shard, sort_begin, sort_end } => vec![k2v_client::BatchReadOp { - partition_key: shard, - filter: k2v_client::Filter { - start: Some(sort_begin), - end: Some(sort_end), - ..k2v_client::Filter::default() - }, - ..k2v_client::BatchReadOp::default() - }], - Selector::List(row_ref_list) => row_ref_list.iter().map(|row_ref| k2v_client::BatchReadOp { - partition_key: &row_ref.uid.shard, - filter: k2v_client::Filter { - start: Some(&row_ref.uid.sort), - ..k2v_client::Filter::default() - }, - 
single_item: true, - ..k2v_client::BatchReadOp::default() - }).collect::>(), - Selector::Prefix { shard, sort_prefix } => vec![k2v_client::BatchReadOp { + let (pk_list, batch_op) = match select { + Selector::Range { shard, sort_begin, sort_end } => ( + vec![shard.to_string()], + vec![k2v_client::BatchReadOp { + partition_key: shard, + filter: k2v_client::Filter { + start: Some(sort_begin), + end: Some(sort_end), + ..k2v_client::Filter::default() + }, + ..k2v_client::BatchReadOp::default() + }] + ), + Selector::List(row_ref_list) => ( + row_ref_list.iter().map(|row_ref| row_ref.uid.shard.to_string()).collect::>(), + row_ref_list.iter().map(|row_ref| k2v_client::BatchReadOp { + partition_key: &row_ref.uid.shard, + filter: k2v_client::Filter { + start: Some(&row_ref.uid.sort), + ..k2v_client::Filter::default() + }, + single_item: true, + ..k2v_client::BatchReadOp::default() + }).collect::>() + ), + Selector::Prefix { shard, sort_prefix } => ( + vec![shard.to_string()], + vec![k2v_client::BatchReadOp { partition_key: shard, filter: k2v_client::Filter { prefix: Some(sort_prefix), ..k2v_client::Filter::default() }, ..k2v_client::BatchReadOp::default() - }], + }]), Selector::Single(row_ref) => { let causal_value = match self.k2v.read_item(&row_ref.uid.shard, &row_ref.uid.sort).await { Err(e) => { @@ -138,7 +146,7 @@ impl IStore for GarageStore { }, }; - let all_res = match self.k2v.read_batch(&batch_op).await { + let all_raw_res = match self.k2v.read_batch(&batch_op).await { Err(e) => { tracing::error!("k2v read batch failed for {:?}, bucket {} with err: {}", select, self.bucket, e); return Err(StorageError::Internal); @@ -146,15 +154,41 @@ impl IStore for GarageStore { Ok(v) => v, }; - unimplemented!(); + let row_vals = all_raw_res + .into_iter() + .fold(vec![], |mut acc, v| { + acc.extend(v.items); + acc + }) + .into_iter() + .zip(pk_list.into_iter()) + .map(|((sk, cv), pk)| causal_to_row_val(RowRef::new(&pk, &sk), cv)) + .collect::>(); + + Ok(row_vals) } async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { unimplemented!(); } async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { - unimplemented!(); + let batch_ops = values.iter().map(|v| k2v_client::BatchInsertOp { + partition_key: &v.row_ref.uid.shard, + sort_key: &v.row_ref.uid.sort, + causality: v.row_ref.causality.clone().map(|ct| ct.into()), + value: v.value.iter().next().map(|cv| match cv { + Alternative::Value(buff) => k2v_client::K2vValue::Value(buff.clone()), + Alternative::Tombstone => k2v_client::K2vValue::Tombstone, + }).unwrap_or(k2v_client::K2vValue::Tombstone) + }).collect::>(); + match self.k2v.insert_batch(&batch_ops).await { + Err(e) => { + tracing::error!("k2v can't insert some value: {}", e); + Err(StorageError::Internal) + }, + Ok(v) => Ok(v), + } } async fn row_poll(&self, value: &RowRef) -> Result { unimplemented!(); -- cgit v1.2.3 From 477a784e45d07d414fea77cf5b49ee241dc01f65 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Tue, 26 Dec 2023 20:02:13 +0100 Subject: implement poll --- src/storage/garage.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 665a0c6..fa6fbc1 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -191,7 +191,36 @@ impl IStore for GarageStore { } } async fn row_poll(&self, value: &RowRef) -> Result { - unimplemented!(); + loop { + if let Some(ct) = &value.causality { + match self.k2v.poll_item(&value.uid.shard, 
&value.uid.sort, ct.clone().into(), None).await { + Err(e) => { + tracing::error!("Unable to poll item: {}", e); + return Err(StorageError::Internal); + } + Ok(None) => continue, + Ok(Some(cv)) => return Ok(causal_to_row_val(value.clone(), cv)), + } + } else { + match self.k2v.read_item(&value.uid.shard, &value.uid.sort).await { + Err(k2v_client::Error::NotFound) => { + self + .k2v + .insert_item(&value.uid.shard, &value.uid.sort, vec![0u8], None) + .await + .map_err(|e| { + tracing::error!("Unable to insert item in polling logic: {}", e); + StorageError::Internal + })?; + } + Err(e) => { + tracing::error!("Unable to read item in polling logic: {}", e); + return Err(StorageError::Internal) + }, + Ok(cv) => return Ok(causal_to_row_val(value.clone(), cv)), + } + } + } } async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { -- cgit v1.2.3 From 54c9736a247bb3534a285caa637c9afb052bc2dd Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 27 Dec 2023 14:58:09 +0100 Subject: implemente garage storage --- src/mail/incoming.rs | 2 +- src/mail/mailbox.rs | 2 +- src/storage/garage.rs | 64 ++++++++++++++++++++++++++++++++++++++++++++---- src/storage/in_memory.rs | 60 +++++++++++++++++++++++++-------------------- src/storage/mod.rs | 7 +----- 5 files changed, 95 insertions(+), 40 deletions(-) (limited to 'src') diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index b17959a..7e33a9a 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -386,7 +386,7 @@ async fn k2v_lock_loop_internal( _ => None, }; if let Some(ct) = release { - match storage.row_rm_single(&ct).await { + match storage.row_rm(&storage::Selector::Single(&ct)).await { Err(e) => warn!("Unable to release lock {:?}: {}", ct, e), Ok(_) => (), }; diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index c925f39..6fb7dea 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -365,7 +365,7 @@ impl MailboxInternal { .row_fetch(&storage::Selector::Single(&RowRef::new(&self.mail_path, &sk))) .await?; if let Some(row_val) = res.into_iter().next() { - self.storage.row_rm_single(&row_val.row_ref).await?; + self.storage.row_rm(&storage::Selector::Single(&row_val.row_ref)).await?; } Ok::<_, anyhow::Error>(()) } diff --git a/src/storage/garage.rs b/src/storage/garage.rs index fa6fbc1..f9ba756 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -168,7 +168,65 @@ impl IStore for GarageStore { Ok(row_vals) } async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { - unimplemented!(); + let del_op = match select { + Selector::Range { shard, sort_begin, sort_end } => vec![k2v_client::BatchDeleteOp { + partition_key: shard, + prefix: None, + start: Some(sort_begin), + end: Some(sort_end), + single_item: false, + }], + Selector::List(row_ref_list) => { + // Insert null values with causality token = delete + let batch_op = row_ref_list.iter().map(|v| k2v_client::BatchInsertOp { + partition_key: &v.uid.shard, + sort_key: &v.uid.sort, + causality: v.causality.clone().map(|ct| ct.into()), + value: k2v_client::K2vValue::Tombstone, + }).collect::>(); + + return match self.k2v.insert_batch(&batch_op).await { + Err(e) => { + tracing::error!("Unable to delete the list of values: {}", e); + Err(StorageError::Internal) + }, + Ok(_) => Ok(()), + }; + }, + Selector::Prefix { shard, sort_prefix } => vec![k2v_client::BatchDeleteOp { + partition_key: shard, + prefix: Some(sort_prefix), + start: None, + end: None, + single_item: false, + }], + Selector::Single(row_ref) => { + // Insert null 
values with causality token = delete + let batch_op = vec![k2v_client::BatchInsertOp { + partition_key: &row_ref.uid.shard, + sort_key: &row_ref.uid.sort, + causality: row_ref.causality.clone().map(|ct| ct.into()), + value: k2v_client::K2vValue::Tombstone, + }]; + + return match self.k2v.insert_batch(&batch_op).await { + Err(e) => { + tracing::error!("Unable to delete the list of values: {}", e); + Err(StorageError::Internal) + }, + Ok(_) => Ok(()), + }; + }, + }; + + // Finally here we only have prefix & range + match self.k2v.delete_batch(&del_op).await { + Err(e) => { + tracing::error!("delete batch error: {}", e); + Err(StorageError::Internal) + }, + Ok(_) => Ok(()), + } } async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { @@ -223,10 +281,6 @@ impl IStore for GarageStore { } } - async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { - unimplemented!(); - } - async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { let maybe_out = self.s3 .get_object() diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index d764da1..ee7c9a6 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -137,6 +137,32 @@ fn prefix_last_bound(prefix: &str) -> Bound { } } +impl MemStore { + fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { + tracing::trace!(entry=%entry, command="row_rm_single"); + let mut store = self.row.write().or(Err(StorageError::Internal))?; + let shard = &entry.uid.shard; + let sort = &entry.uid.sort; + + let cauz = match entry.causality.as_ref().map(|v| v.parse::()) { + Some(Ok(v)) => v, + _ => 0, + }; + + let bt = store.entry(shard.to_string()).or_default(); + let intval = bt.entry(sort.to_string()).or_default(); + + if cauz == intval.version { + intval.data.clear(); + } + intval.data.push(InternalData::Tombstone); + intval.version += 1; + intval.change.notify_waiters(); + + Ok(()) + } +} + #[async_trait] impl IStore for MemStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { @@ -183,37 +209,17 @@ impl IStore for MemStore { } } - async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> { - tracing::trace!(entry=%entry, command="row_rm_single"); - let mut store = self.row.write().or(Err(StorageError::Internal))?; - let shard = &entry.uid.shard; - let sort = &entry.uid.sort; - - let cauz = match entry.causality.as_ref().map(|v| v.parse::()) { - Some(Ok(v)) => v, - _ => 0, - }; - - let bt = store.entry(shard.to_string()).or_default(); - let intval = bt.entry(sort.to_string()).or_default(); - - if cauz == intval.version { - intval.data.clear(); - } - intval.data.push(InternalData::Tombstone); - intval.version += 1; - intval.change.notify_waiters(); - - Ok(()) - } - async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { tracing::trace!(select=%select, command="row_rm"); - //@FIXME not efficient at all... - let values = self.row_fetch(select).await?; + + let values = match select { + Selector::Range { .. } | Selector::Prefix { .. 
} => self.row_fetch(select).await?.into_iter().map(|rv| rv.row_ref).collect::>(), + Selector::List(rlist) => rlist.clone(), + Selector::Single(row_ref) => vec![(*row_ref).clone()], + }; for v in values.into_iter() { - self.row_rm_single(&v.row_ref).await?; + self.row_rm_single(&v)?; } Ok(()) } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 1b1faad..c81ffe4 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -90,11 +90,6 @@ impl RowVal { #[derive(Debug, Clone)] pub struct BlobRef(pub String); -impl BlobRef { - pub fn new(key: &str) -> Self { - Self(key.to_string()) - } -} impl std::fmt::Display for BlobRef { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "BlobRef({})", self.0) @@ -125,6 +120,7 @@ impl BlobVal { pub enum Selector<'a> { Range { shard: &'a str, sort_begin: &'a str, sort_end: &'a str }, List (Vec), // list of (shard_key, sort_key) + #[allow(dead_code)] Prefix { shard: &'a str, sort_prefix: &'a str }, Single(&'a RowRef), } @@ -143,7 +139,6 @@ impl<'a> std::fmt::Display for Selector<'a> { pub trait IStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError>; async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError>; - async fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError>; async fn row_insert(&self, values: Vec) -> Result<(), StorageError>; async fn row_poll(&self, value: &RowRef) -> Result; -- cgit v1.2.3 From 7ac24ad913fa081e1bd6f5b042b9da0173dad267 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 27 Dec 2023 14:58:28 +0100 Subject: cargo format --- src/bayou.rs | 54 ++++++---- src/login/ldap_provider.rs | 53 +++++---- src/login/mod.rs | 70 +++++++----- src/login/static_provider.rs | 63 +++++++---- src/mail/incoming.rs | 21 ++-- src/mail/mailbox.rs | 64 +++++++---- src/mail/user.rs | 6 +- src/main.rs | 179 ++++++++++++++++-------------- src/server.rs | 19 ++-- src/storage/garage.rs | 252 +++++++++++++++++++++++++++---------------- src/storage/in_memory.rs | 71 +++++++----- src/storage/mod.rs | 41 ++++--- 12 files changed, 546 insertions(+), 347 deletions(-) (limited to 'src') diff --git a/src/bayou.rs b/src/bayou.rs index 1361e49..7253a30 100644 --- a/src/bayou.rs +++ b/src/bayou.rs @@ -9,9 +9,8 @@ use tokio::sync::{watch, Notify}; use crate::cryptoblob::*; use crate::login::Credentials; -use crate::timestamp::*; use crate::storage; - +use crate::timestamp::*; const KEEP_STATE_EVERY: usize = 64; @@ -94,7 +93,11 @@ impl Bayou { } else { debug!("(sync) loading checkpoint: {}", key); - let buf = self.storage.blob_fetch(&storage::BlobRef(key.to_string())).await?.value; + let buf = self + .storage + .blob_fetch(&storage::BlobRef(key.to_string())) + .await? + .value; debug!("(sync) checkpoint body length: {}", buf.len()); let ck = open_deserialize::(&buf, &self.key)?; @@ -125,17 +128,22 @@ impl Bayou { // 3. 
List all operations starting from checkpoint let ts_ser = self.checkpoint.0.to_string(); debug!("(sync) looking up operations starting at {}", ts_ser); - let ops_map = self.storage.row_fetch(&storage::Selector::Range { - shard: &self.path, - sort_begin: &ts_ser, - sort_end: WATCH_SK - }).await?; + let ops_map = self + .storage + .row_fetch(&storage::Selector::Range { + shard: &self.path, + sort_begin: &ts_ser, + sort_end: WATCH_SK, + }) + .await?; let mut ops = vec![]; for row_value in ops_map { let row = row_value.row_ref; let sort_key = row.uid.sort; - let ts = sort_key.parse::().map_err(|_| anyhow!("Invalid operation timestamp: {}", sort_key))?; + let ts = sort_key + .parse::() + .map_err(|_| anyhow!("Invalid operation timestamp: {}", sort_key))?; let val = row_value.value; if val.len() != 1 { @@ -211,7 +219,7 @@ impl Bayou { // Save info that sync has been done self.last_sync = new_last_sync; - self.last_sync_watch_ct = new_last_sync_watch_ct; + self.last_sync_watch_ct = new_last_sync_watch_ct; Ok(()) } @@ -362,16 +370,20 @@ impl Bayou { // Delete blobs for (_ts, key) in existing_checkpoints[..last_to_keep].iter() { debug!("(cp) drop old checkpoint {}", key); - self.storage.blob_rm(&storage::BlobRef(key.to_string())).await?; + self.storage + .blob_rm(&storage::BlobRef(key.to_string())) + .await?; } // Delete corresponding range of operations let ts_ser = existing_checkpoints[last_to_keep].0.to_string(); - self.storage.row_rm(&storage::Selector::Range { - shard: &self.path, - sort_begin: "", - sort_end: &ts_ser - }).await? + self.storage + .row_rm(&storage::Selector::Range { + shard: &self.path, + sort_begin: "", + sort_end: &ts_ser, + }) + .await? } Ok(()) @@ -426,11 +438,7 @@ impl K2vWatch { let watch = Arc::new(K2vWatch { target, rx, notify }); - tokio::spawn(Self::background_task( - Arc::downgrade(&watch), - storage, - tx, - )); + tokio::spawn(Self::background_task(Arc::downgrade(&watch), storage, tx)); Ok(watch) } @@ -444,8 +452,8 @@ impl K2vWatch { Some(this) => this.target.clone(), None => { error!("can't start loop"); - return - }, + return; + } }; while let Some(this) = Weak::upgrade(&self_weak) { diff --git a/src/login/ldap_provider.rs b/src/login/ldap_provider.rs index a7f56e4..81e5879 100644 --- a/src/login/ldap_provider.rs +++ b/src/login/ldap_provider.rs @@ -30,7 +30,10 @@ enum BucketSource { enum StorageSpecific { InMemory, - Garage { from_config: LdapGarageConfig, bucket_source: BucketSource }, + Garage { + from_config: LdapGarageConfig, + bucket_source: BucketSource, + }, } impl LdapLoginProvider { @@ -57,22 +60,24 @@ impl LdapLoginProvider { let specific = match config.storage { LdapStorage::InMemory => StorageSpecific::InMemory, LdapStorage::Garage(grgconf) => { - let bucket_source = match (grgconf.default_bucket.clone(), grgconf.bucket_attr.clone()) { - (Some(b), None) => BucketSource::Constant(b), - (None, Some(a)) => BucketSource::Attr(a), - _ => bail!("Must set `bucket` or `bucket_attr`, but not both"), - }; + let bucket_source = + match (grgconf.default_bucket.clone(), grgconf.bucket_attr.clone()) { + (Some(b), None) => BucketSource::Constant(b), + (None, Some(a)) => BucketSource::Attr(a), + _ => bail!("Must set `bucket` or `bucket_attr`, but not both"), + }; if let BucketSource::Attr(a) = &bucket_source { attrs_to_retrieve.push(a.clone()); } - StorageSpecific::Garage { from_config: grgconf, bucket_source } - }, + StorageSpecific::Garage { + from_config: grgconf, + bucket_source, + } + } }; - - Ok(Self { ldap_server: config.ldap_server, pre_bind_on_login: 
config.pre_bind_on_login, @@ -89,27 +94,32 @@ impl LdapLoginProvider { async fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result { let storage: Builder = match &self.storage_specific { - StorageSpecific::InMemory => self.in_memory_store.builder( - &get_attr(user, &self.username_attr)? - ).await, - StorageSpecific::Garage { from_config, bucket_source } => { + StorageSpecific::InMemory => { + self.in_memory_store + .builder(&get_attr(user, &self.username_attr)?) + .await + } + StorageSpecific::Garage { + from_config, + bucket_source, + } => { let aws_access_key_id = get_attr(user, &from_config.aws_access_key_id_attr)?; - let aws_secret_access_key = get_attr(user, &from_config.aws_secret_access_key_attr)?; + let aws_secret_access_key = + get_attr(user, &from_config.aws_secret_access_key_attr)?; let bucket = match bucket_source { BucketSource::Constant(b) => b.clone(), BucketSource::Attr(a) => get_attr(user, &a)?, }; - storage::garage::GarageBuilder::new(storage::garage::GarageConf { - region: from_config.aws_region.clone(), + region: from_config.aws_region.clone(), s3_endpoint: from_config.s3_endpoint.clone(), k2v_endpoint: from_config.k2v_endpoint.clone(), - aws_access_key_id, - aws_secret_access_key, + aws_access_key_id, + aws_secret_access_key, bucket, })? - }, + } }; Ok(storage) @@ -172,7 +182,6 @@ impl LoginProvider for LdapLoginProvider { drop(ldap); - Ok(Credentials { storage, keys }) } @@ -215,7 +224,7 @@ impl LoginProvider for LdapLoginProvider { let cr = CryptoRoot(crstr); let public_key = cr.public_key()?; - // storage + // storage let storage = self.storage_creds_from_ldap_user(&user).await?; drop(ldap); diff --git a/src/login/mod.rs b/src/login/mod.rs index 3369ac2..2926738 100644 --- a/src/login/mod.rs +++ b/src/login/mod.rs @@ -1,8 +1,8 @@ pub mod ldap_provider; pub mod static_provider; -use std::sync::Arc; use base64::Engine; +use std::sync::Arc; use anyhow::{anyhow, bail, Context, Result}; use async_trait::async_trait; @@ -45,7 +45,7 @@ pub struct PublicCredentials { pub public_key: PublicKey, } -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct CryptoRoot(pub String); @@ -73,47 +73,59 @@ impl CryptoRoot { pub fn public_key(&self) -> Result { match self.0.splitn(4, ':').collect::>()[..] 
{ - [ "aero", "cryptoroot", "pass", b64blob ] => { + ["aero", "cryptoroot", "pass", b64blob] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; if blob.len() < 32 { - bail!("Decoded data is {} bytes long, expect at least 32 bytes", blob.len()); + bail!( + "Decoded data is {} bytes long, expect at least 32 bytes", + blob.len() + ); } PublicKey::from_slice(&blob[..32]).context("must be a valid public key") - }, - [ "aero", "cryptoroot", "cleartext", b64blob ] => { + } + ["aero", "cryptoroot", "cleartext", b64blob] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; Ok(CryptoKeys::deserialize(&blob)?.public) - }, - [ "aero", "cryptoroot", "incoming", b64blob ] => { + } + ["aero", "cryptoroot", "incoming", b64blob] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; if blob.len() < 32 { - bail!("Decoded data is {} bytes long, expect at least 32 bytes", blob.len()); + bail!( + "Decoded data is {} bytes long, expect at least 32 bytes", + blob.len() + ); } PublicKey::from_slice(&blob[..32]).context("must be a valid public key") - }, - [ "aero", "cryptoroot", "keyring", _ ] => { + } + ["aero", "cryptoroot", "keyring", _] => { bail!("keyring is not yet implemented!") - }, - _ => bail!(format!("passed string '{}' is not a valid cryptoroot", self.0)), + } + _ => bail!(format!( + "passed string '{}' is not a valid cryptoroot", + self.0 + )), } } pub fn crypto_keys(&self, password: &str) -> Result { match self.0.splitn(4, ':').collect::>()[..] { - [ "aero", "cryptoroot", "pass", b64blob ] => { + ["aero", "cryptoroot", "pass", b64blob] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; CryptoKeys::password_open(password, &blob) - }, - [ "aero", "cryptoroot", "cleartext", b64blob ] => { + } + ["aero", "cryptoroot", "cleartext", b64blob] => { let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?; CryptoKeys::deserialize(&blob) - }, - [ "aero", "cryptoroot", "incoming", _ ] => { + } + ["aero", "cryptoroot", "incoming", _] => { bail!("incoming cryptoroot does not contain a crypto key!") - }, - [ "aero", "cryptoroot", "keyring", _ ] =>{ + } + ["aero", "cryptoroot", "keyring", _] => { bail!("keyring is not yet implemented!") - }, - _ => bail!(format!("passed string '{}' is not a valid cryptoroot", self.0)), + } + _ => bail!(format!( + "passed string '{}' is not a valid cryptoroot", + self.0 + )), } } } @@ -132,9 +144,6 @@ pub struct CryptoKeys { // ---- - - - impl CryptoKeys { /// Initialize a new cryptography root pub fn init() -> Self { @@ -202,7 +211,11 @@ fn derive_password_key(kdf_salt: &[u8], password: &str) -> Result { Ok(Key::from_slice(&argon2_kdf(kdf_salt, password.as_bytes(), 32)?).unwrap()) } -fn try_open_encrypted_keys(kdf_salt: &[u8], password: &str, encrypted_keys: &[u8]) -> Result> { +fn try_open_encrypted_keys( + kdf_salt: &[u8], + password: &str, + encrypted_keys: &[u8], +) -> Result> { let password_key = derive_password_key(kdf_salt, password)?; open(encrypted_keys, &password_key) } @@ -210,7 +223,7 @@ fn try_open_encrypted_keys(kdf_salt: &[u8], password: &str, encrypted_keys: &[u8 // ---- UTIL ---- pub fn argon2_kdf(salt: &[u8], password: &[u8], output_len: usize) -> Result> { - use argon2::{Algorithm, Argon2, ParamsBuilder, PasswordHasher, Version, password_hash}; + use argon2::{password_hash, Algorithm, Argon2, ParamsBuilder, PasswordHasher, Version}; let params = ParamsBuilder::new() .output_len(output_len) @@ -219,7 +232,8 @@ pub fn 
argon2_kdf(salt: &[u8], password: &[u8], output_len: usize) -> Result) -> Result<()> { - let mut stream = signal(SignalKind::user_defined1()).expect("failed to install SIGUSR1 signal hander for reload"); + let mut stream = signal(SignalKind::user_defined1()) + .expect("failed to install SIGUSR1 signal hander for reload"); loop { let ulist: UserList = match read_config(config.clone()) { @@ -42,7 +43,12 @@ pub async fn update_user_list(config: PathBuf, up: watch::Sender) let users = ulist .into_iter() - .map(|(username, config)| (username.clone() , Arc::new(ContextualUserEntry { username, config }))) + .map(|(username, config)| { + ( + username.clone(), + Arc::new(ContextualUserEntry { username, config }), + ) + }) .collect::>(); let mut users_by_email = HashMap::new(); @@ -51,14 +57,18 @@ pub async fn update_user_list(config: PathBuf, up: watch::Sender) if users_by_email.contains_key(m) { tracing::warn!("Several users have the same email address: {}", m); stream.recv().await; - continue + continue; } users_by_email.insert(m.clone(), u.clone()); } } tracing::info!("{} users loaded", users.len()); - up.send(UserDatabase { users, users_by_email }).context("update user db config")?; + up.send(UserDatabase { + users, + users_by_email, + }) + .context("update user db config")?; stream.recv().await; tracing::info!("Received SIGUSR1, reloading"); } @@ -71,7 +81,10 @@ impl StaticLoginProvider { tokio::spawn(update_user_list(config.user_list, tx)); rx.changed().await?; - Ok(Self { user_db: rx, in_memory_store: storage::in_memory::MemDb::new() }) + Ok(Self { + user_db: rx, + in_memory_store: storage::in_memory::MemDb::new(), + }) } } @@ -95,14 +108,16 @@ impl LoginProvider for StaticLoginProvider { tracing::debug!(user=%username, "fetch keys"); let storage: storage::Builder = match &user.config.storage { StaticStorage::InMemory => self.in_memory_store.builder(username).await, - StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { - region: grgconf.aws_region.clone(), - k2v_endpoint: grgconf.k2v_endpoint.clone(), - s3_endpoint: grgconf.s3_endpoint.clone(), - aws_access_key_id: grgconf.aws_access_key_id.clone(), - aws_secret_access_key: grgconf.aws_secret_access_key.clone(), - bucket: grgconf.bucket.clone(), - })?, + StaticStorage::Garage(grgconf) => { + storage::garage::GarageBuilder::new(storage::garage::GarageConf { + region: grgconf.aws_region.clone(), + k2v_endpoint: grgconf.k2v_endpoint.clone(), + s3_endpoint: grgconf.s3_endpoint.clone(), + aws_access_key_id: grgconf.aws_access_key_id.clone(), + aws_secret_access_key: grgconf.aws_secret_access_key.clone(), + bucket: grgconf.bucket.clone(), + })? 
+ } }; let cr = CryptoRoot(user.config.crypto_root.clone()); @@ -124,14 +139,16 @@ impl LoginProvider for StaticLoginProvider { let storage: storage::Builder = match &user.config.storage { StaticStorage::InMemory => self.in_memory_store.builder(&user.username).await, - StaticStorage::Garage(grgconf) => storage::garage::GarageBuilder::new(storage::garage::GarageConf { - region: grgconf.aws_region.clone(), - k2v_endpoint: grgconf.k2v_endpoint.clone(), - s3_endpoint: grgconf.s3_endpoint.clone(), - aws_access_key_id: grgconf.aws_access_key_id.clone(), - aws_secret_access_key: grgconf.aws_secret_access_key.clone(), - bucket: grgconf.bucket.clone(), - })?, + StaticStorage::Garage(grgconf) => { + storage::garage::GarageBuilder::new(storage::garage::GarageConf { + region: grgconf.aws_region.clone(), + k2v_endpoint: grgconf.k2v_endpoint.clone(), + s3_endpoint: grgconf.s3_endpoint.clone(), + aws_access_key_id: grgconf.aws_access_key_id.clone(), + aws_secret_access_key: grgconf.aws_secret_access_key.clone(), + bucket: grgconf.bucket.clone(), + })? + } }; let cr = CryptoRoot(user.config.crypto_root.clone()); diff --git a/src/mail/incoming.rs b/src/mail/incoming.rs index 7e33a9a..04d2ef1 100644 --- a/src/mail/incoming.rs +++ b/src/mail/incoming.rs @@ -51,7 +51,10 @@ async fn incoming_mail_watch_process_internal( creds: Credentials, mut rx_inbox_id: watch::Receiver>, ) -> Result<()> { - let mut lock_held = k2v_lock_loop(creds.storage.build().await?, storage::RowRef::new(INCOMING_PK, INCOMING_LOCK_SK)); + let mut lock_held = k2v_lock_loop( + creds.storage.build().await?, + storage::RowRef::new(INCOMING_PK, INCOMING_LOCK_SK), + ); let storage = creds.storage.build().await?; let mut inbox: Option> = None; @@ -63,8 +66,7 @@ async fn incoming_mail_watch_process_internal( let wait_new_mail = async { loop { - match storage.row_poll(&incoming_key).await - { + match storage.row_poll(&incoming_key).await { Ok(row_val) => break row_val.row_ref, Err(e) => { error!("Error in wait_new_mail: {}", e); @@ -360,7 +362,10 @@ async fn k2v_lock_loop_internal( Some(existing) => existing, None => row_ref.clone(), }; - if let Err(e) = storage.row_insert(vec![storage::RowVal::new(row, lock)]).await { + if let Err(e) = storage + .row_insert(vec![storage::RowVal::new(row, lock)]) + .await + { error!("Could not take lock: {}", e); tokio::time::sleep(Duration::from_secs(30)).await; } @@ -428,14 +433,12 @@ impl EncryptedMessage { let blob_val = storage::BlobVal::new( storage::BlobRef(format!("incoming/{}", gen_ident())), self.encrypted_body.clone().into(), - ).with_meta(MESSAGE_KEY.to_string(), key_header); + ) + .with_meta(MESSAGE_KEY.to_string(), key_header); storage.blob_insert(blob_val).await?; // Update watch key to signal new mail - let watch_val = storage::RowVal::new( - watch_ct.clone(), - gen_ident().0.to_vec(), - ); + let watch_val = storage::RowVal::new(watch_ct.clone(), gen_ident().0.to_vec()); storage.row_insert(vec![watch_val]).await?; Ok(()) } diff --git a/src/mail/mailbox.rs b/src/mail/mailbox.rs index 6fb7dea..e424ba3 100644 --- a/src/mail/mailbox.rs +++ b/src/mail/mailbox.rs @@ -8,7 +8,7 @@ use crate::login::Credentials; use crate::mail::uidindex::*; use crate::mail::unique_ident::*; use crate::mail::IMF; -use crate::storage::{Store, RowRef, RowVal, BlobRef, BlobVal, Selector, self}; +use crate::storage::{self, BlobRef, BlobVal, RowRef, RowVal, Selector, Store}; use crate::timestamp::now_msec; pub struct Mailbox { @@ -196,7 +196,10 @@ impl MailboxInternal { async fn fetch_meta(&self, ids: &[UniqueIdent]) -> Result> 
{ let ids = ids.iter().map(|x| x.to_string()).collect::>(); - let ops = ids.iter().map(|id| RowRef::new(self.mail_path.as_str(), id.as_str())).collect::>(); + let ops = ids + .iter() + .map(|id| RowRef::new(self.mail_path.as_str(), id.as_str())) + .collect::>(); let res_vec = self.storage.row_fetch(&Selector::List(ops)).await?; let mut meta_vec = vec![]; @@ -231,7 +234,10 @@ impl MailboxInternal { } async fn fetch_full(&self, id: UniqueIdent, message_key: &Key) -> Result> { - let obj_res = self.storage.blob_fetch(&BlobRef(format!("{}/{}", self.mail_path, id))).await?; + let obj_res = self + .storage + .blob_fetch(&BlobRef(format!("{}/{}", self.mail_path, id))) + .await?; let body = obj_res.value; cryptoblob::open(&body, message_key) } @@ -266,10 +272,12 @@ impl MailboxInternal { async { // Encrypt and save mail body let message_blob = cryptoblob::seal(mail.raw, &message_key)?; - self.storage.blob_insert(BlobVal::new( - BlobRef(format!("{}/{}", self.mail_path, ident)), - message_blob, - )).await?; + self.storage + .blob_insert(BlobVal::new( + BlobRef(format!("{}/{}", self.mail_path, ident)), + message_blob, + )) + .await?; Ok::<_, anyhow::Error>(()) }, async { @@ -281,10 +289,12 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.storage.row_insert(vec![RowVal::new( - RowRef::new(&self.mail_path, &ident.to_string()), - meta_blob, - )]).await?; + self.storage + .row_insert(vec![RowVal::new( + RowRef::new(&self.mail_path, &ident.to_string()), + meta_blob, + )]) + .await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -328,10 +338,12 @@ impl MailboxInternal { rfc822_size: mail.raw.len(), }; let meta_blob = seal_serialize(&meta, &self.encryption_key)?; - self.storage.row_insert(vec![RowVal::new( + self.storage + .row_insert(vec![RowVal::new( RowRef::new(&self.mail_path, &ident.to_string()), meta_blob, - )]).await?; + )]) + .await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync() @@ -355,17 +367,25 @@ impl MailboxInternal { futures::try_join!( async { // Delete mail body from S3 - self.storage.blob_rm(&BlobRef(format!("{}/{}", self.mail_path, ident))).await?; + self.storage + .blob_rm(&BlobRef(format!("{}/{}", self.mail_path, ident))) + .await?; Ok::<_, anyhow::Error>(()) }, async { // Delete mail meta from K2V let sk = ident.to_string(); - let res = self.storage - .row_fetch(&storage::Selector::Single(&RowRef::new(&self.mail_path, &sk))) + let res = self + .storage + .row_fetch(&storage::Selector::Single(&RowRef::new( + &self.mail_path, + &sk, + ))) .await?; if let Some(row_val) = res.into_iter().next() { - self.storage.row_rm(&storage::Selector::Single(&row_val.row_ref)).await?; + self.storage + .row_rm(&storage::Selector::Single(&row_val.row_ref)) + .await?; } Ok::<_, anyhow::Error>(()) } @@ -421,10 +441,12 @@ impl MailboxInternal { // Copy mail meta in K2V let meta = &from.fetch_meta(&[source_id]).await?[0]; let meta_blob = seal_serialize(meta, &self.encryption_key)?; - self.storage.row_insert(vec![RowVal::new( - RowRef::new(&self.mail_path, &new_id.to_string()), - meta_blob, - )]).await?; + self.storage + .row_insert(vec![RowVal::new( + RowRef::new(&self.mail_path, &new_id.to_string()), + meta_blob, + )]) + .await?; Ok::<_, anyhow::Error>(()) }, self.uid_index.opportunistic_sync(), diff --git a/src/mail/user.rs b/src/mail/user.rs index 8d12c58..da0d509 100644 --- a/src/mail/user.rs +++ b/src/mail/user.rs @@ -226,7 +226,11 @@ impl User { async fn load_mailbox_list(&self) -> 
Result<(MailboxList, Option)> { let row_ref = storage::RowRef::new(MAILBOX_LIST_PK, MAILBOX_LIST_SK); - let (mut list, row) = match self.storage.row_fetch(&storage::Selector::Single(&row_ref)).await { + let (mut list, row) = match self + .storage + .row_fetch(&storage::Selector::Single(&row_ref)) + .await + { Err(storage::StorageError::NotFound) => (MailboxList::new(), None), Err(e) => return Err(e.into()), Ok(rv) => { diff --git a/src/main.rs b/src/main.rs index f08f1a3..3221c2e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,5 @@ #![feature(async_fn_in_trait)] -mod timestamp; mod bayou; mod config; mod cryptoblob; @@ -11,17 +10,18 @@ mod login; mod mail; mod server; mod storage; +mod timestamp; -use std::path::PathBuf; use std::io::Read; +use std::path::PathBuf; -use anyhow::{bail, Result, Context}; +use anyhow::{bail, Context, Result}; use clap::{Parser, Subcommand}; -use nix::{unistd::Pid, sys::signal}; +use nix::{sys::signal, unistd::Pid}; use config::*; -use server::Server; use login::{static_provider::*, *}; +use server::Server; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] @@ -58,7 +58,7 @@ enum ToolsCommand { PasswordHash { #[clap(env = "AEROGRAMME_PASSWORD")] maybe_password: Option, - } + }, } #[derive(Subcommand, Debug)] @@ -138,7 +138,7 @@ enum AccountManagement { maybe_new_password: Option, #[clap(short, long)] - login: String + login: String, }, } @@ -165,11 +165,11 @@ async fn main() -> Result<()> { CompanionCommand::Daemon => { let server = Server::from_companion_config(config).await?; server.run().await?; - }, + } CompanionCommand::Reload { pid } => reload(*pid, config.pid)?, CompanionCommand::Wizard => { unimplemented!(); - }, + } CompanionCommand::Account(cmd) => { let user_file = config.users.user_list; account_management(&args.command, cmd, user_file)?; @@ -179,22 +179,24 @@ async fn main() -> Result<()> { ProviderCommand::Daemon => { let server = Server::from_provider_config(config).await?; server.run().await?; - }, + } ProviderCommand::Reload { pid } => reload(*pid, config.pid)?, ProviderCommand::Account(cmd) => { let user_file = match config.users { UserManagement::Static(conf) => conf.user_list, - UserManagement::Ldap(_) => panic!("LDAP account management is not supported from Aerogramme.") + UserManagement::Ldap(_) => { + panic!("LDAP account management is not supported from Aerogramme.") + } }; account_management(&args.command, cmd, user_file)?; } }, (Command::Provider(_), AnyConfig::Companion(_)) => { bail!("Your want to run a 'Provider' command but your configuration file has role 'Companion'."); - }, + } (Command::Companion(_), AnyConfig::Provider(_)) => { bail!("Your want to run a 'Companion' command but your configuration file has role 'Provider'."); - }, + } (Command::Tools(subcommand), _) => match subcommand { ToolsCommand::PasswordHash { maybe_password } => { let password = match maybe_password { @@ -202,60 +204,64 @@ async fn main() -> Result<()> { None => rpassword::prompt_password("Enter password: ")?, }; println!("{}", hash_password(&password)?); - }, - ToolsCommand::CryptoRoot(crcommand) => { - match crcommand { - CryptoRootCommand::New { maybe_password } => { - let password = match maybe_password { - Some(pwd) => pwd.clone(), - None => { - let password = rpassword::prompt_password("Enter password: ")?; - let password_confirm = rpassword::prompt_password("Confirm password: ")?; - if password != password_confirm { - bail!("Passwords don't match."); - } - password + } + ToolsCommand::CryptoRoot(crcommand) => match 
crcommand { + CryptoRootCommand::New { maybe_password } => { + let password = match maybe_password { + Some(pwd) => pwd.clone(), + None => { + let password = rpassword::prompt_password("Enter password: ")?; + let password_confirm = + rpassword::prompt_password("Confirm password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); } - }; - let crypto_keys = CryptoKeys::init(); - let cr = CryptoRoot::create_pass(&password, &crypto_keys)?; - println!("{}", cr.0); - }, - CryptoRootCommand::NewClearText => { - let crypto_keys = CryptoKeys::init(); - let cr = CryptoRoot::create_cleartext(&crypto_keys); - println!("{}", cr.0); - }, - CryptoRootCommand::ChangePassword { maybe_old_password, maybe_new_password, crypto_root } => { - let old_password = match maybe_old_password { - Some(pwd) => pwd.to_string(), - None => rpassword::prompt_password("Enter old password: ")?, - }; - - let new_password = match maybe_new_password { - Some(pwd) => pwd.to_string(), - None => { - let password = rpassword::prompt_password("Enter new password: ")?; - let password_confirm = rpassword::prompt_password("Confirm new password: ")?; - if password != password_confirm { - bail!("Passwords don't match."); - } - password + password + } + }; + let crypto_keys = CryptoKeys::init(); + let cr = CryptoRoot::create_pass(&password, &crypto_keys)?; + println!("{}", cr.0); + } + CryptoRootCommand::NewClearText => { + let crypto_keys = CryptoKeys::init(); + let cr = CryptoRoot::create_cleartext(&crypto_keys); + println!("{}", cr.0); + } + CryptoRootCommand::ChangePassword { + maybe_old_password, + maybe_new_password, + crypto_root, + } => { + let old_password = match maybe_old_password { + Some(pwd) => pwd.to_string(), + None => rpassword::prompt_password("Enter old password: ")?, + }; + + let new_password = match maybe_new_password { + Some(pwd) => pwd.to_string(), + None => { + let password = rpassword::prompt_password("Enter new password: ")?; + let password_confirm = + rpassword::prompt_password("Confirm new password: ")?; + if password != password_confirm { + bail!("Passwords don't match."); } - }; - - let keys = CryptoRoot(crypto_root.to_string()).crypto_keys(&old_password)?; - let cr = CryptoRoot::create_pass(&new_password, &keys)?; - println!("{}", cr.0); - }, - CryptoRootCommand::DeriveIncoming { crypto_root } => { - let pubkey = CryptoRoot(crypto_root.to_string()).public_key()?; - let cr = CryptoRoot::create_incoming(&pubkey); - println!("{}", cr.0); - }, + password + } + }; + + let keys = CryptoRoot(crypto_root.to_string()).crypto_keys(&old_password)?; + let cr = CryptoRoot::create_pass(&new_password, &keys)?; + println!("{}", cr.0); + } + CryptoRootCommand::DeriveIncoming { crypto_root } => { + let pubkey = CryptoRoot(crypto_root.to_string()).public_key()?; + let cr = CryptoRoot::create_incoming(&pubkey); + println!("{}", cr.0); } }, - } + }, } Ok(()) @@ -264,12 +270,12 @@ async fn main() -> Result<()> { fn reload(pid: Option, pid_path: Option) -> Result<()> { let final_pid = match (pid, pid_path) { (Some(pid), _) => pid, - (_, Some(path)) => { + (_, Some(path)) => { let mut f = std::fs::OpenOptions::new().read(true).open(path)?; let mut pidstr = String::new(); f.read_to_string(&mut pidstr)?; pidstr.parse::()? 
- }, + } _ => bail!("Unable to infer your daemon's PID"), }; let pid = Pid::from_raw(final_pid); @@ -278,13 +284,15 @@ fn reload(pid: Option, pid_path: Option) -> Result<()> { } fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) -> Result<()> { - let mut ulist: UserList = read_config(users.clone()).context(format!("'{:?}' must be a user database", users))?; + let mut ulist: UserList = + read_config(users.clone()).context(format!("'{:?}' must be a user database", users))?; match cmd { AccountManagement::Add { login, setup } => { - tracing::debug!(user=login, "will-create"); - let stp: SetupEntry = read_config(setup.clone()).context(format!("'{:?}' must be a setup file", setup))?; - tracing::debug!(user=login, "loaded setup entry"); + tracing::debug!(user = login, "will-create"); + let stp: SetupEntry = read_config(setup.clone()) + .context(format!("'{:?}' must be a setup file", setup))?; + tracing::debug!(user = login, "loaded setup entry"); let password = match stp.clear_password { Some(pwd) => pwd, @@ -307,21 +315,28 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - let hash = hash_password(password.as_str()).context("unable to hash password")?; - ulist.insert(login.clone(), UserEntry { - email_addresses: stp.email_addresses, - password: hash, - crypto_root: crypto_root.0, - storage: stp.storage, - }); + ulist.insert( + login.clone(), + UserEntry { + email_addresses: stp.email_addresses, + password: hash, + crypto_root: crypto_root.0, + storage: stp.storage, + }, + ); write_config(users.clone(), &ulist)?; - }, + } AccountManagement::Delete { login } => { - tracing::debug!(user=login, "will-delete"); + tracing::debug!(user = login, "will-delete"); ulist.remove(login); write_config(users.clone(), &ulist)?; - }, - AccountManagement::ChangePassword { maybe_old_password, maybe_new_password, login } => { + } + AccountManagement::ChangePassword { + maybe_old_password, + maybe_new_password, + login, + } => { let mut user = ulist.remove(login).context("user must exist first")?; let old_password = match maybe_old_password { @@ -345,16 +360,16 @@ fn account_management(root: &Command, cmd: &AccountManagement, users: PathBuf) - } password } - }; + }; let new_hash = hash_password(&new_password)?; let new_crypto_root = CryptoRoot::create_pass(&new_password, &crypto_keys)?; - + user.password = new_hash; user.crypto_root = new_crypto_root.0; ulist.insert(login.clone(), user); write_config(users.clone(), &ulist)?; - }, + } }; Ok(()) diff --git a/src/server.rs b/src/server.rs index 552a0e6..28e0b27 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,6 +1,6 @@ -use std::sync::Arc; -use std::path::PathBuf; use std::io::Write; +use std::path::PathBuf; +use std::sync::Arc; use anyhow::Result; use futures::try_join; @@ -26,7 +26,11 @@ impl Server { let lmtp_server = None; let imap_server = Some(imap::new(config.imap, login.clone()).await?); - Ok(Self { lmtp_server, imap_server, pid_file: config.pid }) + Ok(Self { + lmtp_server, + imap_server, + pid_file: config.pid, + }) } pub async fn from_provider_config(config: ProviderConfig) -> Result { @@ -39,12 +43,16 @@ impl Server { let lmtp_server = Some(LmtpServer::new(config.lmtp, login.clone())); let imap_server = Some(imap::new(config.imap, login.clone()).await?); - Ok(Self { lmtp_server, imap_server, pid_file: config.pid }) + Ok(Self { + lmtp_server, + imap_server, + pid_file: config.pid, + }) } pub async fn run(self) -> Result<()> { let pid = std::process::id(); - tracing::info!(pid=pid, "Starting 
main loops"); + tracing::info!(pid = pid, "Starting main loops"); // write the pid file if let Some(pid_file) = self.pid_file { @@ -57,7 +65,6 @@ impl Server { drop(file); } - let (exit_signal, provoke_exit) = watch_ctrl_c(); let _exit_on_err = move |err: anyhow::Error| { error!("Error: {}", err); diff --git a/src/storage/garage.rs b/src/storage/garage.rs index f9ba756..d08585f 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -1,10 +1,6 @@ use crate::storage::*; +use aws_sdk_s3::{self as s3, error::SdkError, operation::get_object::GetObjectError}; use serde::Serialize; -use aws_sdk_s3::{ - self as s3, - error::SdkError, - operation::get_object::GetObjectError, -}; #[derive(Clone, Debug, Serialize)] pub struct GarageConf { @@ -28,18 +24,18 @@ impl GarageBuilder { unicity.extend_from_slice(file!().as_bytes()); unicity.append(&mut rmp_serde::to_vec(&conf)?); Ok(Arc::new(Self { conf, unicity })) - } + } } #[async_trait] impl IBuilder for GarageBuilder { async fn build(&self) -> Result { let s3_creds = s3::config::Credentials::new( - self.conf.aws_access_key_id.clone(), - self.conf.aws_secret_access_key.clone(), - None, - None, - "aerogramme" + self.conf.aws_access_key_id.clone(), + self.conf.aws_secret_access_key.clone(), + None, + None, + "aerogramme", ); let s3_config = aws_config::from_env() @@ -51,12 +47,12 @@ impl IBuilder for GarageBuilder { let s3_client = aws_sdk_s3::Client::new(&s3_config); let k2v_config = k2v_client::K2vClientConfig { - endpoint: self.conf.k2v_endpoint.clone(), - region: self.conf.region.clone(), - aws_access_key_id: self.conf.aws_access_key_id.clone(), - aws_secret_access_key: self.conf.aws_secret_access_key.clone(), - bucket: self.conf.bucket.clone(), - user_agent: None, + endpoint: self.conf.k2v_endpoint.clone(), + region: self.conf.region.clone(), + aws_access_key_id: self.conf.aws_access_key_id.clone(), + aws_secret_access_key: self.conf.aws_secret_access_key.clone(), + bucket: self.conf.bucket.clone(), + user_agent: None, }; let k2v_client = match k2v_client::K2vClient::new(k2v_config) { @@ -67,7 +63,7 @@ impl IBuilder for GarageBuilder { Ok(v) => v, }; - Ok(Box::new(GarageStore { + Ok(Box::new(GarageStore { bucket: self.conf.bucket.clone(), s3: s3_client, k2v: k2v_client, @@ -86,19 +82,30 @@ pub struct GarageStore { fn causal_to_row_val(row_ref: RowRef, causal_value: k2v_client::CausalValue) -> RowVal { let new_row_ref = row_ref.with_causality(causal_value.causality.into()); - let row_values = causal_value.value.into_iter().map(|k2v_value| match k2v_value { - k2v_client::K2vValue::Tombstone => Alternative::Tombstone, - k2v_client::K2vValue::Value(v) => Alternative::Value(v), - }).collect::>(); + let row_values = causal_value + .value + .into_iter() + .map(|k2v_value| match k2v_value { + k2v_client::K2vValue::Tombstone => Alternative::Tombstone, + k2v_client::K2vValue::Value(v) => Alternative::Value(v), + }) + .collect::>(); - RowVal { row_ref: new_row_ref, value: row_values } + RowVal { + row_ref: new_row_ref, + value: row_values, + } } #[async_trait] impl IStore for GarageStore { async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result, StorageError> { let (pk_list, batch_op) = match select { - Selector::Range { shard, sort_begin, sort_end } => ( + Selector::Range { + shard, + sort_begin, + sort_end, + } => ( vec![shard.to_string()], vec![k2v_client::BatchReadOp { partition_key: shard, @@ -108,49 +115,71 @@ impl IStore for GarageStore { ..k2v_client::Filter::default() }, ..k2v_client::BatchReadOp::default() - }] + }], ), 
Selector::List(row_ref_list) => ( - row_ref_list.iter().map(|row_ref| row_ref.uid.shard.to_string()).collect::>(), - row_ref_list.iter().map(|row_ref| k2v_client::BatchReadOp { - partition_key: &row_ref.uid.shard, + row_ref_list + .iter() + .map(|row_ref| row_ref.uid.shard.to_string()) + .collect::>(), + row_ref_list + .iter() + .map(|row_ref| k2v_client::BatchReadOp { + partition_key: &row_ref.uid.shard, + filter: k2v_client::Filter { + start: Some(&row_ref.uid.sort), + ..k2v_client::Filter::default() + }, + single_item: true, + ..k2v_client::BatchReadOp::default() + }) + .collect::>(), + ), + Selector::Prefix { shard, sort_prefix } => ( + vec![shard.to_string()], + vec![k2v_client::BatchReadOp { + partition_key: shard, filter: k2v_client::Filter { - start: Some(&row_ref.uid.sort), + prefix: Some(sort_prefix), ..k2v_client::Filter::default() }, - single_item: true, ..k2v_client::BatchReadOp::default() - }).collect::>() + }], ), - Selector::Prefix { shard, sort_prefix } => ( - vec![shard.to_string()], - vec![k2v_client::BatchReadOp { - partition_key: shard, - filter: k2v_client::Filter { - prefix: Some(sort_prefix), - ..k2v_client::Filter::default() - }, - ..k2v_client::BatchReadOp::default() - }]), Selector::Single(row_ref) => { - let causal_value = match self.k2v.read_item(&row_ref.uid.shard, &row_ref.uid.sort).await { + let causal_value = match self + .k2v + .read_item(&row_ref.uid.shard, &row_ref.uid.sort) + .await + { Err(e) => { - tracing::error!("K2V read item shard={}, sort={}, bucket={} failed: {}", row_ref.uid.shard, row_ref.uid.sort, self.bucket, e); + tracing::error!( + "K2V read item shard={}, sort={}, bucket={} failed: {}", + row_ref.uid.shard, + row_ref.uid.sort, + self.bucket, + e + ); return Err(StorageError::Internal); - }, + } Ok(v) => v, }; let row_val = causal_to_row_val((*row_ref).clone(), causal_value); - return Ok(vec![row_val]) - }, + return Ok(vec![row_val]); + } }; let all_raw_res = match self.k2v.read_batch(&batch_op).await { Err(e) => { - tracing::error!("k2v read batch failed for {:?}, bucket {} with err: {}", select, self.bucket, e); + tracing::error!( + "k2v read batch failed for {:?}, bucket {} with err: {}", + select, + self.bucket, + e + ); return Err(StorageError::Internal); - }, + } Ok(v) => v, }; @@ -163,13 +192,17 @@ impl IStore for GarageStore { .into_iter() .zip(pk_list.into_iter()) .map(|((sk, cv), pk)| causal_to_row_val(RowRef::new(&pk, &sk), cv)) - .collect::>(); + .collect::>(); Ok(row_vals) } async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> { let del_op = match select { - Selector::Range { shard, sort_begin, sort_end } => vec![k2v_client::BatchDeleteOp { + Selector::Range { + shard, + sort_begin, + sort_end, + } => vec![k2v_client::BatchDeleteOp { partition_key: shard, prefix: None, start: Some(sort_begin), @@ -178,21 +211,24 @@ impl IStore for GarageStore { }], Selector::List(row_ref_list) => { // Insert null values with causality token = delete - let batch_op = row_ref_list.iter().map(|v| k2v_client::BatchInsertOp { - partition_key: &v.uid.shard, - sort_key: &v.uid.sort, - causality: v.causality.clone().map(|ct| ct.into()), - value: k2v_client::K2vValue::Tombstone, - }).collect::>(); - + let batch_op = row_ref_list + .iter() + .map(|v| k2v_client::BatchInsertOp { + partition_key: &v.uid.shard, + sort_key: &v.uid.sort, + causality: v.causality.clone().map(|ct| ct.into()), + value: k2v_client::K2vValue::Tombstone, + }) + .collect::>(); + return match self.k2v.insert_batch(&batch_op).await { Err(e) => { 
tracing::error!("Unable to delete the list of values: {}", e); Err(StorageError::Internal) - }, + } Ok(_) => Ok(()), }; - }, + } Selector::Prefix { shard, sort_prefix } => vec![k2v_client::BatchDeleteOp { partition_key: shard, prefix: Some(sort_prefix), @@ -208,15 +244,15 @@ impl IStore for GarageStore { causality: row_ref.causality.clone().map(|ct| ct.into()), value: k2v_client::K2vValue::Tombstone, }]; - + return match self.k2v.insert_batch(&batch_op).await { Err(e) => { tracing::error!("Unable to delete the list of values: {}", e); Err(StorageError::Internal) - }, + } Ok(_) => Ok(()), }; - }, + } }; // Finally here we only have prefix & range @@ -224,34 +260,46 @@ impl IStore for GarageStore { Err(e) => { tracing::error!("delete batch error: {}", e); Err(StorageError::Internal) - }, + } Ok(_) => Ok(()), } } async fn row_insert(&self, values: Vec) -> Result<(), StorageError> { - let batch_ops = values.iter().map(|v| k2v_client::BatchInsertOp { - partition_key: &v.row_ref.uid.shard, - sort_key: &v.row_ref.uid.sort, - causality: v.row_ref.causality.clone().map(|ct| ct.into()), - value: v.value.iter().next().map(|cv| match cv { - Alternative::Value(buff) => k2v_client::K2vValue::Value(buff.clone()), - Alternative::Tombstone => k2v_client::K2vValue::Tombstone, - }).unwrap_or(k2v_client::K2vValue::Tombstone) - }).collect::>(); + let batch_ops = values + .iter() + .map(|v| k2v_client::BatchInsertOp { + partition_key: &v.row_ref.uid.shard, + sort_key: &v.row_ref.uid.sort, + causality: v.row_ref.causality.clone().map(|ct| ct.into()), + value: v + .value + .iter() + .next() + .map(|cv| match cv { + Alternative::Value(buff) => k2v_client::K2vValue::Value(buff.clone()), + Alternative::Tombstone => k2v_client::K2vValue::Tombstone, + }) + .unwrap_or(k2v_client::K2vValue::Tombstone), + }) + .collect::>(); match self.k2v.insert_batch(&batch_ops).await { Err(e) => { tracing::error!("k2v can't insert some value: {}", e); Err(StorageError::Internal) - }, + } Ok(v) => Ok(v), } } async fn row_poll(&self, value: &RowRef) -> Result { loop { if let Some(ct) = &value.causality { - match self.k2v.poll_item(&value.uid.shard, &value.uid.sort, ct.clone().into(), None).await { + match self + .k2v + .poll_item(&value.uid.shard, &value.uid.sort, ct.clone().into(), None) + .await + { Err(e) => { tracing::error!("Unable to poll item: {}", e); return Err(StorageError::Internal); @@ -262,8 +310,7 @@ impl IStore for GarageStore { } else { match self.k2v.read_item(&value.uid.shard, &value.uid.sort).await { Err(k2v_client::Error::NotFound) => { - self - .k2v + self.k2v .insert_item(&value.uid.shard, &value.uid.sort, vec![0u8], None) .await .map_err(|e| { @@ -273,8 +320,8 @@ impl IStore for GarageStore { } Err(e) => { tracing::error!("Unable to read item in polling logic: {}", e); - return Err(StorageError::Internal) - }, + return Err(StorageError::Internal); + } Ok(cv) => return Ok(causal_to_row_val(value.clone(), cv)), } } @@ -282,7 +329,8 @@ impl IStore for GarageStore { } async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { - let maybe_out = self.s3 + let maybe_out = self + .s3 .get_object() .bucket(self.bucket.to_string()) .key(blob_ref.0.to_string()) @@ -296,12 +344,12 @@ impl IStore for GarageStore { e => { tracing::warn!("Blob Fetch Error, Service Error: {}", e); return Err(StorageError::Internal); - }, + } }, Err(e) => { tracing::warn!("Blob Fetch Error, {}", e); return Err(StorageError::Internal); - }, + } }; let buffer = match object_output.body.collect().await { @@ -316,9 +364,10 @@ impl IStore for 
GarageStore { Ok(BlobVal::new(blob_ref.clone(), buffer)) } async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError> { - let streamable_value = s3::primitives::ByteStream::from(blob_val.value); + let streamable_value = s3::primitives::ByteStream::from(blob_val.value); - let maybe_send = self.s3 + let maybe_send = self + .s3 .put_object() .bucket(self.bucket.to_string()) .key(blob_val.blob_ref.0.to_string()) @@ -338,7 +387,8 @@ impl IStore for GarageStore { } } async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> { - let maybe_copy = self.s3 + let maybe_copy = self + .s3 .copy_object() .bucket(self.bucket.to_string()) .key(dst.0.clone()) @@ -348,18 +398,24 @@ impl IStore for GarageStore { match maybe_copy { Err(e) => { - tracing::error!("unable to copy object {} to {} (bucket: {}), error: {}", src.0, dst.0, self.bucket, e); + tracing::error!( + "unable to copy object {} to {} (bucket: {}), error: {}", + src.0, + dst.0, + self.bucket, + e + ); Err(StorageError::Internal) - }, + } Ok(_) => { tracing::debug!("copied {} to {} (bucket: {})", src.0, dst.0, self.bucket); Ok(()) } } - } async fn blob_list(&self, prefix: &str) -> Result, StorageError> { - let maybe_list = self.s3 + let maybe_list = self + .s3 .list_objects_v2() .bucket(self.bucket.to_string()) .prefix(prefix) @@ -370,7 +426,12 @@ impl IStore for GarageStore { match maybe_list { Err(e) => { - tracing::error!("listing prefix {} on bucket {} failed: {}", prefix, self.bucket, e); + tracing::error!( + "listing prefix {} on bucket {} failed: {}", + prefix, + self.bucket, + e + ); Err(StorageError::Internal) } Ok(pagin_list_out) => Ok(pagin_list_out @@ -382,7 +443,8 @@ impl IStore for GarageStore { } } async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { - let maybe_delete = self.s3 + let maybe_delete = self + .s3 .delete_object() .bucket(self.bucket.to_string()) .key(blob_ref.0.clone()) @@ -391,9 +453,14 @@ impl IStore for GarageStore { match maybe_delete { Err(e) => { - tracing::error!("unable to delete {} (bucket: {}), error {}", blob_ref.0, self.bucket, e); + tracing::error!( + "unable to delete {} (bucket: {}), error {}", + blob_ref.0, + self.bucket, + e + ); Err(StorageError::Internal) - }, + } Ok(_) => { tracing::debug!("deleted {} (bucket: {})", blob_ref.0, self.bucket); Ok(()) @@ -401,4 +468,3 @@ impl IStore for GarageStore { } } } - diff --git a/src/storage/in_memory.rs b/src/storage/in_memory.rs index ee7c9a6..3c3a94c 100644 --- a/src/storage/in_memory.rs +++ b/src/storage/in_memory.rs @@ -1,6 +1,6 @@ use crate::storage::*; -use std::collections::{HashMap, BTreeMap}; -use std::ops::Bound::{Included, Unbounded, Excluded, self}; +use std::collections::{BTreeMap, HashMap}; +use std::ops::Bound::{self, Excluded, Included, Unbounded}; use std::sync::{Arc, RwLock}; use tokio::sync::Notify; @@ -16,7 +16,7 @@ impl MemDb { Self(tokio::sync::Mutex::new(HashMap::new())) } - pub async fn builder(&self, username: &str) -> Arc { + pub async fn builder(&self, username: &str) -> Arc { let mut global_storage = self.0.lock().await; global_storage .entry(username.to_string()) @@ -60,8 +60,8 @@ impl InternalRowVal { } fn to_row_val(&self, row_ref: RowRef) -> RowVal { - RowVal{ - row_ref: row_ref.with_causality(self.version.to_string()), + RowVal { + row_ref: row_ref.with_causality(self.version.to_string()), value: self.concurrent_values(), } } @@ -75,7 +75,7 @@ struct InternalBlobVal { impl InternalBlobVal { fn to_blob_val(&self, bref: &BlobRef) -> BlobVal { BlobVal { - blob_ref: 
bref.clone(), + blob_ref: bref.clone(), meta: self.metadata.clone(), value: self.data.clone(), } @@ -113,7 +113,7 @@ impl IBuilder for MemBuilder { row: self.row.clone(), blob: self.blob.clone(), })) - } + } fn unique(&self) -> UnicityBuffer { UnicityBuffer(self.unicity.clone()) @@ -170,24 +170,32 @@ impl IStore for MemStore { let store = self.row.read().or(Err(StorageError::Internal))?; match select { - Selector::Range { shard, sort_begin, sort_end } => { - Ok(store - .get(*shard) - .unwrap_or(&BTreeMap::new()) - .range((Included(sort_begin.to_string()), Excluded(sort_end.to_string()))) - .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) - .collect::>()) - }, + Selector::Range { + shard, + sort_begin, + sort_end, + } => Ok(store + .get(*shard) + .unwrap_or(&BTreeMap::new()) + .range(( + Included(sort_begin.to_string()), + Excluded(sort_end.to_string()), + )) + .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) + .collect::>()), Selector::List(rlist) => { let mut acc = vec![]; for row_ref in rlist { - let maybe_intval = store.get(&row_ref.uid.shard).map(|v| v.get(&row_ref.uid.sort)).flatten(); + let maybe_intval = store + .get(&row_ref.uid.shard) + .map(|v| v.get(&row_ref.uid.sort)) + .flatten(); if let Some(intval) = maybe_intval { acc.push(intval.to_row_val(row_ref.clone())); } } Ok(acc) - }, + } Selector::Prefix { shard, sort_prefix } => { let last_bound = prefix_last_bound(sort_prefix); @@ -197,13 +205,13 @@ impl IStore for MemStore { .range((Included(sort_prefix.to_string()), last_bound)) .map(|(k, v)| v.to_row_val(RowRef::new(shard, k))) .collect::>()) - }, + } Selector::Single(row_ref) => { let intval = store - .get(&row_ref.uid.shard) - .ok_or(StorageError::NotFound)? - .get(&row_ref.uid.sort) - .ok_or(StorageError::NotFound)?; + .get(&row_ref.uid.shard) + .ok_or(StorageError::NotFound)? + .get(&row_ref.uid.sort) + .ok_or(StorageError::NotFound)?; Ok(vec![intval.to_row_val((*row_ref).clone())]) } } @@ -213,7 +221,12 @@ impl IStore for MemStore { tracing::trace!(select=%select, command="row_rm"); let values = match select { - Selector::Range { .. } | Selector::Prefix { .. } => self.row_fetch(select).await?.into_iter().map(|rv| rv.row_ref).collect::>(), + Selector::Range { .. } | Selector::Prefix { .. } => self + .row_fetch(select) + .await? 
+ .into_iter() + .map(|rv| rv.row_ref) + .collect::>(), Selector::List(rlist) => rlist.clone(), Selector::Single(row_ref) => vec![(*row_ref).clone()], }; @@ -282,7 +295,10 @@ impl IStore for MemStore { async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result { tracing::trace!(entry=%blob_ref, command="blob_fetch"); let store = self.blob.read().or(Err(StorageError::Internal))?; - store.get(&blob_ref.0).ok_or(StorageError::NotFound).map(|v| v.to_blob_val(blob_ref)) + store + .get(&blob_ref.0) + .ok_or(StorageError::NotFound) + .map(|v| v.to_blob_val(blob_ref)) } async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError> { tracing::trace!(entry=%blob_val.blob_ref, command="blob_insert"); @@ -300,10 +316,13 @@ impl IStore for MemStore { Ok(()) } async fn blob_list(&self, prefix: &str) -> Result, StorageError> { - tracing::trace!(prefix=prefix, command="blob_list"); + tracing::trace!(prefix = prefix, command = "blob_list"); let store = self.blob.read().or(Err(StorageError::Internal))?; let last_bound = prefix_last_bound(prefix); - let blist = store.range((Included(prefix.to_string()), last_bound)).map(|(k, _)| BlobRef(k.to_string())).collect::>(); + let blist = store + .range((Included(prefix.to_string()), last_bound)) + .map(|(k, _)| BlobRef(k.to_string())) + .collect::>(); Ok(blist) } async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c81ffe4..1f86f71 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -8,13 +8,13 @@ * into the object system so it is not exposed. */ -pub mod in_memory; pub mod garage; +pub mod in_memory; -use std::sync::Arc; -use std::hash::Hash; -use std::collections::HashMap; use async_trait::async_trait; +use std::collections::HashMap; +use std::hash::Hash; +use std::sync::Arc; #[derive(Debug, Clone)] pub enum Alternative { @@ -52,14 +52,18 @@ pub struct RowRef { } impl std::fmt::Display for RowRef { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "RowRef({}, {}, {:?})", self.uid.shard, self.uid.sort, self.causality) + write!( + f, + "RowRef({}, {}, {:?})", + self.uid.shard, self.uid.sort, self.causality + ) } } impl RowRef { pub fn new(shard: &str, sort: &str) -> Self { Self { - uid: RowUid { + uid: RowUid { shard: shard.to_string(), sort: sort.to_string(), }, @@ -87,7 +91,6 @@ impl RowVal { } } - #[derive(Debug, Clone)] pub struct BlobRef(pub String); impl std::fmt::Display for BlobRef { @@ -105,7 +108,8 @@ pub struct BlobVal { impl BlobVal { pub fn new(blob_ref: BlobRef, value: Vec) -> Self { Self { - blob_ref, value, + blob_ref, + value, meta: HashMap::new(), } } @@ -118,16 +122,27 @@ impl BlobVal { #[derive(Debug)] pub enum Selector<'a> { - Range { shard: &'a str, sort_begin: &'a str, sort_end: &'a str }, - List (Vec), // list of (shard_key, sort_key) + Range { + shard: &'a str, + sort_begin: &'a str, + sort_end: &'a str, + }, + List(Vec), // list of (shard_key, sort_key) #[allow(dead_code)] - Prefix { shard: &'a str, sort_prefix: &'a str }, + Prefix { + shard: &'a str, + sort_prefix: &'a str, + }, Single(&'a RowRef), } impl<'a> std::fmt::Display for Selector<'a> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::Range { shard, sort_begin, sort_end } => write!(f, "Range({}, [{}, {}[)", shard, sort_begin, sort_end), + Self::Range { + shard, + sort_begin, + sort_end, + } => write!(f, "Range({}, [{}, {}[)", shard, sort_begin, sort_end), Self::List(list) => write!(f, "List({:?})", list), 
Self::Prefix { shard, sort_prefix } => write!(f, "Prefix({}, {})", shard, sort_prefix), Self::Single(row_ref) => write!(f, "Single({})", row_ref), @@ -149,7 +164,7 @@ pub trait IStore { async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError>; } -#[derive(Clone,Debug,PartialEq,Eq,Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct UnicityBuffer(Vec); #[async_trait] -- cgit v1.2.3 From dea6cd00399655b6b34545d059005160902bea9e Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 27 Dec 2023 16:38:27 +0100 Subject: debug implementation --- src/storage/garage.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index d08585f..00b0214 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -38,13 +38,18 @@ impl IBuilder for GarageBuilder { "aerogramme", ); - let s3_config = aws_config::from_env() + let sdk_config = aws_config::from_env() .region(aws_config::Region::new(self.conf.region.clone())) .credentials_provider(s3_creds) .endpoint_url(self.conf.s3_endpoint.clone()) .load() .await; - let s3_client = aws_sdk_s3::Client::new(&s3_config); + + let s3_config = aws_sdk_s3::config::Builder::from(&sdk_config) + .force_path_style(true) + .build(); + + let s3_client = aws_sdk_s3::Client::from_conf(s3_config); let k2v_config = k2v_client::K2vClientConfig { endpoint: self.conf.k2v_endpoint.clone(), @@ -152,6 +157,15 @@ impl IStore for GarageStore { .read_item(&row_ref.uid.shard, &row_ref.uid.sort) .await { + Err(k2v_client::Error::NotFound) => { + tracing::debug!( + "K2V item not found shard={}, sort={}, bucket={}", + row_ref.uid.shard, + row_ref.uid.sort, + self.bucket, + ); + return Err(StorageError::NotFound); + } Err(e) => { tracing::error!( "K2V read item shard={}, sort={}, bucket={} failed: {}", -- cgit v1.2.3 From ea4cd48bba96027882a637df08e313af92a3db46 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Wed, 27 Dec 2023 17:34:49 +0100 Subject: fix metadata --- src/storage/garage.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/storage/garage.rs b/src/storage/garage.rs index 00b0214..90b84d6 100644 --- a/src/storage/garage.rs +++ b/src/storage/garage.rs @@ -374,8 +374,12 @@ impl IStore for GarageStore { } }; + let mut bv = BlobVal::new(blob_ref.clone(), buffer); + if let Some(meta) = object_output.metadata { + bv.meta = meta; + } tracing::debug!("Fetched {}/{}", self.bucket, blob_ref.0); - Ok(BlobVal::new(blob_ref.clone(), buffer)) + Ok(bv) } async fn blob_insert(&self, blob_val: BlobVal) -> Result<(), StorageError> { let streamable_value = s3::primitives::ByteStream::from(blob_val.value); @@ -385,6 +389,7 @@ impl IStore for GarageStore { .put_object() .bucket(self.bucket.to_string()) .key(blob_val.blob_ref.0.to_string()) + .set_metadata(Some(blob_val.meta)) .body(streamable_value) .send() .await; -- cgit v1.2.3
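
The selector semantics above are the same for every backend: Range is end-exclusive (displayed as "[begin, end[" and mapped to Excluded(sort_end) in the in-memory store), Prefix matches every sort key starting with the given string, and Single yields exactly one value or StorageError::NotFound. The sketch below is not part of the patch series; it drives the row half of IStore with made-up shard/sort keys, and it assumes RowVal's row_ref/value fields are directly constructible from the caller's position, as garage.rs and in_memory.rs do internally.

    use crate::storage::*;

    // Exercise the three main selector shapes against any IStore backend
    // (e.g. the in-memory one); the keys "mailbox"/"msg-*" are illustrative.
    async fn selector_demo(store: &dyn IStore) -> Result<(), StorageError> {
        store
            .row_insert(vec![
                RowVal {
                    row_ref: RowRef::new("mailbox", "msg-001"),
                    value: vec![Alternative::Value(b"first".to_vec())],
                },
                RowVal {
                    row_ref: RowRef::new("mailbox", "msg-002"),
                    value: vec![Alternative::Value(b"second".to_vec())],
                },
            ])
            .await?;

        // Range is end-exclusive, so only msg-001 comes back here.
        let in_range = store
            .row_fetch(&Selector::Range {
                shard: "mailbox",
                sort_begin: "msg-001",
                sort_end: "msg-002",
            })
            .await?;
        assert_eq!(in_range.len(), 1);

        // Prefix matches every sort key starting with "msg-".
        let by_prefix = store
            .row_fetch(&Selector::Prefix {
                shard: "mailbox",
                sort_prefix: "msg-",
            })
            .await?;
        assert_eq!(by_prefix.len(), 2);

        // Single returns exactly one value or StorageError::NotFound.
        let single = RowRef::new("mailbox", "msg-002");
        let exactly_one = store.row_fetch(&Selector::Single(&single)).await?;
        assert_eq!(exactly_one.len(), 1);

        Ok(())
    }

On a fresh in-memory store the three assertions hold; against the Garage backend the same calls go through K2V batch reads instead, without the caller changing anything.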
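
Causality tokens never appear as a separate argument: row_fetch stamps them onto the returned row_ref via with_causality, and row_insert / row_rm forward them back to K2V, including for the tombstone writes used to delete. A sketch of the resulting read-modify-write pattern, under the same field-visibility assumption as above:

    use crate::storage::*;

    // Fetch a row, merge its concurrent versions, write the merge back through
    // the fetched row_ref so the causality token travels with the insert.
    async fn read_modify_write(store: &dyn IStore) -> Result<(), StorageError> {
        let target = RowRef::new("mailbox", "uidlist");
        let fetched = store.row_fetch(&Selector::Single(&target)).await?;
        let current = fetched.into_iter().next().ok_or(StorageError::NotFound)?;

        // All concurrent versions come back; here we simply keep the
        // non-tombstone ones and concatenate them. A real caller would apply
        // its own merge logic at this point.
        let merged: Vec<u8> = current
            .value
            .iter()
            .filter_map(|alt| match alt {
                Alternative::Value(v) => Some(v.clone()),
                Alternative::Tombstone => None,
            })
            .flatten()
            .collect();

        // Writing back through the fetched row_ref keeps the causality token,
        // so the backend can resolve this insert against the version just read.
        store
            .row_insert(vec![RowVal {
                row_ref: current.row_ref,
                value: vec![Alternative::Value(merged)],
            }])
            .await?;
        Ok(())
    }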
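
row_poll builds a change-notification primitive on top of this: with no causality token it reads the current value (and, in the Garage backend, creates the row with a single zero byte if it does not exist yet), and with a token it long-polls K2V's poll_item until the value changes. A hedged caller-side sketch, again with illustrative keys:

    use crate::storage::*;

    // Wait for a few successive changes on one row by chaining the returned
    // row_ref (which carries the fresh causality token) into the next poll.
    async fn wait_for_updates(store: &dyn IStore) -> Result<(), StorageError> {
        let mut cursor = RowRef::new("mailbox", "watch");
        for _ in 0..3 {
            // Blocks until the row changes; the very first call (no causality
            // token yet) just returns the current value.
            let update = store.row_poll(&cursor).await?;
            tracing::info!("row changed, {} concurrent value(s)", update.value.len());
            cursor = update.row_ref;
        }
        Ok(())
    }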
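
Finally, the "fix metadata" commit closes the loop for blob metadata: blob_insert now forwards BlobVal.meta through put_object().set_metadata(..), and blob_fetch copies object_output.metadata back into the returned value. A small round-trip sketch, assuming meta is a plain HashMap<String, String> reachable from the caller (garage.rs assigns it directly):

    use crate::storage::*;

    // User metadata attached to a BlobVal should survive an insert/fetch
    // round trip; the key "mail/uidvalidity" is only an example.
    async fn metadata_roundtrip(store: &dyn IStore) -> Result<(), StorageError> {
        let mut val = BlobVal::new(BlobRef("mail/uidvalidity".into()), b"1".to_vec());
        val.meta.insert("version".to_string(), "1".to_string());
        store.blob_insert(val).await?;

        let fetched = store.blob_fetch(&BlobRef("mail/uidvalidity".into())).await?;
        assert_eq!(fetched.meta.get("version").map(String::as_str), Some("1"));
        Ok(())
    }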