Diffstat (limited to 'doc'):

 doc/book/connect/apps/index.md                |  47
 doc/book/cookbook/real-world.md               |   2
 doc/book/operations/layout.md                 |   2
 doc/book/quick-start/_index.md                |   2
 doc/book/reference-manual/configuration.md    | 129
 doc/book/reference-manual/features.md         |   6
 doc/book/reference-manual/s3-compatibility.md |   1
 doc/drafts/admin-api.md                       | 139

 8 files changed, 203 insertions(+), 125 deletions(-)
diff --git a/doc/book/connect/apps/index.md b/doc/book/connect/apps/index.md
index c8571fac..5def3851 100644
--- a/doc/book/connect/apps/index.md
+++ b/doc/book/connect/apps/index.md
@@ -80,6 +80,53 @@ To test your new configuration, just reload your Nextcloud webpage and start sen
*External link:* [Nextcloud Documentation > Primary Storage](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/primary_storage.html)
+#### SSE-C encryption (since Garage v1.0)
+
+Since version 1.0, Garage supports server-side encryption with customer-provided
+keys (SSE-C). In this mode, Garage is responsible for encrypting and decrypting
+objects, but it does not store the encryption key itself. The encryption key
+must be provided by Nextcloud with each request. This mode of operation is
+supported by Nextcloud and has been successfully tested with Garage.
+
+To enable SSE-C encryption:
+
+1. Make sure your Garage server is accessible via SSL through a reverse proxy
+ such as Nginx, and that it is using a valid public certificate (Nextcloud
+ might be able to connect to an S3 server that is using a self-signed
+ certificate, but you will lose many hours while trying, so don't).
+ Configure values for `use_ssl` and `port` accordingly in your `config.php`
+ file.
+
+2. Generate an encryption key using the following command:
+
+ ```
+ openssl rand -base64 32
+ ```
+
+ Make sure to keep this key **secret**!
+
+3. Add the encryption key in your `config.php` file as follows:
+
+ ```php
+ <?php
+ $CONFIG = array(
+ 'objectstore' => [
+ 'class' => '\\OC\\Files\\ObjectStore\\S3',
+ 'arguments' => [
+ ...
+ 'sse_c_key' => 'exampleencryptionkeyLbU+5fKYQcVoqnn+RaIOXgo=',
+ ...
+ ],
+    ],
+);
+ ```
+
+Nextcloud will now make Garage encrypt files at rest in the storage bucket.
+These files will not be readable by an S3 client that has credentials to the
+bucket but doesn't also know the secret encryption key.
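+
+If you ever need to access such an object outside of Nextcloud, the same key
+can be passed to any SSE-C-capable S3 client. For example, with the AWS CLI
+(the endpoint, bucket and object names below are placeholders; `sse.key` must
+contain the raw 32-byte key, i.e. the base64 value from step 2, decoded):
+
+```
+# Decode the base64 key from step 2 into a raw 32-byte key file
+echo 'exampleencryptionkeyLbU+5fKYQcVoqnn+RaIOXgo=' | base64 -d > sse.key
+
+# Download an object, supplying the same key that Nextcloud uses
+aws --endpoint-url https://garage.example.com s3 cp \
+    --sse-c AES256 --sse-c-key fileb://sse.key \
+    s3://nextcloud-bucket/some-object ./some-object
+```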
+
+
### External Storage
**From the GUI.** Activate the "External storage support" app from the "Applications" page (click on your account icon on the top right corner of your screen to display the menu). Go to your parameters page (also located below your account icon). Click on external storage (or the corresponding translation in your language).
diff --git a/doc/book/cookbook/real-world.md b/doc/book/cookbook/real-world.md
index c15ea384..cb10b550 100644
--- a/doc/book/cookbook/real-world.md
+++ b/doc/book/cookbook/real-world.md
@@ -116,7 +116,7 @@ metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
db_engine = "lmdb"
-replication_mode = "3"
+replication_factor = 3
compression_level = 2
diff --git a/doc/book/operations/layout.md b/doc/book/operations/layout.md
index cf1372b0..667e89d2 100644
--- a/doc/book/operations/layout.md
+++ b/doc/book/operations/layout.md
@@ -12,7 +12,7 @@ An introduction to building cluster layouts can be found in the [production depl
In Garage, all of the data that can be stored in a given cluster is divided
into slices which we call *partitions*. Each partition is stored by
one or several nodes in the cluster
-(see [`replication_mode`](@/documentation/reference-manual/configuration.md#replication_mode)).
+(see [`replication_factor`](@/documentation/reference-manual/configuration.md#replication_factor)).
The layout determines the correspondence between these partitions,
which exist on a logical level, and actual storage nodes.
diff --git a/doc/book/quick-start/_index.md b/doc/book/quick-start/_index.md
index f359843d..be9fe329 100644
--- a/doc/book/quick-start/_index.md
+++ b/doc/book/quick-start/_index.md
@@ -59,7 +59,7 @@ metadata_dir = "/tmp/meta"
data_dir = "/tmp/data"
db_engine = "lmdb"
-replication_mode = "none"
+replication_factor = 1
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901"
diff --git a/doc/book/reference-manual/configuration.md b/doc/book/reference-manual/configuration.md
index af7690f4..580e9fbc 100644
--- a/doc/book/reference-manual/configuration.md
+++ b/doc/book/reference-manual/configuration.md
@@ -8,7 +8,8 @@ weight = 20
Here is an example `garage.toml` configuration file that illustrates all of the possible options:
```toml
-replication_mode = "3"
+replication_factor = 3
+consistency_mode = "consistent"
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
@@ -90,7 +91,8 @@ Top-level configuration options:
[`lmdb_map_size`](#lmdb_map_size),
[`metadata_dir`](#metadata_dir),
[`metadata_fsync`](#metadata_fsync),
-[`replication_mode`](#replication_mode),
+[`replication_factor`](#replication_factor),
+[`consistency_mode`](#consistency_mode),
[`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr),
@@ -133,11 +135,12 @@ The `[admin]` section:
### Top-level configuration options
-#### `replication_mode` {#replication_mode}
+#### `replication_factor` {#replication_factor}
-Garage supports the following replication modes:
+The replication factor can be any positive integer smaller than or equal to the number of nodes in your cluster.
+The chosen replication factor has a big impact on the cluster's failure tolerance and performance characteristics.
-- `none` or `1`: data stored on Garage is stored on a single node. There is no
+- `1`: data stored on Garage is stored on a single node. There is no
redundancy, and data will be unavailable as soon as one node fails or its
network is disconnected. Do not use this for anything else than test
deployments.
@@ -148,17 +151,6 @@ Garage supports the following replication modes:
before losing data. Data remains available in read-only mode when one node is
down, but write operations will fail.
- - `2-dangerous`: a variant of mode `2`, where written objects are written to
- the second replica asynchronously. This means that Garage will return `200
- OK` to a PutObject request before the second copy is fully written (or even
- before it even starts being written). This means that data can more easily
- be lost if the node crashes before a second copy can be completed. This
- also means that written objects might not be visible immediately in read
- operations. In other words, this mode severely breaks the consistency and
- durability guarantees of standard Garage cluster operation. Benefits of
- this mode: you can still write to your cluster when one node is
- unavailable.
-
- `3`: data stored on Garage will be stored on three different nodes, if
possible each in a different zone. Garage tolerates two node failures, or
several node failures but in no more than two zones (in a deployment with at
@@ -166,55 +158,84 @@ Garage supports the following replication modes:
or node failures are only in a single zone, reading and writing data to
Garage can continue normally.
- - `3-degraded`: a variant of replication mode `3`, that lowers the read
+- `5`, `7`, ...: When setting the replication factor above 3, it is most useful to
+  choose an odd value, since for every two copies added, one more node can fail
+  before losing the ability to read from and write to the cluster.
+
+Note that with replication factors `2` and `3`,
+if at least that many zones are available, an arbitrary number of failures in
+any given zone is tolerated, as copies of the data will be spread over several zones.
+
+**Make sure `replication_factor` is the same in the configuration files of all nodes.
+Never run a Garage cluster where that is not the case.**
+
+It is technically possible to change the replication factor although it's a
+dangerous operation that is not officially supported. This requires you to
+delete the existing cluster layout and create a new layout from scratch,
+meaning that a full rebalancing of your cluster's data will be needed. To do
+it, shut down your cluster entirely, delete the `cluster_layout` files in the
+meta directories of all your nodes, update all your configuration files with
+the new `replication_factor` parameter, restart your cluster, and then create a
+new layout with all the nodes you want to keep. Rebalancing data will take
+some time, and data might temporarily appear unavailable to your users.
+It is recommended to shut down public access to the cluster while rebalancing
+is in progress. In theory, no data should be lost as rebalancing is a
+routine operation for Garage, although we cannot guarantee that everything
+will go right in such an extreme scenario.
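+
+As a sketch, the procedure could look like this on a systemd-based setup (the
+unit name, metadata path, zone and capacity are placeholders to adapt):
+
+```
+# On EVERY node: stop Garage and delete the stored cluster layout
+systemctl stop garage
+rm /var/lib/garage/meta/cluster_layout
+
+# On EVERY node: set the new replication_factor in the config file,
+# then restart
+systemctl start garage
+
+# On ONE node: build a new layout from scratch and apply it
+garage layout assign -z dc1 -c 1T <node_id>
+garage layout apply --version 1
+```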
+
+#### `consistency_mode` {#consistency_mode}
+
+The consistency mode setting determines the read and write behaviour of your cluster.
+
+ - `consistent`: The default setting. The read and write quorums are chosen
+   so that read-after-write consistency is guaranteed.
+ - `degraded`: Lowers the read
quorum to `1`, to allow you to read data from your cluster when several
nodes (or nodes in several zones) are unavailable. In this mode, Garage
- does not provide read-after-write consistency anymore. The write quorum is
- still 2, ensuring that data successfully written to Garage is stored on at
- least two nodes.
-
- - `3-dangerous`: a variant of replication mode `3` that lowers both the read
+ does not provide read-after-write consistency anymore.
+ The write quorum stays the same as in the `consistent` mode, ensuring that
+ data successfully written to Garage is stored on multiple nodes (depending
+ on the replication factor).
+ - `dangerous`: This mode lowers both the read
and write quorums to `1`, to allow you to both read and write to your
cluster when several nodes (or nodes in several zones) are unavailable. It
is the least consistent mode of operation proposed by Garage, and also one
that should probably never be used.
-Note that in modes `2` and `3`,
-if at least the same number of zones are available, an arbitrary number of failures in
-any given zone is tolerated as copies of data will be spread over several zones.
+Changing the `consistency_mode` while leaving the `replication_factor` untouched
+(e.g. setting your node's `consistency_mode` to `degraded` when it was previously unset, or
+changing it from `dangerous` to `consistent`) is easy: update the `consistency_mode`
+parameter in your config files and restart all your Garage nodes.
-**Make sure `replication_mode` is the same in the configuration files of all nodes.
-Never run a Garage cluster where that is not the case.**
+The consistency mode can be used together with various replication factors, to achieve
+a wide range of read and write characteristics. Some examples:
+
+ - Replication factor `2`, consistency mode `degraded`: While this combination
+   technically exists, it behaves exactly like consistency mode `consistent`,
+   since the read quorum with replication factor `2` in `consistent` mode is already 1.
+
+ - Replication factor `2`, consistency mode `dangerous`: written objects are written to
+   the second replica asynchronously. Garage will return `200 OK` to a PutObject
+   request before the second copy is fully written (or even before it starts
+   being written), so data can more easily be lost if the node crashes before a
+   second copy is completed. Written objects might also not be visible
+   immediately in read operations, as illustrated below. In other words, this
+   configuration severely breaks the consistency and durability guarantees of
+   standard Garage cluster operation. Its benefit: you can still write to your
+   cluster when one node is unavailable.
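+
+   As an illustration of that last point (the endpoint and `test-bucket` are
+   placeholders; the AWS CLI is assumed to have credentials for the bucket):
+
+   ```
+   # With consistency_mode = "dangerous", the PutObject below is acknowledged
+   # as soon as one replica has the data, so the immediate read-back may be
+   # served by the other replica and fail until the copy propagates.
+   aws --endpoint-url http://localhost:3900 s3 cp ./hello.txt s3://test-bucket/hello.txt
+   aws --endpoint-url http://localhost:3900 s3 cp s3://test-bucket/hello.txt -
+   ```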
-The quorums associated with each replication mode are described below:
+The quorums associated with each combination of replication factor and consistency mode are described below:
-| `replication_mode` | Number of replicas | Write quorum | Read quorum | Read-after-write consistency? |
-| ------------------ | ------------------ | ------------ | ----------- | ----------------------------- |
-| `none` or `1` | 1 | 1 | 1 | yes |
-| `2` | 2 | 2 | 1 | yes |
-| `2-dangerous` | 2 | 1 | 1 | NO |
-| `3` | 3 | 2 | 2 | yes |
-| `3-degraded` | 3 | 2 | 1 | NO |
-| `3-dangerous` | 3 | 1 | 1 | NO |
-
-Changing the `replication_mode` between modes with the same number of replicas
-(e.g. from `3` to `3-degraded`, or from `2-dangerous` to `2`), can be done easily by
-just changing the `replication_mode` parameter in your config files and restarting all your
-Garage nodes.
-
-It is also technically possible to change the replication mode to a mode with a
-different numbers of replicas, although it's a dangerous operation that is not
-officially supported. This requires you to delete the existing cluster layout
-and create a new layout from scratch, meaning that a full rebalancing of your
-cluster's data will be needed. To do it, shut down your cluster entirely,
-delete the `custer_layout` files in the meta directories of all your nodes,
-update all your configuration files with the new `replication_mode` parameter,
-restart your cluster, and then create a new layout with all the nodes you want
-to keep. Rebalancing data will take some time, and data might temporarily
-appear unavailable to your users. It is recommended to shut down public access
-to the cluster while rebalancing is in progress. In theory, no data should be
-lost as rebalancing is a routine operation for Garage, although we cannot
-guarantee you that everything will go right in such an extreme scenario.
+| `consistency_mode` | `replication_factor` | Write quorum | Read quorum | Read-after-write consistency? |
+| ------------------ | -------------------- | ------------ | ----------- | ----------------------------- |
+| `consistent` | 1 | 1 | 1 | yes |
+| `consistent` | 2 | 2 | 1 | yes |
+| `dangerous` | 2 | 1 | 1 | NO |
+| `consistent` | 3 | 2 | 2 | yes |
+| `degraded` | 3 | 2 | 1 | NO |
+| `dangerous` | 3 | 1 | 1 | NO |
#### `metadata_dir` {#metadata_dir}
diff --git a/doc/book/reference-manual/features.md b/doc/book/reference-manual/features.md
index f7014b26..34f692cc 100644
--- a/doc/book/reference-manual/features.md
+++ b/doc/book/reference-manual/features.md
@@ -39,10 +39,10 @@ Read about cluster layout management [here](@/documentation/operations/layout.md
### Several replication modes
-Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data,
+Garage supports a variety of replication modes, with a configurable replica count,
and with various levels of consistency, in order to adapt to a variety of usage scenarios.
-Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode)
-to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want).
+Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_factor)
+to select the replication mode best suited to your use case (hint: in most cases, `replication_factor = 3` is what you want).
### Compression and deduplication
diff --git a/doc/book/reference-manual/s3-compatibility.md b/doc/book/reference-manual/s3-compatibility.md
index 1bcfd123..d2c47f3e 100644
--- a/doc/book/reference-manual/s3-compatibility.md
+++ b/doc/book/reference-manual/s3-compatibility.md
@@ -33,6 +33,7 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
| [URL path-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access) (eg. `host.tld/bucket/key`) | ✅ Implemented | ✅ | ✅ | ❓| ✅ |
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
+| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
*Note:* OpenIO does not say if it supports presigned URLs. Because it is part
of signature v4 and they claim they support it without further details,
diff --git a/doc/drafts/admin-api.md b/doc/drafts/admin-api.md
index ce56d8e0..e7851ab1 100644
--- a/doc/drafts/admin-api.md
+++ b/doc/drafts/admin-api.md
@@ -69,8 +69,8 @@ Example response body:
```json
{
- "node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
- "garageVersion": "git:v0.9.0-dev",
+ "node": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
+ "garageVersion": "v0.10.0",
"garageFeatures": [
"k2v",
"sled",
@@ -81,83 +81,92 @@ Example response body:
],
"rustVersion": "1.68.0",
"dbEngine": "LMDB (using Heed crate)",
- "knownNodes": [
+ "layoutVersion": 5,
+ "nodes": [
{
- "id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
- "addr": "10.0.0.11:3901",
+ "id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
+ "role": {
+ "id": "62b218d848e86a64f7fe1909735f29a4350547b54c4b204f91246a14eb0a1a8c",
+ "zone": "dc1",
+ "capacity": 100000000000,
+ "tags": []
+ },
+ "addr": "10.0.0.3:3901",
+ "hostname": "node3",
"isUp": true,
- "lastSeenSecsAgo": 9,
- "hostname": "node1"
+ "lastSeenSecsAgo": 12,
+ "draining": false,
+ "dataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
+ },
+ "metadataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
+ }
},
{
- "id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
- "addr": "10.0.0.12:3901",
+ "id": "a11c7cf18af297379eff8688360155fe68d9061654449ba0ce239252f5a7487f",
+ "role": null,
+ "addr": "10.0.0.2:3901",
+ "hostname": "node2",
"isUp": true,
- "lastSeenSecsAgo": 1,
- "hostname": "node2"
+ "lastSeenSecsAgo": 11,
+ "draining": true,
+ "dataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
+ },
+ "metadataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
+ }
},
{
- "id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
- "addr": "10.0.0.21:3901",
+ "id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
+ "role": {
+ "id": "a235ac7695e0c54d7b403943025f57504d500fdcc5c3e42c71c5212faca040a2",
+ "zone": "dc1",
+ "capacity": 100000000000,
+ "tags": []
+ },
+ "addr": "127.0.0.1:3904",
+ "hostname": "lindy",
"isUp": true,
- "lastSeenSecsAgo": 7,
- "hostname": "node3"
+ "lastSeenSecsAgo": 2,
+ "draining": false,
+ "dataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
+ },
+ "metadataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
+ }
},
{
- "id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
- "addr": "10.0.0.22:3901",
- "isUp": true,
- "lastSeenSecsAgo": 1,
- "hostname": "node4"
- }
- ],
- "layout": {
- "version": 12,
- "roles": [
- {
- "id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f",
+ "id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
+ "role": {
+ "id": "b10c110e4e854e5aa3f4637681befac755154b20059ec163254ddbfae86b09df",
"zone": "dc1",
- "capacity": 10737418240,
- "tags": [
- "node1"
- ]
+ "capacity": 100000000000,
+ "tags": []
},
- {
- "id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff",
- "zone": "dc1",
- "capacity": 10737418240,
- "tags": [
- "node2"
- ]
+ "addr": "10.0.0.1:3901",
+ "hostname": "node1",
+ "isUp": true,
+ "lastSeenSecsAgo": 3,
+ "draining": false,
+ "dataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
},
- {
- "id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
- "zone": "dc2",
- "capacity": 10737418240,
- "tags": [
- "node3"
- ]
+ "metadataPartition": {
+ "available": 660270088192,
+ "total": 873862266880
}
- ],
- "stagedRoleChanges": [
- {
- "id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
- "remove": false,
- "zone": "dc2",
- "capacity": 10737418240,
- "tags": [
- "node4"
- ]
- }
- {
- "id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27",
- "remove": true,
- "zone": null,
- "capacity": null,
- "tags": null,
- }
- ]
- }
+ }
+ ]
}
```
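+
+Such a response can be fetched with any HTTP client, for example (the admin
+port `3903`, the `/v1/status` path and the token variable are assumptions
+based on this draft's examples and may differ in your deployment):
+
+```
+curl -s -H "Authorization: Bearer $GARAGE_ADMIN_TOKEN" \
+    http://localhost:3903/v1/status
+```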