Diffstat (limited to 'experimental')
 experimental/bad.csi-s3/deploy/csi-s3.hcl                  |  39 +++
 experimental/bad.csi-s3/deploy/dummy-volume.hcl            |  20 ++
 experimental/bad.nextcloud/config/litestream.yml           |  10 +
 experimental/bad.nextcloud/deploy/nextcloud.hcl            | 137 +++++++
 experimental/bad.nextcloud/secrets/nextcloud/admin_pass    |   1 +
 experimental/bad.nextcloud/secrets/nextcloud/admin_user    |   1 +
 experimental/bad.nextcloud/secrets/nextcloud/s3_access_key |   1 +
 experimental/bad.nextcloud/secrets/nextcloud/s3_secret_key |   1 +
 experimental/bad.yugabyte/deploy/yugabyte.hcl              | 204 ++++++++++
 9 files changed, 414 insertions(+), 0 deletions(-)
diff --git a/experimental/bad.csi-s3/deploy/csi-s3.hcl b/experimental/bad.csi-s3/deploy/csi-s3.hcl
new file mode 100644
index 0000000..8e70c6a
--- /dev/null
+++ b/experimental/bad.csi-s3/deploy/csi-s3.hcl
@@ -0,0 +1,39 @@
+job "plugin-csi-s3-nodes" {
+ datacenters = ["neptune", "pluton"]
+
+ # you can run node plugins as service jobs as well, but this ensures
+ # that all nodes in the DC have a copy.
+ type = "system"
+
+ group "nodes" {
+ task "plugin" {
+ driver = "docker"
+
+ config {
+ image = "ctrox/csi-s3:v1.2.0-rc.1"
+
+ args = [
+ "--endpoint=unix://csi/csi.sock",
+ "--nodeid=${node.unique.id}",
+ "--logtostderr",
+ "--v=5",
+ ]
+
+ # node plugins must run as privileged jobs because they
+ # mount disks to the host
+ privileged = true
+ }
+
+ csi_plugin {
+ id = "csi-s3"
+ type = "node"
+ mount_dir = "/csi"
+ }
+
+ resources {
+ cpu = 500
+ memory = 256
+ }
+ }
+ }
+}
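
Note: once this system job is submitted, the plugin's registration can be
checked from the Nomad CLI (a sketch, assuming the job file above is saved
as csi-s3.hcl in the current directory):

    nomad job run csi-s3.hcl
    nomad plugin status csi-s3

`nomad plugin status` should report one healthy node per client before any
volume that uses the plugin is registered.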
diff --git a/experimental/bad.csi-s3/deploy/dummy-volume.hcl b/experimental/bad.csi-s3/deploy/dummy-volume.hcl
new file mode 100644
index 0000000..67dfd39
--- /dev/null
+++ b/experimental/bad.csi-s3/deploy/dummy-volume.hcl
@@ -0,0 +1,20 @@
+id = "dummy-volume"
+name = "dummy-volume"
+type = "csi"
+plugin_id = "csi-s3"
+
+capability {
+ access_mode = "single-node-writer"
+ attachment_mode = "file-system"
+}
+
+secrets {
+ accessKeyId = "GKfd94f06139bb73de5642baf5"
+ secretAccessKey = "a4fa6c956d847b145a823c4615e4655126c67babf3cce2337b4d73cd381d7f06"
+ endpoint = "https://garage-staging.home.adnab.me"
+ region = "garage-staging"
+}
+
+parameters {
+ mounter = "rclone"
+}
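
Note: this volume spec is not a job; it would be registered separately with
the CLI (a sketch, assuming the csi-s3 plugin above reports healthy):

    nomad volume create dummy-volume.hcl
    nomad volume status dummy-volume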
diff --git a/experimental/bad.nextcloud/config/litestream.yml b/experimental/bad.nextcloud/config/litestream.yml
new file mode 100644
index 0000000..46eca93
--- /dev/null
+++ b/experimental/bad.nextcloud/config/litestream.yml
@@ -0,0 +1,10 @@
+dbs:
+ - path: /ephemeral/nextcloud.db
+ replicas:
+ - url: s3://nextcloud-db/nextcloud.db
+ region: garage-staging
+ endpoint: http://{{ env "attr.unique.network.ip-address" }}:3990
+ access-key-id: {{ key "secrets/nextcloud/s3_access_key" | trimSpace }}
+ secret-access-key: {{ key "secrets/nextcloud/s3_secret_key" | trimSpace }}
+ force-path-style: true
+ sync-interval: 60s
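
Note: this config replicates a single SQLite database to an S3 bucket on the
Garage staging endpoint, syncing every 60 seconds. Replication state can be
inspected from inside a litestream container (a sketch, assuming the rendered
config is mounted at /etc/litestream.yml as in the job below):

    litestream snapshots -config /etc/litestream.yml /ephemeral/nextcloud.db
    litestream restore -if-replica-exists -config /etc/litestream.yml /ephemeral/nextcloud.db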
diff --git a/experimental/bad.nextcloud/deploy/nextcloud.hcl b/experimental/bad.nextcloud/deploy/nextcloud.hcl
new file mode 100644
index 0000000..45d1b6e
--- /dev/null
+++ b/experimental/bad.nextcloud/deploy/nextcloud.hcl
@@ -0,0 +1,137 @@
+job "nextcloud" {
+ datacenters = ["neptune"]
+ type = "service"
+
+ group "nextcloud" {
+ count = 1
+
+ network {
+ port "http" {
+ to = 80
+ }
+ }
+
+ ephemeral_disk {
+ size = 10000
+ }
+
+ restart {
+ attempts = 10
+ delay = "30s"
+ }
+
+ task "restore-db" {
+ lifecycle {
+ hook = "prestart"
+ sidecar = false
+ }
+
+ driver = "docker"
+ config {
+ image = "litestream/litestream:0.3.7"
+ args = [
+ "restore", "-config", "/etc/litestream.yml", "/ephemeral/nextcloud.db"
+ ]
+ volumes = [
+ "../alloc/data:/ephemeral",
+ "secrets/litestream.yml:/etc/litestream.yml"
+ ]
+ }
+ user = "33"
+
+ template {
+ data = file("../config/litestream.yml")
+ destination = "secrets/litestream.yml"
+ }
+
+ resources {
+ memory = 200
+ cpu = 1000
+ }
+ }
+
+ task "nextcloud" {
+ driver = "docker"
+ config {
+ image = "nextcloud:22.2.3-apache"
+ ports = [ "http" ]
+ #entrypoint = [ "/bin/sh", "-c" ]
+ #command = "apache2-foreground"
+
+ volumes = [
+ "../alloc/data:/var/www/html/data",
+ ]
+ }
+ user = "33"
+
+ template {
+ data = <<EOH
+SQLITE_DATABASE=nextcloud
+NEXTCLOUD_ADMIN_USER={{ key "secrets/nextcloud/admin_user" }}
+NEXTCLOUD_ADMIN_PASSWORD={{ key "secrets/nextcloud/admin_pass" }}
+NEXTCLOUD_TRUSTED_DOMAINS=cloud.home.adnab.me
+OVERWRITEHOST=cloud.home.adnab.me
+OVERWRITEPROTOCOL=https
+OBJECTSTORE_S3_HOST={{ env "attr.unique.network.ip-address" }}
+OBJECTSTORE_S3_PORT=3990
+OBJECTSTORE_S3_BUCKET=nextcloud-data
+OBJECTSTORE_S3_KEY={{ key "secrets/nextcloud/s3_access_key" }}
+OBJECTSTORE_S3_SECRET={{ key "secrets/nextcloud/s3_secret_key" }}
+OBJECTSTORE_S3_SSL=false
+OBJECTSTORE_S3_REGION=garage-staging
+OBJECTSTORE_S3_USEPATH_STYLE=true
+EOH
+ destination = "secrets/env"
+ env = true
+ }
+
+ resources {
+ memory = 2500
+ cpu = 1000
+ }
+
+ service {
+ port = "http"
+ tags = [
+ "tricot cloud.home.adnab.me 100",
+ ]
+ check {
+ type = "tcp"
+ port = "http"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+ }
+
+ task "replicate-db" {
+ driver = "docker"
+ config {
+ image = "litestream/litestream:0.3.7"
+ args = [
+ "replicate", "-config", "/etc/litestream.yml"
+ ]
+ volumes = [
+ "../alloc/data:/ephemeral",
+ "secrets/litestream.yml:/etc/litestream.yml"
+ ]
+ }
+ user = "33"
+
+ template {
+ data = file("../config/litestream.yml")
+ destination = "secrets/litestream.yml"
+ }
+
+ resources {
+ memory = 200
+ cpu = 100
+ }
+ }
+ }
+}
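
Note: the job uses a three-task pattern: "restore-db" is a prestart hook that
pulls the database back from S3, "nextcloud" serves the app, and
"replicate-db" runs alongside it as a second main task streaming changes back
to S3. An alternative would be to mark the replicator as an explicit sidecar
(a sketch of valid Nomad syntax, not what the job above does):

    lifecycle {
      hook    = "poststart"
      sidecar = true
    }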
diff --git a/experimental/bad.nextcloud/secrets/nextcloud/admin_pass b/experimental/bad.nextcloud/secrets/nextcloud/admin_pass
new file mode 100644
index 0000000..ffc9830
--- /dev/null
+++ b/experimental/bad.nextcloud/secrets/nextcloud/admin_pass
@@ -0,0 +1 @@
+CMD_ONCE openssl rand -base64 9
diff --git a/experimental/bad.nextcloud/secrets/nextcloud/admin_user b/experimental/bad.nextcloud/secrets/nextcloud/admin_user
new file mode 100644
index 0000000..7ff2967
--- /dev/null
+++ b/experimental/bad.nextcloud/secrets/nextcloud/admin_user
@@ -0,0 +1 @@
+USER Username for administrator account
diff --git a/experimental/bad.nextcloud/secrets/nextcloud/s3_access_key b/experimental/bad.nextcloud/secrets/nextcloud/s3_access_key
new file mode 100644
index 0000000..692dc34
--- /dev/null
+++ b/experimental/bad.nextcloud/secrets/nextcloud/s3_access_key
@@ -0,0 +1 @@
+USER S3 access key ID for database storage
diff --git a/experimental/bad.nextcloud/secrets/nextcloud/s3_secret_key b/experimental/bad.nextcloud/secrets/nextcloud/s3_secret_key
new file mode 100644
index 0000000..8bef13c
--- /dev/null
+++ b/experimental/bad.nextcloud/secrets/nextcloud/s3_secret_key
@@ -0,0 +1 @@
+USER S3 secret key for database storage
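
Note: the four files above look like secret-manager templates rather than
literal secret values; presumably CMD_ONCE generates the value once by
running the given command, and USER prompts the operator with the given
description (an assumption based on the directive names, not confirmed by
this diff).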
diff --git a/experimental/bad.yugabyte/deploy/yugabyte.hcl b/experimental/bad.yugabyte/deploy/yugabyte.hcl
new file mode 100644
index 0000000..e7efa7a
--- /dev/null
+++ b/experimental/bad.yugabyte/deploy/yugabyte.hcl
@@ -0,0 +1,204 @@
+job "yugabytedb" {
+ type = "service"
+ datacenters = [ "neptune", "pluton" ]
+
+ priority = 80
+
+ constraint {
+ attribute = "${attr.cpu.arch}"
+ value = "amd64"
+ }
+
+ group "master" {
+ count = 3
+
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "regexp"
+ value = "(caribou|cariacou|carcajou)"
+ }
+
+ network {
+ port "admin" { static = 7000 }
+ port "master-rpc" { static = 7100 }
+ }
+
+ update {
+ max_parallel = 1
+ min_healthy_time = "30s"
+ healthy_deadline = "5m"
+ }
+
+ task "master" {
+ driver = "docker"
+
+ config {
+ image = "yugabytedb/yugabyte:2.11.1.0-b305"
+ command = "/home/yugabyte/bin/yb-master"
+ args = [
+ "--fs_data_dirs=/mnt/master",
+ "--replication_factor=3",
+ "--master_addresses=10.42.0.21:7100,10.42.0.22:7100,10.42.0.23:7100",
+ "--rpc_bind_addresses=0.0.0.0:7100",
+ "--placement_cloud=deuxfleurs",
+ "--placement_region=staging",
+ "--placement_zone=neptune",
+ "--minloglevel=1",
+ ]
+ volumes = [
+ "/mnt/ssd/yugabyte/master:/mnt/master",
+ ]
+ network_mode = "host"
+ logging {
+ type = "journald"
+ }
+ }
+
+ resources {
+ memory = 1000
+ cpu = 1000
+ }
+
+ kill_signal = "SIGINT"
+ kill_timeout = "20s"
+
+ service {
+ tags = ["yugabyte-master-rpc"]
+ port = 7100
+ address_mode = "driver"
+ name = "yugabyte-master-rpc"
+ check {
+ type = "tcp"
+ port = 7100
+ address_mode = "driver"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+
+ service {
+ tags = ["yugabyte-admin"]
+ port = 7000
+ address_mode = "driver"
+ name = "yugabyte-admin"
+ check {
+ type = "tcp"
+ port = 7000
+ address_mode = "driver"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+
+ restart {
+ interval = "30m"
+ attempts = 10
+ delay = "15s"
+ mode = "delay"
+ }
+ }
+ }
+
+ group "tserver" {
+ count = 3
+
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "!="
+ value = "spoutnik"
+ }
+
+ network {
+ port "ysql" { static = 5433 }
+ port "tserver-rpc" { static = 9100 }
+ }
+
+ update {
+ max_parallel = 1
+ min_healthy_time = "30s"
+ healthy_deadline = "5m"
+ }
+
+ task "tserver" {
+ driver = "docker"
+
+ config {
+ image = "yugabytedb/yugabyte:2.11.1.0-b305"
+ command = "/home/yugabyte/bin/yb-tserver"
+ args = [
+ "--fs_data_dirs=/mnt/tserver",
+ "--start_pgsql_proxy",
+ "--tserver_master_addrs=10.42.0.21:7100,10.42.0.22:7100,10.42.0.23:7100",
+ "--rpc_bind_addresses=0.0.0.0:9100",
+ "--placement_cloud=deuxfleurs",
+ "--placement_region=staging",
+ "--placement_zone=neptune",
+ "--minloglevel=1",
+ ]
+ volumes = [
+ "/mnt/ssd/yugabyte/tserver:/mnt/tserver",
+ ]
+ network_mode = "host"
+ logging {
+ type = "journald"
+ }
+ }
+
+ resources {
+ memory = 1000
+ cpu = 1000
+ }
+
+ kill_signal = "SIGINT"
+ kill_timeout = "20s"
+
+ service {
+ tags = ["yugabyte-tserver-rpc"]
+ port = 9100
+ address_mode = "driver"
+ name = "yugabyte-tserver-rpc"
+ check {
+ type = "tcp"
+ port = 9100
+ address_mode = "driver"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+
+ service {
+ tags = [ "yugabyte-ysql" ]
+ port = 5433
+ address_mode = "driver"
+ name = "yugabyte-ysql"
+ check {
+ type = "tcp"
+ port = 5433
+ address_mode = "driver"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+ }
+ }
+}
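
Note: with the tservers up, YSQL speaks the PostgreSQL wire protocol on the
static port 5433, so a plain psql client should connect (a sketch; the host
placeholder is illustrative, and "yugabyte" is YugabyteDB's default user):

    psql -h <tserver-ip> -p 5433 -U yugabyte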