author    | Alex Auvolat <alex@adnab.me> | 2022-12-24 22:59:37 +0100
committer | Alex Auvolat <alex@adnab.me> | 2022-12-24 22:59:37 +0100
commit    | 8d17a07c9be5cd9d400644c34ea50177535d15f6
tree      | cac734f62d4c04c898d4e70d1e2ba65f933317ca /experimental/app
parent    | 4b527c4db8060679d21e5bb596bde91ce39df393
reorganize some things
Diffstat (limited to 'experimental/app')
18 files changed, 1072 insertions(+), 0 deletions(-)
diff --git a/experimental/app/csi-s3/deploy/csi-s3.hcl b/experimental/app/csi-s3/deploy/csi-s3.hcl
new file mode 100644
index 0000000..8e70c6a
--- /dev/null
+++ b/experimental/app/csi-s3/deploy/csi-s3.hcl
@@ -0,0 +1,39 @@
+job "plugin-csi-s3-nodes" {
+  datacenters = ["neptune", "pluton"]
+
+  # you can run node plugins as service jobs as well, but this ensures
+  # that all nodes in the DC have a copy.
+  type = "system"
+
+  group "nodes" {
+    task "plugin" {
+      driver = "docker"
+
+      config {
+        image = "ctrox/csi-s3:v1.2.0-rc.1"
+
+        args = [
+          "--endpoint=unix://csi/csi.sock",
+          "--nodeid=${node.unique.id}",
+          "--logtostderr",
+          "--v=5",
+        ]
+
+        # node plugins must run as privileged jobs because they
+        # mount disks to the host
+        privileged = true
+      }
+
+      csi_plugin {
+        id        = "csi-s3"
+        type      = "node"
+        mount_dir = "/csi"
+      }
+
+      resources {
+        cpu    = 500
+        memory = 256
+      }
+    }
+  }
+}
diff --git a/experimental/app/csi-s3/deploy/dummy-volume.hcl b/experimental/app/csi-s3/deploy/dummy-volume.hcl
new file mode 100644
index 0000000..67dfd39
--- /dev/null
+++ b/experimental/app/csi-s3/deploy/dummy-volume.hcl
@@ -0,0 +1,20 @@
+id        = "dummy-volume"
+name      = "dummy-volume"
+type      = "csi"
+plugin_id = "csi-s3"
+
+capability {
+  access_mode     = "single-node-writer"
+  attachment_mode = "file-system"
+}
+
+secrets {
+  accessKeyId     = "GKfd94f06139bb73de5642baf5"
+  secretAccessKey = "a4fa6c956d847b145a823c4615e4655126c67babf3cce2337b4d73cd381d7f06"
+  endpoint        = "https://garage-staging.home.adnab.me"
+  region          = "garage-staging"
+}
+
+parameters {
+  mounter = "rclone"
+}
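Taken together, these two files follow the standard Nomad CSI workflow: the system job runs the ctrox/csi-s3 node plugin on every client, and the volume spec ties a Garage bucket to that plugin. A minimal usage sketch (the "data" volume name and the consuming job are hypothetical, not part of this commit):

    # deploy the node plugin everywhere, then register the volume with Nomad
    nomad job run csi-s3.hcl
    nomad volume register dummy-volume.hcl
    nomad volume status dummy-volume

    # a consuming job would then claim it with a group-level stanza such as:
    #   volume "data" {
    #     type            = "csi"
    #     source          = "dummy-volume"
    #     attachment_mode = "file-system"
    #     access_mode     = "single-node-writer"
    #   }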
diff --git a/experimental/app/nextcloud/config/litestream.yml b/experimental/app/nextcloud/config/litestream.yml
new file mode 100644
index 0000000..46eca93
--- /dev/null
+++ b/experimental/app/nextcloud/config/litestream.yml
@@ -0,0 +1,10 @@
+dbs:
+  - path: /ephemeral/nextcloud.db
+    replicas:
+      - url: s3://nextcloud-db/nextcloud.db
+        region: garage-staging
+        endpoint: http://{{ env "attr.unique.network.ip-address" }}:3990
+        access-key-id: {{ key "secrets/nextcloud/s3_access_key" | trimSpace }}
+        secret-access-key: {{ key "secrets/nextcloud/s3_secret_key" | trimSpace }}
+        force-path-style: true
+        sync-interval: 60s
diff --git a/experimental/app/nextcloud/deploy/nextcloud.hcl b/experimental/app/nextcloud/deploy/nextcloud.hcl
new file mode 100644
index 0000000..45d1b6e
--- /dev/null
+++ b/experimental/app/nextcloud/deploy/nextcloud.hcl
@@ -0,0 +1,137 @@
+job "nextcloud" {
+  datacenters = ["neptune"]
+  type = "service"
+
+  group "nextcloud" {
+    count = 1
+
+    network {
+      port "http" {
+        to = 80
+      }
+    }
+
+    ephemeral_disk {
+      size = 10000
+    }
+
+    restart {
+      attempts = 10
+      delay = "30s"
+    }
+
+    task "restore-db" {
+      lifecycle {
+        hook = "prestart"
+        sidecar = false
+      }
+
+      driver = "docker"
+      config {
+        image = "litestream/litestream:0.3.7"
+        args = [
+          "restore", "-config", "/etc/litestream.yml", "/ephemeral/nextcloud.db"
+        ]
+        volumes = [
+          "../alloc/data:/ephemeral",
+          "secrets/litestream.yml:/etc/litestream.yml"
+        ]
+      }
+      user = "33"
+
+      template {
+        data = file("../config/litestream.yml")
+        destination = "secrets/litestream.yml"
+      }
+
+      resources {
+        memory = 200
+        cpu = 1000
+      }
+    }
+
+    task "nextcloud" {
+      driver = "docker"
+      config {
+        image = "nextcloud:22.2.3-apache"
+        ports = [ "http" ]
+        #entrypoint = [ "/bin/sh", "-c" ]
+        #command = "apache2-foreground"
+
+        volumes = [
+          "../alloc/data:/var/www/html/data",
+        ]
+      }
+      user = "33"
+
+      template {
+        data = <<EOH
+SQLITE_DATABASE=nextcloud
+NEXTCLOUD_ADMIN_USER={{ key "secrets/nextcloud/admin_user" }}
+NEXTCLOUD_ADMIN_PASSWORD={{ key "secrets/nextcloud/admin_pass" }}
+NEXTCLOUD_TRUSTED_DOMAINS=cloud.home.adnab.me
+OVERWRITEHOST=cloud.home.adnab.me
+OVERWRITEPROTOCOL=https
+OBJECTSTORE_S3_HOST={{ env "attr.unique.network.ip-address" }}
+OBJECTSTORE_S3_PORT=3990
+OBJECTSTORE_S3_BUCKET=nextcloud-data
+OBJECTSTORE_S3_KEY={{ key "secrets/nextcloud/s3_access_key" }}
+OBJECTSTORE_S3_SECRET={{ key "secrets/nextcloud/s3_secret_key" }}
+OBJECTSTORE_S3_SSL=false
+OBJECTSTORE_S3_REGION=garage-staging
+OBJECTSTORE_S3_USEPATH_STYLE=true
+EOH
+        destination = "secrets/env"
+        env = true
+      }
+
+      resources {
+        memory = 2500
+        cpu = 1000
+      }
+
+      service {
+        port = "http"
+        tags = [
+          "tricot cloud.home.adnab.me 100",
+        ]
+        check {
+          type = "tcp"
+          port = "http"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+    }
+
+    task "replicate-db" {
+      driver = "docker"
+      config {
+        image = "litestream/litestream:0.3.7"
+        args = [
+          "replicate", "-config", "/etc/litestream.yml"
+        ]
+        volumes = [
+          "../alloc/data:/ephemeral",
+          "secrets/litestream.yml:/etc/litestream.yml"
+        ]
+      }
+      user = "33"
+
+      template {
+        data = file("../config/litestream.yml")
+        destination = "secrets/litestream.yml"
+      }
+
+      resources {
+        memory = 200
+        cpu = 100
+      }
+    }
+  }
+}
diff --git a/experimental/app/nextcloud/secrets/nextcloud/admin_pass b/experimental/app/nextcloud/secrets/nextcloud/admin_pass
new file mode 100644
index 0000000..ffc9830
--- /dev/null
+++ b/experimental/app/nextcloud/secrets/nextcloud/admin_pass
@@ -0,0 +1 @@
+CMD_ONCE openssl rand -base64 9
diff --git a/experimental/app/nextcloud/secrets/nextcloud/admin_user b/experimental/app/nextcloud/secrets/nextcloud/admin_user
new file mode 100644
index 0000000..7ff2967
--- /dev/null
+++ b/experimental/app/nextcloud/secrets/nextcloud/admin_user
@@ -0,0 +1 @@
+USER Username for administrator account
diff --git a/experimental/app/nextcloud/secrets/nextcloud/s3_access_key b/experimental/app/nextcloud/secrets/nextcloud/s3_access_key
new file mode 100644
index 0000000..692dc34
--- /dev/null
+++ b/experimental/app/nextcloud/secrets/nextcloud/s3_access_key
@@ -0,0 +1 @@
+USER S3 access key ID for database storage
diff --git a/experimental/app/nextcloud/secrets/nextcloud/s3_secret_key b/experimental/app/nextcloud/secrets/nextcloud/s3_secret_key
new file mode 100644
index 0000000..8bef13c
--- /dev/null
+++ b/experimental/app/nextcloud/secrets/nextcloud/s3_secret_key
@@ -0,0 +1 @@
+USER S3 secret key for database storage
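The nextcloud job above implements the usual Litestream pattern around an ephemeral SQLite database: a prestart task restores /ephemeral/nextcloud.db from the Garage bucket before Nextcloud starts, and a third task streams WAL changes back to S3 while it runs. If the replica needs to be inspected out of band, a sketch like the following should work against the same rendered config (the scratch paths are illustrative):

    # list known snapshot generations, then restore into a scratch file
    litestream generations -config litestream.yml /ephemeral/nextcloud.db
    litestream restore -config litestream.yml -o /tmp/nextcloud-check.db /ephemeral/nextcloud.db
    sqlite3 /tmp/nextcloud-check.db "PRAGMA integrity_check;"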
diff --git a/experimental/app/ssb/deploy/go-ssb-room.hcl b/experimental/app/ssb/deploy/go-ssb-room.hcl
new file mode 100644
index 0000000..c9c4109
--- /dev/null
+++ b/experimental/app/ssb/deploy/go-ssb-room.hcl
@@ -0,0 +1,83 @@
+job "ssb" {
+  datacenters = ["neptune"]
+  type = "service"
+
+  group "go-ssb-room" {
+    count = 1
+
+    constraint {
+      attribute = "${attr.unique.hostname}"
+      value = "caribou"
+    }
+
+    network {
+      port "web_port" { to = 3888 }
+      port "ssb_port" { to = 8008 }
+    }
+
+    task "go-ssb-room" {
+      driver = "docker"
+      config {
+        image = "lxpz/amd64_go_ssb_room:1"
+        readonly_rootfs = true
+        ports = [ "web_port", "ssb_port" ]
+        network_mode = "host"
+        command = "/app/cmd/server/server"
+        args = [
+          "-https-domain=ssb.staging.deuxfleurs.org",
+          "-repo=/repo",
+          "-aliases-as-subdomains=false",
+          "-lishttp=:3888",
+        ]
+        volumes = [
+          "/mnt/ssd/go-ssb-room:/repo"
+        ]
+      }
+
+      resources {
+        memory = 200
+      }
+
+      service {
+        name = "go-ssb-room-http"
+        tags = [
+          "tricot ssb.staging.deuxfleurs.org",
+        ]
+        port = "web_port"
+        address_mode = "driver"
+        check {
+          type = "tcp"
+          port = "web_port"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+
+      service {
+        name = "go-ssb-room-ssb"
+        tags = [
+          "(diplonat (port 8008))",
+        ]
+        port = "ssb_port"
+        address_mode = "driver"
+        check {
+          type = "tcp"
+          port = "ssb_port"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+    }
+  }
+}
+
diff --git a/experimental/app/ssb/deploy/ssb-room.hcl b/experimental/app/ssb/deploy/ssb-room.hcl
new file mode 100644
index 0000000..049b7dd
--- /dev/null
+++ b/experimental/app/ssb/deploy/ssb-room.hcl
@@ -0,0 +1,59 @@
+job "ssb" {
+  datacenters = ["neptune"]
+  type = "service"
+
+  group "ssb-room" {
+    count = 1
+
+    constraint {
+      attribute = "${attr.unique.hostname}"
+      value = "caribou"
+    }
+
+    network {
+      port "web_port" {
+        to = 8007
+      }
+      port "ssb_port" {
+        to = 8008
+      }
+    }
+
+    task "go-ssb-room" {
+      driver = "docker"
+      config {
+        image = "lxpz/amd64_ssb_room:3"
+        readonly_rootfs = true
+        ports = [ "web_port", "ssb_port" ]
+        network_mode = "host"
+        volumes = [
+          "/mnt/ssd/ssb-room:/root/.ssb/"
+        ]
+      }
+      user = "root"
+
+      resources {
+        memory = 200
+      }
+
+      service {
+        name = "ssb-room-http"
+        tags = [
+          "tricot ssb.staging.deuxfleurs.org",
+        ]
+        port = "web_port"
+        address_mode = "driver"
+      }
+
+      service {
+        name = "ssb-room-ssb"
+        tags = [
+          "(diplonat (port 8008))",
+        ]
+        port = "ssb_port"
+        address_mode = "driver"
+      }
+    }
+  }
+}
+
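These two job files are alternative deployments of the same SSB room server (the Go rewrite vs. the original implementation) and register the same pair of services: an HTTP endpoint exposed through the "tricot" reverse-proxy tag and a raw SSB port opened via the "(diplonat (port 8008))" tag. Assuming Consul's DNS interface runs on its default port 8600 (an assumption about the surrounding cluster, not something this commit configures), registration can be sanity-checked with:

    # resolve the service through Consul DNS, then probe the proxied endpoint
    dig +short @127.0.0.1 -p 8600 go-ssb-room-http.service.consul SRV
    curl -sI https://ssb.staging.deuxfleurs.org | head -n 1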
diff --git a/experimental/app/telemetry-elastic/config/apm-config.yaml b/experimental/app/telemetry-elastic/config/apm-config.yaml
new file mode 100644
index 0000000..07a88bd
--- /dev/null
+++ b/experimental/app/telemetry-elastic/config/apm-config.yaml
@@ -0,0 +1,20 @@
+apm-server:
+  # Defines the host and port the server is listening on. Use "unix:/path/to.sock" to listen on a unix domain socket.
+  host: "0.0.0.0:8200"
+#-------------------------- Elasticsearch output --------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (`http` and `9200`).
+  # In case you specify an additional path, the scheme is required: `http://localhost:9200/path`.
+  # IPv6 addresses should always be defined as: `https://[2001:db8::1]:9200`.
+  hosts: ["localhost:9200"]
+  username: "elastic"
+  password: "{{ key "secrets/telemetry/elastic_passwords/elastic" }}"
+
+instrumentation:
+  enabled: true
+  environment: staging
+
+logging:
+  level: warning
+  to_stderr: true
diff --git a/experimental/app/telemetry-elastic/config/filebeat.yml b/experimental/app/telemetry-elastic/config/filebeat.yml
new file mode 100644
index 0000000..310afd1
--- /dev/null
+++ b/experimental/app/telemetry-elastic/config/filebeat.yml
@@ -0,0 +1,46 @@
+# see https://github.com/elastic/beats/blob/master/filebeat/filebeat.reference.yml
+filebeat.modules:
+- module: system
+  syslog:
+    enabled: true
+  auth:
+    enabled: true
+
+#filebeat.inputs:
+#- type: container
+#  enabled: true
+#  paths:
+#    - /var/lib/docker/containers/*/*.log
+#  stream: all # can be all, stdout or stderr
+
+#========================== Filebeat autodiscover ==============================
+filebeat.autodiscover:
+  providers:
+    - type: docker
+      # https://www.elastic.co/guide/en/beats/filebeat/current/configuration-autodiscover-hints.html
+      # This URL also contains instructions on multi-line logs
+      hints.enabled: true
+
+#================================ Processors ===================================
+processors:
+#- add_cloud_metadata: ~
+- add_docker_metadata: ~
+- add_locale:
+    format: offset
+- add_host_metadata:
+    netinfo.enabled: true
+
+#========================== Elasticsearch output ===============================
+output.elasticsearch:
+  hosts: ["localhost:9200"]
+  username: elastic
+  password: {{ key "secrets/telemetry/elastic_passwords/elastic" }}
+
+#============================== Dashboards =====================================
+setup.dashboards:
+  enabled: false
+
+#============================== Xpack Monitoring ===============================
+xpack.monitoring:
+  enabled: true
+  elasticsearch:
diff --git a/experimental/app/telemetry-elastic/config/grafana-litestream.yml b/experimental/app/telemetry-elastic/config/grafana-litestream.yml
new file mode 100644
index 0000000..a537d9c
--- /dev/null
+++ b/experimental/app/telemetry-elastic/config/grafana-litestream.yml
@@ -0,0 +1,10 @@
+dbs:
+  - path: /ephemeral/grafana.db
+    replicas:
+      - url: s3://grafana-db/grafana.db
+        region: garage-staging
+        endpoint: http://{{ env "attr.unique.network.ip-address" }}:3990
+        access-key-id: {{ key "secrets/telemetry/grafana/s3_access_key" | trimSpace }}
+        secret-access-key: {{ key "secrets/telemetry/grafana/s3_secret_key" | trimSpace }}
+        force-path-style: true
+        sync-interval: 60s
diff --git a/experimental/app/telemetry-elastic/config/grafana/provisioning/datasources/elastic.yaml b/experimental/app/telemetry-elastic/config/grafana/provisioning/datasources/elastic.yaml
new file mode 100644
index 0000000..7d2277c
--- /dev/null
+++ b/experimental/app/telemetry-elastic/config/grafana/provisioning/datasources/elastic.yaml
@@ -0,0 +1,21 @@
+apiVersion: 1
+
+datasources:
+  - name: DS_ELASTICSEARCH
+    type: elasticsearch
+    access: proxy
+    url: http://localhost:9200
+    password: '{{ key "secrets/telemetry/elastic_passwords/elastic" }}'
+    user: 'elastic'
+    database: metrics-*
+    basicAuth: false
+    isDefault: true
+    jsonData:
+      esVersion: "8.2.0"
+      includeFrozen: false
+      logLevelField: ''
+      logMessageField: ''
+      maxConcurrentShardRequests: 5
+      timeField: "@timestamp"
+      timeInterval: "5s"
+    readOnly: false
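Because hints.enabled is set in filebeat.yml above, individual containers can opt in or out of log collection through Docker labels instead of central config. A hedged example of what such labels look like (the image and multiline pattern below are made up for illustration):

    # example: only ship logs from containers that ask for it,
    # and join Java-style stack traces into one event
    docker run -d \
      --label co.elastic.logs/enabled=true \
      --label co.elastic.logs/multiline.pattern='^\[' \
      --label co.elastic.logs/multiline.negate=true \
      --label co.elastic.logs/multiline.match=after \
      some/image:latest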
diff --git a/experimental/app/telemetry-elastic/config/otel-config.yaml b/experimental/app/telemetry-elastic/config/otel-config.yaml
new file mode 100644
index 0000000..bcf1baa
--- /dev/null
+++ b/experimental/app/telemetry-elastic/config/otel-config.yaml
@@ -0,0 +1,56 @@
+receivers:
+  # Data sources: metrics, traces
+  otlp:
+    protocols:
+      grpc:
+        endpoint: ":4317"
+      http:
+        endpoint: ":55681"
+  # Data sources: metrics
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: "garage"
+          scrape_interval: 5s
+          static_configs:
+            - targets:
+                - "{{ env "attr.unique.network.ip-address" }}:3909"
+        - job_name: "node_exporter"
+          scrape_interval: 5s
+          static_configs:
+            - targets:
+                - "{{ env "attr.unique.network.ip-address" }}:9100"
+
+exporters:
+  logging:
+    logLevel: info
+  # see https://www.elastic.co/guide/en/apm/get-started/current/open-telemetry-elastic.html#open-telemetry-collector
+  otlp/elastic:
+    endpoint: "localhost:8200"
+    tls:
+      insecure: true
+
+processors:
+  batch:
+  probabilistic_sampler:
+    hash_seed: 42
+    sampling_percentage: 10
+
+extensions:
+  health_check:
+  pprof:
+    endpoint: :1888
+  zpages:
+    endpoint: :55679
+
+service:
+  extensions: [pprof, zpages, health_check]
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [probabilistic_sampler, batch]
+      exporters: [logging, otlp/elastic]
+    metrics:
+      receivers: [otlp, prometheus]
+      processors: [batch]
+      exporters: [logging, otlp/elastic]
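This collector config wires two pipelines: traces arrive over OTLP, are downsampled to 10% by the probabilistic sampler, and are forwarded to the Elastic APM server on :8200; metrics come from both OTLP and the two Prometheus scrape jobs (Garage on :3909, node_exporter on :9100). The extensions make the collector itself observable; assuming the health_check extension keeps its default port 13133 (not set explicitly above, so this is an assumption), it can be probed with:

    curl -s http://localhost:13133/                            # collector liveness
    curl -s http://localhost:55679/debug/tracez | head -n 5    # zpages trace summary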
"otel/opentelemetry-collector-contrib:0.46.0" + args = [ + "--config=/etc/otel-config.yaml", + ] + network_mode = "host" + ports= [ "otel_grpc" ] + volumes = [ + "secrets/otel-config.yaml:/etc/otel-config.yaml" + ] + } + + template { + data = file("../config/otel-config.yaml") + destination = "secrets/otel-config.yaml" + } + + resources { + memory = 100 + cpu = 100 + } + } + + task "apm" { + driver = "docker" + config { + image = "docker.elastic.co/apm/apm-server:8.2.0" + network_mode = "host" + ports = [ "apm" ] + args = [ "--strict.perms=false" ] + volumes = [ + "secrets/apm-config.yaml:/usr/share/apm-server/apm-server.yml:ro" + ] + } + + template { + data = file("../config/apm-config.yaml") + destination = "secrets/apm-config.yaml" + } + + resources { + memory = 100 + cpu = 100 + } + } + +/* + task "node_exporter" { + driver = "docker" + config { + image = "quay.io/prometheus/node-exporter:v1.1.2" + network_mode = "host" + ports = [ "node_exporter" ] + volumes = [ + "/:/host:ro,rslave" + ] + args = [ "--path.rootfs=/host" ] + } + + resources { + cpu = 50 + memory = 40 + } + } +*/ + + task "filebeat" { + driver = "docker" + config { + image = "docker.elastic.co/beats/filebeat:8.2.0" + network_mode = "host" + volumes = [ + "/mnt/ssd/telemetry/filebeat:/usr/share/filebeat/data", + "secrets/filebeat.yml:/usr/share/filebeat/filebeat.yml", + "/var/run/docker.sock:/var/run/docker.sock", + "/var/lib/docker/containers/:/var/lib/docker/containers/:ro", + "/var/log/:/var/log/:ro", + ] + args = [ "--strict.perms=false" ] + privileged = true + } + user = "root" + + + template { + data = file("../config/filebeat.yml") + destination = "secrets/filebeat.yml" + } + + resources { + memory = 100 + cpu = 100 + } + } + } +} + diff --git a/experimental/app/telemetry-elastic/deploy/telemetry.hcl b/experimental/app/telemetry-elastic/deploy/telemetry.hcl new file mode 100644 index 0000000..21685a1 --- /dev/null +++ b/experimental/app/telemetry-elastic/deploy/telemetry.hcl @@ -0,0 +1,181 @@ +job "telemetry" { + datacenters = ["neptune"] + type = "service" + + group "kibana" { + count = 1 + + network { + port "kibana" { + static = 5601 + } + } + + task "kibana" { + driver = "docker" + config { + image = "docker.elastic.co/kibana/kibana:8.2.0" + network_mode = "host" + ports = [ "kibana" ] + } + + template { + data = <<EOH +SERVER_NAME={{ env "attr.unique.hostname" }} +ELASTICSEARCH_HOSTS=http://localhost:9200 +ELASTICSEARCH_USERNAME=kibana_system +ELASTICSEARCH_PASSWORD={{ key "secrets/telemetry/elastic_passwords/kibana_system" }} +SERVER_PUBLICBASEURL=https://kibana.home.adnab.me +EOH + destination = "secrets/env" + env = true + } + + resources { + memory = 1000 + cpu = 500 + } + + service { + tags = [ + "kibana", + "tricot kibana.staging.deuxfleurs.org", + ] + port = 5601 + address_mode = "driver" + name = "kibana" + check { + type = "tcp" + port = 5601 + address_mode = "driver" + interval = "60s" + timeout = "5s" + check_restart { + limit = 3 + grace = "90s" + ignore_warnings = false + } + } + } + } + } + + group "grafana" { + count = 1 + + network { + port "grafana" { + static = 3333 + } + } + + task "restore-db" { + lifecycle { + hook = "prestart" + sidecar = false + } + + driver = "docker" + config { + image = "litestream/litestream:0.3.7" + args = [ + "restore", "-config", "/etc/litestream.yml", "/ephemeral/grafana.db" + ] + volumes = [ + "../alloc/data:/ephemeral", + "secrets/litestream.yml:/etc/litestream.yml" + ] + } + user = "472" + + template { + data = file("../config/grafana-litestream.yml") + 
destination = "secrets/litestream.yml" + } + + resources { + memory = 200 + cpu = 1000 + } + } + + task "grafana" { + driver = "docker" + config { + image = "grafana/grafana:8.4.3" + network_mode = "host" + ports = [ "grafana" ] + volumes = [ + "../alloc/data:/var/lib/grafana", + "secrets/elastic.yaml:/etc/grafana/provisioning/datasources/elastic.yaml" + ] + } + + template { + data = file("../config/grafana/provisioning/datasources/elastic.yaml") + destination = "secrets/elastic.yaml" + } + + template { + data = <<EOH +GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,grafana-piechart-panel,grafana-worldmap-panel,grafana-polystat-panel +GF_SERVER_HTTP_PORT=3333 +EOH + destination = "secrets/env" + env = true + } + + resources { + memory = 500 + cpu = 100 + } + + service { + tags = [ + "grafana", + "tricot grafana.staging.deuxfleurs.org", + ] + port = 3333 + address_mode = "driver" + name = "grafana" + check { + type = "tcp" + port = 3333 + address_mode = "driver" + interval = "60s" + timeout = "5s" + check_restart { + limit = 3 + grace = "90s" + ignore_warnings = false + } + } + } + } + + task "replicate-db" { + driver = "docker" + config { + image = "litestream/litestream:0.3.7" + args = [ + "replicate", "-config", "/etc/litestream.yml" + ] + volumes = [ + "../alloc/data:/ephemeral", + "secrets/litestream.yml:/etc/litestream.yml" + ] + } + user = "472" + + template { + data = file("../config/grafana-litestream.yml") + destination = "secrets/litestream.yml" + } + + resources { + memory = 200 + cpu = 100 + } + } + } +} diff --git a/experimental/app/yugabyte/deploy/yugabyte.hcl b/experimental/app/yugabyte/deploy/yugabyte.hcl new file mode 100644 index 0000000..e7efa7a --- /dev/null +++ b/experimental/app/yugabyte/deploy/yugabyte.hcl @@ -0,0 +1,204 @@ +job "yugabytedb" { + type = "service" + datacenters = [ "neptune", "pluton" ] + + priority = 80 + + constraint { + attribute = "${attr.cpu.arch}" + value = "amd64" + } + + group "master" { + count = 3 + + constraint { + attribute = "${attr.unique.hostname}" + operator = "regexp" + value = "(caribou|cariacou|carcajou)" + } + + network { + port "admin" { static = 7000 } + port "master-rpc" { static = 7100 } + } + + update { + max_parallel = 1 + min_healthy_time = "30s" + healthy_deadline = "5m" + } + + task "master" { + driver = "docker" + + config { + image = "yugabytedb/yugabyte:2.11.1.0-b305" + command = "/home/yugabyte/bin/yb-master" + args = [ + "--fs_data_dirs=/mnt/master", + "--replication_factor=3", + "--master_addresses=10.42.0.21:7100,10.42.0.22:7100,10.42.0.23:7100", + "--rpc_bind_addresses=0.0.0.0:7100", + "--placement_cloud=deuxfleurs", + "--placement_region=staging", + "--placement_zone=neptune", + "--minloglevel=1", + ] + volumes = [ + "/mnt/ssd/yugabyte/master:/mnt/master", + ] + network_mode = "host" + logging { + type = "journald" + } + } + + resources { + memory = 1000 + cpu = 1000 + } + + kill_signal = "SIGINT" + kill_timeout = "20s" + + service { + tags = ["yugabyte-master-rpc"] + port = 7100 + address_mode = "driver" + name = "yugabyte-master-rpc" + check { + type = "tcp" + port = 7100 + address_mode = "driver" + interval = "60s" + timeout = "5s" + check_restart { + limit = 3 + grace = "90s" + ignore_warnings = false + } + } + } + + service { + tags = ["yugabyte-admin"] + port = 7000 + address_mode = "driver" + name = "yugabyte-admin" + check { + type = "tcp" + port = 7000 + address_mode = "driver" + interval = "60s" + timeout = "5s" + check_restart { + limit = 3 + grace = "90s" + ignore_warnings = false 
diff --git a/experimental/app/yugabyte/deploy/yugabyte.hcl b/experimental/app/yugabyte/deploy/yugabyte.hcl
new file mode 100644
index 0000000..e7efa7a
--- /dev/null
+++ b/experimental/app/yugabyte/deploy/yugabyte.hcl
@@ -0,0 +1,204 @@
+job "yugabytedb" {
+  type = "service"
+  datacenters = [ "neptune", "pluton" ]
+
+  priority = 80
+
+  constraint {
+    attribute = "${attr.cpu.arch}"
+    value = "amd64"
+  }
+
+  group "master" {
+    count = 3
+
+    constraint {
+      attribute = "${attr.unique.hostname}"
+      operator = "regexp"
+      value = "(caribou|cariacou|carcajou)"
+    }
+
+    network {
+      port "admin" { static = 7000 }
+      port "master-rpc" { static = 7100 }
+    }
+
+    update {
+      max_parallel = 1
+      min_healthy_time = "30s"
+      healthy_deadline = "5m"
+    }
+
+    task "master" {
+      driver = "docker"
+
+      config {
+        image = "yugabytedb/yugabyte:2.11.1.0-b305"
+        command = "/home/yugabyte/bin/yb-master"
+        args = [
+          "--fs_data_dirs=/mnt/master",
+          "--replication_factor=3",
+          "--master_addresses=10.42.0.21:7100,10.42.0.22:7100,10.42.0.23:7100",
+          "--rpc_bind_addresses=0.0.0.0:7100",
+          "--placement_cloud=deuxfleurs",
+          "--placement_region=staging",
+          "--placement_zone=neptune",
+          "--minloglevel=1",
+        ]
+        volumes = [
+          "/mnt/ssd/yugabyte/master:/mnt/master",
+        ]
+        network_mode = "host"
+        logging {
+          type = "journald"
+        }
+      }
+
+      resources {
+        memory = 1000
+        cpu = 1000
+      }
+
+      kill_signal = "SIGINT"
+      kill_timeout = "20s"
+
+      service {
+        tags = ["yugabyte-master-rpc"]
+        port = 7100
+        address_mode = "driver"
+        name = "yugabyte-master-rpc"
+        check {
+          type = "tcp"
+          port = 7100
+          address_mode = "driver"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+
+      service {
+        tags = ["yugabyte-admin"]
+        port = 7000
+        address_mode = "driver"
+        name = "yugabyte-admin"
+        check {
+          type = "tcp"
+          port = 7000
+          address_mode = "driver"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+
+      restart {
+        interval = "30m"
+        attempts = 10
+        delay = "15s"
+        mode = "delay"
+      }
+    }
+  }
+
+  group "tserver" {
+    count = 3
+
+    constraint {
+      attribute = "${attr.unique.hostname}"
+      operator = "!="
+      value = "spoutnik"
+    }
+
+    network {
+      port "ysql" { static = 5433 }
+      port "tserver-rpc" { static = 9100 }
+    }
+
+    update {
+      max_parallel = 1
+      min_healthy_time = "30s"
+      healthy_deadline = "5m"
+    }
+
+    task "tserver" {
+      driver = "docker"
+
+      config {
+        image = "yugabytedb/yugabyte:2.11.1.0-b305"
+        command = "/home/yugabyte/bin/yb-tserver"
+        args = [
+          "--fs_data_dirs=/mnt/tserver",
+          "--start_pgsql_proxy",
+          "--tserver_master_addrs=10.42.0.21:7100,10.42.0.22:7100,10.42.0.23:7100",
+          "--rpc_bind_addresses=0.0.0.0:9100",
+          "--placement_cloud=deuxfleurs",
+          "--placement_region=staging",
+          "--placement_zone=neptune",
+          "--minloglevel=1",
+        ]
+        volumes = [
+          "/mnt/ssd/yugabyte/tserver:/mnt/tserver",
+        ]
+        network_mode = "host"
+        logging {
+          type = "journald"
+        }
+      }
+
+      resources {
+        memory = 1000
+        cpu = 1000
+      }
+
+      kill_signal = "SIGINT"
+      kill_timeout = "20s"
+
+      service {
+        tags = ["yugabyte-tserver-rpc"]
+        port = 9100
+        address_mode = "driver"
+        name = "yugabyte-tserver-rpc"
+        check {
+          type = "tcp"
+          port = 9100
+          address_mode = "driver"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+
+      service {
+        tags = [ "yugabyte-ysql" ]
+        port = 5433
+        address_mode = "driver"
+        name = "yugabyte-ysql"
+        check {
+          type = "tcp"
+          port = 5433
+          address_mode = "driver"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+    }
+  }
+}
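Once both groups are healthy, the tservers expose YSQL on port 5433, which speaks the PostgreSQL wire protocol, so either YugabyteDB's bundled client or plain psql can connect. A sketch (the host is one of the nodes from the job's master_addresses; the default yugabyte user and database are an assumption about a fresh cluster):

    # connect with the bundled client or any PostgreSQL client
    ysqlsh -h 10.42.0.21 -p 5433 -U yugabyte
    psql "host=10.42.0.21 port=5433 user=yugabyte dbname=yugabyte"

The masters' admin UI is served separately on port 7000 (the yugabyte-admin service).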