Diffstat (limited to 'cluster/prod/app/backup/deploy')
-rw-r--r--  cluster/prod/app/backup/deploy/backup-daily.hcl  | 196
-rw-r--r--  cluster/prod/app/backup/deploy/backup-weekly.hcl |  55
2 files changed, 251 insertions(+), 0 deletions(-)
diff --git a/cluster/prod/app/backup/deploy/backup-daily.hcl b/cluster/prod/app/backup/deploy/backup-daily.hcl
new file mode 100644
index 0000000..df592ce
--- /dev/null
+++ b/cluster/prod/app/backup/deploy/backup-daily.hcl
@@ -0,0 +1,196 @@
+job "backup_daily" {
+ datacenters = ["orion", "neptune"]
+ type = "batch"
+
+  priority = 60
+
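+  // Periodic batch job: Nomad registers the cron schedule and launches
+  // a fresh run at every tick (@daily fires at midnight).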
+ periodic {
+ cron = "@daily"
+ // Do not allow overlapping runs.
+ prohibit_overlap = true
+ }
+
+ group "backup-dovecot" {
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "="
+ value = "doradille"
+ }
+
+ task "main" {
+ driver = "docker"
+
+ config {
+ image = "restic/restic:0.14.0"
+ entrypoint = [ "/bin/sh", "-c" ]
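+        // Single shell pipeline: snapshot /mail, apply the retention
+        // policy (keep everything from the last month and a day, plus
+        // weekly snapshots for 3 months and monthly ones for a year),
+        // compact the repository, then verify its integrity.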
+ args = [ "restic backup /mail && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
+ volumes = [
+ "/mnt/ssd/mail:/mail"
+ ]
+ }
+
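+      // Pull the S3 credentials and restic repository settings from
+      // Consul KV; env = true exports the rendered key=value pairs
+      // into the task environment.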
+ template {
+ data = <<EOH
+AWS_ACCESS_KEY_ID={{ key "secrets/email/dovecot/backup_aws_access_key_id" }}
+AWS_SECRET_ACCESS_KEY={{ key "secrets/email/dovecot/backup_aws_secret_access_key" }}
+RESTIC_REPOSITORY={{ key "secrets/email/dovecot/backup_restic_repository" }}
+RESTIC_PASSWORD={{ key "secrets/email/dovecot/backup_restic_password" }}
+EOH
+
+ destination = "secrets/env_vars"
+ env = true
+ }
+
+ resources {
+ cpu = 500
+ memory = 100
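+        // memory is the soft reservation; memory_max lets the task
+        // burst up to 300 MB when the node has headroom.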
+ memory_max = 300
+ }
+
+ restart {
+ attempts = 2
+ interval = "30m"
+ delay = "15s"
+ mode = "fail"
+ }
+ }
+ }
+
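+  // Same snapshot / retention / prune / check pipeline as the dovecot
+  // group, pointed at the Plume media directory on dahlia.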
+ group "backup-plume" {
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "="
+ value = "dahlia"
+ }
+
+ task "main" {
+ driver = "docker"
+
+ config {
+ image = "restic/restic:0.14.0"
+ entrypoint = [ "/bin/sh", "-c" ]
+ args = [ "restic backup /plume && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
+ volumes = [
+ "/mnt/ssd/plume/media:/plume"
+ ]
+ }
+
+ template {
+ data = <<EOH
+AWS_ACCESS_KEY_ID={{ key "secrets/plume/backup_aws_access_key_id" }}
+AWS_SECRET_ACCESS_KEY={{ key "secrets/plume/backup_aws_secret_access_key" }}
+RESTIC_REPOSITORY={{ key "secrets/plume/backup_restic_repository" }}
+RESTIC_PASSWORD={{ key "secrets/plume/backup_restic_password" }}
+EOH
+
+ destination = "secrets/env_vars"
+ env = true
+ }
+
+ resources {
+ cpu = 500
+ memory = 100
+ memory_max = 300
+ }
+
+ restart {
+ attempts = 2
+ interval = "30m"
+ delay = "15s"
+ mode = "fail"
+ }
+ }
+ }
+
+ group "backup-consul" {
+ task "consul-kv-export" {
+ driver = "docker"
+
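+      // Prestart, non-sidecar: this task runs to completion before
+      // restic-backup starts, dumping the whole Consul KV store into
+      // the allocation directory shared by both tasks.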
+ lifecycle {
+ hook = "prestart"
+ sidecar = false
+ }
+
+ config {
+ image = "consul:1.13.1"
+ network_mode = "host"
+ entrypoint = [ "/bin/sh", "-c" ]
+ args = [ "/bin/consul kv export > $NOMAD_ALLOC_DIR/consul.json" ]
+ volumes = [
+ "secrets:/etc/consul",
+ ]
+ }
+
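+      // Client TLS for the Consul HTTP API; the certificate and key
+      // are rendered from Consul KV by the templates below and reach
+      // /etc/consul through the secrets volume above.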
+ env {
+ CONSUL_HTTP_ADDR = "https://consul.service.prod.consul:8501"
+ CONSUL_HTTP_SSL = "true"
+ CONSUL_CACERT = "/etc/consul/consul.crt"
+ CONSUL_CLIENT_CERT = "/etc/consul/consul-client.crt"
+ CONSUL_CLIENT_KEY = "/etc/consul/consul-client.key"
+ }
+
+ resources {
+ cpu = 200
+ memory = 200
+ }
+
+ template {
+ data = "{{ key \"secrets/consul/consul.crt\" }}"
+ destination = "secrets/consul.crt"
+ }
+
+ template {
+ data = "{{ key \"secrets/consul/consul-client.crt\" }}"
+ destination = "secrets/consul-client.crt"
+ }
+
+ template {
+ data = "{{ key \"secrets/consul/consul-client.key\" }}"
+ destination = "secrets/consul-client.key"
+ }
+
+ restart {
+ attempts = 2
+ interval = "30m"
+ delay = "15s"
+ mode = "fail"
+ }
+ }
+
+ task "restic-backup" {
+ driver = "docker"
+
+ config {
+        // 0.14.0, to match the other tasks: the --keep-within-* forget
+        // flags used below are not available in restic 0.12.x.
+        image = "restic/restic:0.14.0"
+ entrypoint = [ "/bin/sh", "-c" ]
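+        // Snapshot the JSON export produced by the prestart task, then
+        // apply the same retention, prune and check sequence as the
+        // daily file backups.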
+ args = [ "restic backup $NOMAD_ALLOC_DIR/consul.json && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
+ }
+
+ template {
+ data = <<EOH
+AWS_ACCESS_KEY_ID={{ key "secrets/backup/consul/backup_aws_access_key_id" }}
+AWS_SECRET_ACCESS_KEY={{ key "secrets/backup/consul/backup_aws_secret_access_key" }}
+RESTIC_REPOSITORY={{ key "secrets/backup/consul/backup_restic_repository" }}
+RESTIC_PASSWORD={{ key "secrets/backup/consul/backup_restic_password" }}
+EOH
+
+ destination = "secrets/env_vars"
+ env = true
+ }
+
+ resources {
+ cpu = 200
+ memory = 200
+ }
+
+ restart {
+ attempts = 2
+ interval = "30m"
+ delay = "15s"
+ mode = "fail"
+ }
+ }
+ }
+}
diff --git a/cluster/prod/app/backup/deploy/backup-weekly.hcl b/cluster/prod/app/backup/deploy/backup-weekly.hcl
new file mode 100644
index 0000000..36a507a
--- /dev/null
+++ b/cluster/prod/app/backup/deploy/backup-weekly.hcl
@@ -0,0 +1,55 @@
+job "backup_weekly" {
+ datacenters = ["orion"]
+ type = "batch"
+
+  priority = 60
+
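+  // @weekly fires once a week, at midnight on Sunday.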
+ periodic {
+ cron = "@weekly"
+ // Do not allow overlapping runs.
+ prohibit_overlap = true
+ }
+
+ group "backup-psql" {
+ task "main" {
+ driver = "docker"
+
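+      // Custom image that, judging from the variables below, takes a
+      // PostgreSQL base backup with the replication credentials,
+      // encrypts it with the public key, and uploads it to the bucket.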
+ config {
+ image = "superboum/backup-psql-docker:gyr3aqgmhs0hxj0j9hkrdmm1m07i8za2"
+ volumes = [
+ // Mount a cache on the hard disk to avoid filling up the SSD
+ "/mnt/storage/tmp_bckp_psql:/mnt/cache"
+ ]
+ }
+
+ template {
+ data = <<EOH
+CACHE_DIR=/mnt/cache
+AWS_BUCKET=backups-pgbasebackup
+AWS_ENDPOINT=s3.deuxfleurs.shirokumo.net
+AWS_ACCESS_KEY_ID={{ key "secrets/postgres/backup/aws_access_key_id" }}
+AWS_SECRET_ACCESS_KEY={{ key "secrets/postgres/backup/aws_secret_access_key" }}
+CRYPT_PUBLIC_KEY={{ key "secrets/postgres/backup/crypt_public_key" }}
+PSQL_HOST=psql-proxy.service.prod.consul
+PSQL_USER={{ key "secrets/postgres/keeper/pg_repl_username" }}
+PGPASSWORD={{ key "secrets/postgres/keeper/pg_repl_pwd" }}
+EOH
+
+ destination = "secrets/env_vars"
+ env = true
+ }
+
+ resources {
+ cpu = 200
+ memory = 200
+ }
+
+ restart {
+ attempts = 2
+ interval = "30m"
+ delay = "15s"
+ mode = "fail"
+ }
+ }
+ }
+}