| author    | Quentin Dufour <quentin@deuxfleurs.fr> | 2020-10-22 18:55:29 +0200 |
|-----------|----------------------------------------|---------------------------|
| committer | Quentin Dufour <quentin@deuxfleurs.fr> | 2020-10-22 18:55:29 +0200 |
| commit    | 3b75213d4077b4dbbad1fe43c22beaecf4e815d6 (patch) | |
| tree      | cc2c8aea67333e2df7320c9a225cd585c78421ee | |
| parent    | 5c31fbf0b12632b32912fa92be3d53b6e3902b91 (diff) | |
| parent    | b53b71f750008267351d84938b4701413f045628 (diff) | |
We now have IPv6 activated on our network interfaces!
| Mode       | Path | Lines changed |
|------------|------|---------------|
| -rw-r--r-- | os/config/README.md | 10 |
| -rw-r--r-- | os/config/production | 4 |
| -rw-r--r-- | os/config/production.yml | 31 |
| -rw-r--r-- | os/config/roles/consul/tasks/main.yml | 5 |
| -rw-r--r-- | os/config/roles/consul/templates/consul.json.j2 | 4 |
| -rw-r--r-- | os/config/roles/consul/templates/resolv.conf.j2 | 2 |
| -rw-r--r-- | os/config/roles/network/files/rules.v6 | 12 |
| -rw-r--r-- | os/config/roles/network/tasks/main.yml | 7 |
| -rw-r--r-- | os/config/roles/network/templates/en.network | 9 |
| -rw-r--r-- | os/config/roles/network/templates/rules.v4 (renamed from os/config/roles/network/templates/rules.v4.j2) | 11 |
| -rw-r--r-- | os/config/roles/network/templates/rules.v6 | 40 |
| -rw-r--r-- | os/config/roles/nomad/tasks/main.yml | 2 |
| -rw-r--r-- | os/config/roles/nomad/templates/nomad.hcl.j2 | 6 |
| -rw-r--r-- | os/config/roles/storage/tasks/main.yml | 4 |
14 files changed, 109 insertions, 38 deletions
diff --git a/os/config/README.md b/os/config/README.md
index db8d960..fb4f6e7 100644
--- a/os/config/README.md
+++ b/os/config/README.md
@@ -4,12 +4,16 @@
 For each machine, **one by one** do:
 
 - Check that cluster is healthy
-  - `sudo gluster peer status`
-  - `sudo gluster volume status all` (check Online Col, only `Y` must appear)
+  - Check gluster
+    - `sudo gluster peer status`
+    - `sudo gluster volume status all` (check Online Col, only `Y` must appear)
 - Check that Nomad is healthy
+  - `nomad server members`
+  - `nomad node status`
 - Check that Consul is healthy
+  - `consul members`
 - Check that Postgres is healthy
 
-- Run `ansible-playbook -i production --limit <machine> site.yml`
+- Run `ansible-playbook -i production.yml --limit <machine> site.yml`
 - Reboot
 - Check that cluster is healthy
diff --git a/os/config/production b/os/config/production
deleted file mode 100644
index c8f08f2..0000000
--- a/os/config/production
+++ /dev/null
@@ -1,4 +0,0 @@
-[cluster_nodes]
-veterini ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=110 ansible_user=root public_ip=192.168.1.2 private_ip=192.168.1.2 interface=eno1 dns_server=80.67.169.40
-silicareux ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=111 ansible_user=root public_ip=192.168.1.3 private_ip=192.168.1.3 interface=eno1 dns_server=80.67.169.40
-wonse ansible_host=fbx-rennes2.machine.deuxfleurs.fr ansible_port=112 ansible_user=root public_ip=192.168.1.4 private_ip=192.168.1.4 interface=eno1 dns_server=80.67.169.40
diff --git a/os/config/production.yml b/os/config/production.yml
new file mode 100644
index 0000000..77624c6
--- /dev/null
+++ b/os/config/production.yml
@@ -0,0 +1,31 @@
+cluster_nodes:
+  hosts:
+    veterini:
+      ansible_host: fbx-rennes2.machine.deuxfleurs.fr
+      ansible_port: 110
+      ansible_user: root
+      ipv4: 192.168.1.2
+      ipv6: 2a01:e35:2fdc:dbe0::2
+      interface: eno1
+      dns_server: 80.67.169.40
+      ansible_python_interpreter: python3
+
+    silicareux:
+      ansible_host: fbx-rennes2.machine.deuxfleurs.fr
+      ansible_port: 111
+      ansible_user: root
+      ipv4: 192.168.1.3
+      ipv6: 2a01:e35:2fdc:dbe0::3
+      interface: eno1
+      dns_server: 80.67.169.40
+      ansible_python_interpreter: python3
+
+    wonse:
+      ansible_host: fbx-rennes2.machine.deuxfleurs.fr
+      ansible_port: 112
+      ansible_user: root
+      ipv4: 192.168.1.4
+      ipv6: 2a01:e35:2fdc:dbe0::4
+      interface: eno1
+      dns_server: 80.67.169.40
+      ansible_python_interpreter: python3
diff --git a/os/config/roles/consul/tasks/main.yml b/os/config/roles/consul/tasks/main.yml
index 2b77080..761c1f8 100644
--- a/os/config/roles/consul/tasks/main.yml
+++ b/os/config/roles/consul/tasks/main.yml
@@ -1,6 +1,6 @@
 - name: "Set consul version"
   set_fact:
-    consul_version: 1.8.0
+    consul_version: 1.8.4
 
 - name: "Download and install Consul for x86_64"
   unarchive:
@@ -21,6 +21,3 @@
 
 - name: "Enable consul systemd service at boot"
   service: name=consul state=started enabled=yes daemon_reload=yes
-
-- name: "Deploy resolv.conf to use Consul"
-  template: src=resolv.conf.j2 dest=/etc/resolv.conf
diff --git a/os/config/roles/consul/templates/consul.json.j2 b/os/config/roles/consul/templates/consul.json.j2
index b6c86aa..76632a6 100644
--- a/os/config/roles/consul/templates/consul.json.j2
+++ b/os/config/roles/consul/templates/consul.json.j2
@@ -1,14 +1,14 @@
 {
   "data_dir": "/var/lib/consul",
   "bind_addr": "0.0.0.0",
-  "advertise_addr": "{{ public_ip }}",
+  "advertise_addr": "{{ ipv4 }}",
   "addresses": {
     "dns": "0.0.0.0",
     "http": "0.0.0.0"
   },
   "retry_join": [
     {% for selected_host in groups['cluster_nodes']|reject("sameas", ansible_fqdn) %}{# @FIXME: Reject doesn't work #}
-      "{{ hostvars[selected_host]['private_ip'] }}" {{ "," if not loop.last else "" }}
+      "{{ hostvars[selected_host]['ipv4'] }}" {{ "," if not loop.last else "" }}
     {% endfor %}
   ],
   "bootstrap_expect": 3,
diff --git a/os/config/roles/consul/templates/resolv.conf.j2 b/os/config/roles/consul/templates/resolv.conf.j2
index 2404034..db64711 100644
--- a/os/config/roles/consul/templates/resolv.conf.j2
+++ b/os/config/roles/consul/templates/resolv.conf.j2
@@ -1,2 +1,2 @@
-nameserver {{ private_ip }}
+nameserver {{ ipv4 }}
 nameserver {{ dns_server }}
diff --git a/os/config/roles/network/files/rules.v6 b/os/config/roles/network/files/rules.v6
deleted file mode 100644
index 17ff71c..0000000
--- a/os/config/roles/network/files/rules.v6
+++ /dev/null
@@ -1,12 +0,0 @@
-# WARNING!! When rules.{v4,v6} are changed, the whole iptables configuration is reloaded.
-# This creates issues with Docker, which injects its own configuration in iptables when it starts.
-# In practice, most (all?) containers will break if rules.{v4,v6} are changed,
-# and docker will have to be restared.
-
-
-*filter
-:INPUT DROP [0:0]
-:FORWARD DROP [0:0]
-:OUTPUT ACCEPT [0:0]
-COMMIT
-
diff --git a/os/config/roles/network/tasks/main.yml b/os/config/roles/network/tasks/main.yml
index 1443e0c..caa6bff 100644
--- a/os/config/roles/network/tasks/main.yml
+++ b/os/config/roles/network/tasks/main.yml
@@ -1,8 +1,11 @@
 - name: "Deploy iptablesv4 configuration"
-  template: src=rules.v4.j2 dest=/etc/iptables/rules.v4
+  template: src=rules.v4 dest=/etc/iptables/rules.v4
 
 - name: "Deploy iptablesv6 configuration"
-  copy: src=rules.v6 dest=/etc/iptables/rules.v6
+  template: src=rules.v6 dest=/etc/iptables/rules.v6
+
+- name: "Deploy systemd-networkd configuration"
+  template: src=en.network dest=/etc/systemd/network/en.network
 
 - name: "Activate IP forwarding"
   sysctl:
diff --git a/os/config/roles/network/templates/en.network b/os/config/roles/network/templates/en.network
new file mode 100644
index 0000000..b9b52e9
--- /dev/null
+++ b/os/config/roles/network/templates/en.network
@@ -0,0 +1,9 @@
+[Match]
+Name={{ interface }}
+
+[Network]
+Address={{ ipv4 }}/24
+Address={{ ipv6 }}/64
+Gateway=192.168.1.254
+DNS={{ ipv4 }}
+DNS={{ dns_server }}
diff --git a/os/config/roles/network/templates/rules.v4.j2 b/os/config/roles/network/templates/rules.v4
index a446139..a5f138b 100644
--- a/os/config/roles/network/templates/rules.v4.j2
+++ b/os/config/roles/network/templates/rules.v4
@@ -3,15 +3,18 @@
 :FORWARD DROP [0:0]
 :OUTPUT ACCEPT [0:0]
 
+# Internet Control Message Protocol
+-A INPUT -p icmp -j ACCEPT
+
 # Administration
 -A INPUT -p tcp --dport 22 -j ACCEPT
 
-# Cluster
+# Diplonat needs everything open to communicate with IGD with the router
 -A INPUT -s 192.168.1.254 -j ACCEPT
--A INPUT -s 82.253.205.190 -j ACCEPT
+
+# Cluster
 {% for selected_host in groups['cluster_nodes'] %}
--A INPUT -s {{ hostvars[selected_host]['public_ip'] }} -j ACCEPT
--A INPUT -s {{ hostvars[selected_host]['private_ip'] }} -j ACCEPT
+-A INPUT -s {{ hostvars[selected_host]['ipv4'] }} -j ACCEPT
 {% endfor %}
 
 # Local
diff --git a/os/config/roles/network/templates/rules.v6 b/os/config/roles/network/templates/rules.v6
new file mode 100644
index 0000000..50737a0
--- /dev/null
+++ b/os/config/roles/network/templates/rules.v6
@@ -0,0 +1,40 @@
+*filter
+:INPUT DROP [0:0]
+:FORWARD DROP [0:0]
+:OUTPUT ACCEPT [0:0]
+
+# Internet Control Message Protocol
+# (required)
+-A INPUT -p icmp -j ACCEPT
+-A INPUT -p ipv6-icmp -j ACCEPT
+
+# Administration
+-A INPUT -p tcp --dport 22 -j ACCEPT
+
+# Cluster
+{% for selected_host in groups['cluster_nodes'] %}
+-A INPUT -s {{ hostvars[selected_host]['ipv6'] }} -j ACCEPT
+{% endfor %}
+
+# Local
+-A INPUT -i docker0 -j ACCEPT
+-A INPUT -s ::1/128 -j ACCEPT
+-A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
+
+COMMIT
+
+*nat
+:PREROUTING ACCEPT [0:0]
+:INPUT ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+:POSTROUTING ACCEPT [0:0]
+COMMIT
+
+*mangle
+:PREROUTING ACCEPT [0:0]
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+:POSTROUTING ACCEPT [0:0]
+COMMIT
+
diff --git a/os/config/roles/nomad/tasks/main.yml b/os/config/roles/nomad/tasks/main.yml
index 7c73362..db519d5 100644
--- a/os/config/roles/nomad/tasks/main.yml
+++ b/os/config/roles/nomad/tasks/main.yml
@@ -1,6 +1,6 @@
 - name: "Set nomad version"
   set_fact:
-    nomad_version: 0.12.0-beta2
+    nomad_version: 0.12.6
 
 - name: "Download and install Nomad for x86_64"
   unarchive:
diff --git a/os/config/roles/nomad/templates/nomad.hcl.j2 b/os/config/roles/nomad/templates/nomad.hcl.j2
index b0be6a8..37952db 100644
--- a/os/config/roles/nomad/templates/nomad.hcl.j2
+++ b/os/config/roles/nomad/templates/nomad.hcl.j2
@@ -5,9 +5,9 @@ addresses {
 }
 
 advertise {
-  http = "{{ public_ip }}"
-  rpc = "{{ public_ip }}"
-  serf = "{{ public_ip }}"
+  http = "{{ ipv4 }}"
+  rpc = "{{ ipv4 }}"
+  serf = "{{ ipv4 }}"
 }
 
 data_dir = "/var/lib/nomad"
diff --git a/os/config/roles/storage/tasks/main.yml b/os/config/roles/storage/tasks/main.yml
index a1f2d8f..d66011b 100644
--- a/os/config/roles/storage/tasks/main.yml
+++ b/os/config/roles/storage/tasks/main.yml
@@ -48,7 +48,7 @@
       nfs.export-volumes: "off"
       cluster.lookup-optimize: "on"
 
-    cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['private_ip'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
+    cluster: "{% for selected_host in groups['cluster_nodes'] %}{{ hostvars[selected_host]['ipv4'] }}{{ ',' if not loop.last else '' }}{% endfor %}"
   run_once: true
 
 - name: "Create mountpoint"
@@ -61,7 +61,7 @@
   tags: gluster-fstab
   mount:
     path: /mnt/glusterfs
-    src: "{{ private_ip }}:/donnees"
+    src: "{{ ipv4 }}:/donnees"
     fstype: glusterfs
     opts: "defaults,_netdev,noauto,x-systemd.automount"
    state: present
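After applying this on a node (for instance with `ansible-playbook -i production.yml --limit <machine> site.yml`, as described in the README), a few read-only commands can confirm that IPv6 is actually active. This is only a verification sketch, not part of the commit; `eno1`, the `2a01:e35:2fdc:dbe0::/64` prefix and the node addresses are taken from the inventory above and will differ on other setups:

- `ip -6 addr show dev eno1` (the interface should carry its static address from `en.network`, e.g. `2a01:e35:2fdc:dbe0::2/64`)
- `networkctl status eno1` (systemd-networkd should report the link as configured)
- `ping -6 -c 3 2a01:e35:2fdc:dbe0::3` (another cluster node should answer over IPv6)
- `ip6tables -S INPUT` (the deployed `rules.v6` should show the default `DROP` policy on INPUT with the cluster's IPv6 addresses accepted)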