From a9a665e437a12e8a3231bf537f7f8ef10a9b9cf4 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 21 Apr 2022 23:00:43 +0200 Subject: Move files again --- os/host_TOREWORK/cluster/prod/cluster.nix | 70 ++++++ os/host_TOREWORK/cluster/prod/node/celeri.nix | 29 +++ os/host_TOREWORK/cluster/prod/node/celeri.site.nix | 1 + os/host_TOREWORK/cluster/prod/node/concombre.nix | 29 +++ .../cluster/prod/node/concombre.site.nix | 1 + os/host_TOREWORK/cluster/prod/node/courgette.nix | 29 +++ .../cluster/prod/node/courgette.site.nix | 1 + os/host_TOREWORK/cluster/prod/site/neptune.nix | 9 + os/host_TOREWORK/cluster/prod/ssh_config | 10 + os/host_TOREWORK/cluster/staging/cluster.nix | 45 ++++ os/host_TOREWORK/cluster/staging/node/carcajou.nix | 27 +++ .../cluster/staging/node/carcajou.site.nix | 1 + os/host_TOREWORK/cluster/staging/node/cariacou.nix | 27 +++ .../cluster/staging/node/cariacou.site.nix | 1 + os/host_TOREWORK/cluster/staging/node/caribou.nix | 27 +++ .../cluster/staging/node/caribou.site.nix | 1 + os/host_TOREWORK/cluster/staging/node/spoutnik.nix | 58 +++++ .../cluster/staging/node/spoutnik.site.nix | 1 + os/host_TOREWORK/cluster/staging/site/neptune.nix | 20 ++ os/host_TOREWORK/cluster/staging/site/pluton.nix | 13 ++ os/host_TOREWORK/cluster/staging/ssh_config | 14 ++ os/host_TOREWORK/configuration.nix | 94 +++++++++ os/modules/deuxfleurs.nix | 234 +++++++++++++++++++++ os/modules/remote-unlock.nix | 26 +++ os/modules/wesher_service.nix | 137 ++++++++++++ 25 files changed, 905 insertions(+) create mode 100644 os/host_TOREWORK/cluster/prod/cluster.nix create mode 100644 os/host_TOREWORK/cluster/prod/node/celeri.nix create mode 120000 os/host_TOREWORK/cluster/prod/node/celeri.site.nix create mode 100644 os/host_TOREWORK/cluster/prod/node/concombre.nix create mode 120000 os/host_TOREWORK/cluster/prod/node/concombre.site.nix create mode 100644 os/host_TOREWORK/cluster/prod/node/courgette.nix create mode 120000 os/host_TOREWORK/cluster/prod/node/courgette.site.nix create mode 100644 os/host_TOREWORK/cluster/prod/site/neptune.nix create mode 100644 os/host_TOREWORK/cluster/prod/ssh_config create mode 100644 os/host_TOREWORK/cluster/staging/cluster.nix create mode 100644 os/host_TOREWORK/cluster/staging/node/carcajou.nix create mode 120000 os/host_TOREWORK/cluster/staging/node/carcajou.site.nix create mode 100644 os/host_TOREWORK/cluster/staging/node/cariacou.nix create mode 120000 os/host_TOREWORK/cluster/staging/node/cariacou.site.nix create mode 100644 os/host_TOREWORK/cluster/staging/node/caribou.nix create mode 120000 os/host_TOREWORK/cluster/staging/node/caribou.site.nix create mode 100644 os/host_TOREWORK/cluster/staging/node/spoutnik.nix create mode 120000 os/host_TOREWORK/cluster/staging/node/spoutnik.site.nix create mode 100644 os/host_TOREWORK/cluster/staging/site/neptune.nix create mode 100644 os/host_TOREWORK/cluster/staging/site/pluton.nix create mode 100644 os/host_TOREWORK/cluster/staging/ssh_config create mode 100644 os/host_TOREWORK/configuration.nix create mode 100644 os/modules/deuxfleurs.nix create mode 100644 os/modules/remote-unlock.nix create mode 100644 os/modules/wesher_service.nix (limited to 'os') diff --git a/os/host_TOREWORK/cluster/prod/cluster.nix b/os/host_TOREWORK/cluster/prod/cluster.nix new file mode 100644 index 0000000..d131db7 --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/cluster.nix @@ -0,0 +1,70 @@ +{ config, pkgs, ... 
} @ args: + +{ + deuxfleurs.cluster_name = "prod"; + deuxfleurs.cluster_nodes = [ + { + hostname = "concombre"; + site_name = "neptune"; + publicKey = "VvXT0fPDfWsHxumZqVShpS33dJQAdpJ1E79ZbCBJP34="; + IP = "10.42.1.31"; + endpoint = "82.66.112.151:33731"; + lan_endpoint = "192.168.1.31:33731"; + } + { + hostname = "courgette"; + site_name = "neptune"; + publicKey = "goTkBJGmzrGDOAjUcdH9G0JekipqSMoaYQdB6IHnzi0="; + IP = "10.42.1.32"; + endpoint = "82.66.112.151:33732"; + lan_endpoint = "192.168.1.32:33732"; + } + { + hostname = "celeri"; + site_name = "neptune"; + publicKey = "oZDAb8LoLW87ktUHyFFec0VaIar97bqq47mGbdVqJ0U="; + IP = "10.42.1.33"; + endpoint = "82.66.112.151:33733"; + lan_endpoint = "192.168.1.33:33733"; + } + ]; + deuxfleurs.admin_nodes = [ + { + hostname = "hammerhead"; + publicKey = "b5hF+GSTgg3oM6wnjL7jRbfyf1jtsWdVptPPbAh3Qic="; + IP = "10.42.0.1"; + endpoint = "5.135.179.11:51349"; + } + { + hostname = "robinson"; + publicKey = "ETaZFil3mFXlJ0LaJZyWqJVLV2IZUF5PB/8M7WbQSTg="; + IP = "10.42.0.42"; + endpoint = "77.141.67.109:33742"; + } + { + hostname = "shiki"; + publicKey = "QUiUNMk70TEQ75Ut7Uqikr5uGVSXmx8EGNkGM6tANlg="; + IP = "10.42.0.206"; + endpoint = "37.187.118.206:51820"; + } + { + hostname = "lindy"; + publicKey = "wen9GnZy2iLT6RyHfn7ydS/wvdvow1XPmhZxIkrDbks="; + IP = "10.42.0.66"; + endpoint = "82.66.112.151:33766"; + } + ]; + deuxfleurs.admin_accounts = { + lx = [ + # Keys for accessing nodes from outside + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIDdVbA9fEdqSr5UJ77NnoIqDTVp8ca5kHExhZYI4ecBExFJfonJllXMBN9KdC4ukxtY8Ug47PcMOfMaTBZQc+e+KpvDWpkBt15Xpem3RCxmMBES79sLL7LgtAdBXc5mNaCX8EOEVixWKdarjvxRyf6py6the51G5muaiMpoj5fae4ZpRGjhGTPefzc7y7zRWBUUZ8pYHW774BIaK6XT9gn3hyHV+Occjl/UODXvodktk55YtnuPi8adXTYEsHrVVz8AkFhx+cr0U/U8vtQnsTrZG+JmgQLqpXVs0RDw5bE1RefEbMuYNKxutYKUe3L+ZJtDe0M0MqOFI8a4F5TxP5 katchup@konata" + ]; + }; + + # For Garage external communication + networking.firewall.allowedTCPPorts = [ 3901 ]; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/prod/node/celeri.nix b/os/host_TOREWORK/cluster/prod/node/celeri.nix new file mode 100644 index 0000000..02a33c9 --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/node/celeri.nix @@ -0,0 +1,29 @@ +# Configuration file local to this node + +{ config, pkgs, ... }: + +{ + # Use the systemd-boot EFI boot loader. 
+ boot.loader.systemd-boot.enable = true; + boot.loader.timeout = 20; + boot.loader.efi.canTouchEfiVariables = true; + + networking.hostName = "celeri"; + + deuxfleurs.network_interface = "eno1"; + deuxfleurs.lan_ip = "192.168.1.33"; + + networking.interfaces.eno1.ipv6.addresses = [ + { + address = "2a01:e0a:c:a720::33"; + prefixLength = 64; + } + ]; + + deuxfleurs.vpn_ip = "10.42.1.33"; + deuxfleurs.vpn_listen_port = 33733; + deuxfleurs.is_raft_server = true; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/prod/node/celeri.site.nix b/os/host_TOREWORK/cluster/prod/node/celeri.site.nix new file mode 120000 index 0000000..04ee36c --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/node/celeri.site.nix @@ -0,0 +1 @@ +../site/neptune.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/prod/node/concombre.nix b/os/host_TOREWORK/cluster/prod/node/concombre.nix new file mode 100644 index 0000000..517dcf8 --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/node/concombre.nix @@ -0,0 +1,29 @@ +# Configuration file local to this node + +{ config, pkgs, ... }: + +{ + # Use the systemd-boot EFI boot loader. + boot.loader.systemd-boot.enable = true; + boot.loader.timeout = 20; + boot.loader.efi.canTouchEfiVariables = true; + + networking.hostName = "concombre"; + + deuxfleurs.network_interface = "eno1"; + deuxfleurs.lan_ip = "192.168.1.31"; + + networking.interfaces.eno1.ipv6.addresses = [ + { + address = "2a01:e0a:c:a720::31"; + prefixLength = 64; + } + ]; + + deuxfleurs.vpn_ip = "10.42.1.31"; + deuxfleurs.vpn_listen_port = 33731; + deuxfleurs.is_raft_server = true; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/prod/node/concombre.site.nix b/os/host_TOREWORK/cluster/prod/node/concombre.site.nix new file mode 120000 index 0000000..04ee36c --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/node/concombre.site.nix @@ -0,0 +1 @@ +../site/neptune.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/prod/node/courgette.nix b/os/host_TOREWORK/cluster/prod/node/courgette.nix new file mode 100644 index 0000000..d34e7a5 --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/node/courgette.nix @@ -0,0 +1,29 @@ +# Configuration file local to this node + +{ config, pkgs, ... }: + +{ + # Use the systemd-boot EFI boot loader. + boot.loader.systemd-boot.enable = true; + boot.loader.timeout = 20; + boot.loader.efi.canTouchEfiVariables = true; + + networking.hostName = "courgette"; + + deuxfleurs.network_interface = "eno1"; + deuxfleurs.lan_ip = "192.168.1.32"; + + networking.interfaces.eno1.ipv6.addresses = [ + { + address = "2a01:e0a:c:a720::32"; + prefixLength = 64; + } + ]; + + deuxfleurs.vpn_ip = "10.42.1.32"; + deuxfleurs.vpn_listen_port = 33732; + deuxfleurs.is_raft_server = true; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/prod/node/courgette.site.nix b/os/host_TOREWORK/cluster/prod/node/courgette.site.nix new file mode 120000 index 0000000..04ee36c --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/node/courgette.site.nix @@ -0,0 +1 @@ +../site/neptune.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/prod/site/neptune.nix b/os/host_TOREWORK/cluster/prod/site/neptune.nix new file mode 100644 index 0000000..900ddf0 --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/site/neptune.nix @@ -0,0 +1,9 @@ +{ config, pkgs, ... 
}: + +{ + deuxfleurs.site_name = "neptune"; + deuxfleurs.lan_default_gateway = "192.168.1.254"; + deuxfleurs.lan_ip_prefix_length = 24; + + networking.nameservers = [ "192.168.1.254" ]; +} diff --git a/os/host_TOREWORK/cluster/prod/ssh_config b/os/host_TOREWORK/cluster/prod/ssh_config new file mode 100644 index 0000000..cb4841f --- /dev/null +++ b/os/host_TOREWORK/cluster/prod/ssh_config @@ -0,0 +1,10 @@ +UserKnownHostsFile ./ssh_known_hosts + +Host concombre + HostName 2a01:e0a:c:a720::31 + +Host courgette + HostName 2a01:e0a:c:a720::32 + +Host celeri + HostName 2a01:e0a:c:a720::33 diff --git a/os/host_TOREWORK/cluster/staging/cluster.nix b/os/host_TOREWORK/cluster/staging/cluster.nix new file mode 100644 index 0000000..5007815 --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/cluster.nix @@ -0,0 +1,45 @@ +{ config, pkgs, ... } @ args: + +{ + deuxfleurs.cluster_name = "staging"; + + # Bootstrap nodes for Wesher overlay network + services.wesher.join = [ + "2a01:e0a:c:a720::21" # cariacou + "2a01:e0a:c:a720::22" # carcajou + "2a01:e0a:c:a720::23" # caribou + ]; + + # The IP range to use for the Wesher overlay of this cluster + deuxfleurs.wesher_cluster_prefix = "10.14.0.0"; + deuxfleurs.wesher_cluster_prefix_length = 16; + + # Bootstrap IPs for Consul cluster, + # these are IPs on the Wesher overlay + services.consul.extraConfig.retry_join = [ + "10.14.181.82" # caribou + "10.14.179.56" # cariacou + "10.14.252.121" # carcajou + ]; + + deuxfleurs.admin_accounts = { + lx = [ + # Keys for accessing nodes from outside + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIDdVbA9fEdqSr5UJ77NnoIqDTVp8ca5kHExhZYI4ecBExFJfonJllXMBN9KdC4ukxtY8Ug47PcMOfMaTBZQc+e+KpvDWpkBt15Xpem3RCxmMBES79sLL7LgtAdBXc5mNaCX8EOEVixWKdarjvxRyf6py6the51G5muaiMpoj5fae4ZpRGjhGTPefzc7y7zRWBUUZ8pYHW774BIaK6XT9gn3hyHV+Occjl/UODXvodktk55YtnuPi8adXTYEsHrVVz8AkFhx+cr0U/U8vtQnsTrZG+JmgQLqpXVs0RDw5bE1RefEbMuYNKxutYKUe3L+ZJtDe0M0MqOFI8a4F5TxP5 katchup@konata" + ]; + quentin = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDT1+H08FdUSvdPpPKdcafq4+JRHvFVjfvG5Id97LAoROmFRUb/ZOMTLdNuD7FqvW0Da5CPxIMr8ZxfrFLtpGyuG7qdI030iIRZPlKpBh37epZHaV+l9F4ZwJQMIBO9cuyLPXgsyvM/s7tDtrdK1k7JTf2EVvoirrjSzBaMhAnhi7//to8zvujDtgDZzy6aby75bAaDetlYPBq2brWehtrf9yDDG9WAMYJqp//scje/WmhbRR6eSdim1HaUcWk5+4ZPt8sQJcy8iWxQ4jtgjqTvMOe5v8ZPkxJNBine/ZKoJsv7FzKem00xEH7opzktaGukyEqH0VwOwKhmBiqsX2yN quentin@dufour.io" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBu+KUebaWwlugMC5fGbNhHc6IaQDAC6+1vMc4Ww7nVU1rs2nwI7L5qcWxOwNdhFaorZQZy/fJuCWdFbF61RCKGayBWPLZHGPsfqDuggYNEi1Qil1kpeCECfDQNjyMTK058ZBBhOWNMHBjlLWXUlRJDkRBBECY0vo4jRv22SvSaPUCAnkdJ9rbAp/kqb497PTIb2r1l1/ew8YdhINAlpYQFQezZVfkZdTKxt22n0QCjhupqjfh3gfNnbBX0z/iO+RvAOWRIZsjPFLC+jXl+n7cnu2cq1nvST5eHiYfXXeIgIwmeENLKqp+2Twr7PIdv22PnJkh6iR5kx7eTRxkNZdN quentin@deuxfleurs.fr" + ]; + adrien = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBfVX+iQSHl3V0el3/y2Rtl9Q/nrmLoTE3oXnR+16yX7g8HvzU871q89jbE/UWvNRvO4hirTcKF8yojuq8ZRCoUcQO+6/YlPrY/2G8kFhPTlUGDQ+mLT+ancZsom4mkg3I9oQjKZ9qxMD1GuU8Ydz4eXjhJ8OGFZhBpEgnrLmdA53Y5d2fCbaZN5EYD4sWEFYN7xBLxTGNwv0gygiPs967Z4/ZfHngTvqVoS9wnQThSCIoXPTWFAJCkN8dC5tPZwnbOT1bGcYUF0VTrcaD6cU6Q1ZRrtyqXxnnyxpQCAoe2hgdIm+LnDsBx9trfPauqi0dXi36X8pLmudW1f1RmKWT adrien@bacigalupi" + ]; + maximilien = [ + "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDHMMR6zNzz8NQU80wFquhUCeiXJuGphjP+zNouKbn228GyESu8sfNBwnuZq86vblR11Lz8l2rtCM73GfAKg29qmUWUHRKWvRIYWv2vaUJcCdy0bAxIzcvCvjZX0SpnIKxe9y3Rp0LGO5WLYfw0ZFaavwFZP0Z8w1Kj9/zBmL2X2avbhkaYHi/C1yXhbvESYQysmqLa48EX/TS616MBrgR9zbI9AoTQ9NOHnR14Tve/AP/khcZoBJdm4hTttMbNkEc0wonzdylTDew263SPRs/uoqnQIpUtErdPHqU10Yup8HjXjEyFJsSwcZcM5sZOw5JKckKJwmcd0yjO/x/4/Mk5" + ]; + kokakiwi = [ + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFPTsEgcOtb2bij+Ih8eg8ZqO7d3IMiWykv6deMzlSSS kokakiwi@kira" + ]; + }; +} diff --git a/os/host_TOREWORK/cluster/staging/node/carcajou.nix b/os/host_TOREWORK/cluster/staging/node/carcajou.nix new file mode 100644 index 0000000..dbcc5ec --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/carcajou.nix @@ -0,0 +1,27 @@ +# Configuration file local to this node + +{ config, pkgs, ... }: + +{ + imports = + [ + ./remote-unlock.nix + ]; + + # Use the systemd-boot EFI boot loader. + boot.loader.systemd-boot.enable = true; + boot.loader.timeout = 20; + boot.loader.efi.canTouchEfiVariables = true; + + networking.hostName = "carcajou"; + + deuxfleurs.network_interface = "eno1"; + deuxfleurs.lan_ip = "192.168.1.22"; + deuxfleurs.ipv6 = "2a01:e0a:c:a720::22"; + + deuxfleurs.cluster_ip = "10.14.252.121"; + deuxfleurs.is_raft_server = true; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/staging/node/carcajou.site.nix b/os/host_TOREWORK/cluster/staging/node/carcajou.site.nix new file mode 120000 index 0000000..04ee36c --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/carcajou.site.nix @@ -0,0 +1 @@ +../site/neptune.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/staging/node/cariacou.nix b/os/host_TOREWORK/cluster/staging/node/cariacou.nix new file mode 100644 index 0000000..14d1842 --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/cariacou.nix @@ -0,0 +1,27 @@ +# Configuration file local to this node + +{ config, pkgs, ... }: + +{ + imports = + [ + ./remote-unlock.nix + ]; + + # Use the systemd-boot EFI boot loader. + boot.loader.systemd-boot.enable = true; + boot.loader.timeout = 20; + boot.loader.efi.canTouchEfiVariables = true; + + networking.hostName = "cariacou"; + + deuxfleurs.network_interface = "eno1"; + deuxfleurs.lan_ip = "192.168.1.21"; + deuxfleurs.ipv6 = "2a01:e0a:c:a720::21"; + + deuxfleurs.cluster_ip = "10.14.179.56"; + deuxfleurs.is_raft_server = true; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/staging/node/cariacou.site.nix b/os/host_TOREWORK/cluster/staging/node/cariacou.site.nix new file mode 120000 index 0000000..04ee36c --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/cariacou.site.nix @@ -0,0 +1 @@ +../site/neptune.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/staging/node/caribou.nix b/os/host_TOREWORK/cluster/staging/node/caribou.nix new file mode 100644 index 0000000..3b41972 --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/caribou.nix @@ -0,0 +1,27 @@ +# Configuration file local to this node + +{ config, pkgs, ... }: + +{ + imports = + [ + ./remote-unlock.nix + ]; + + # Use the systemd-boot EFI boot loader. 
+ boot.loader.systemd-boot.enable = true; + boot.loader.timeout = 20; + boot.loader.efi.canTouchEfiVariables = true; + + networking.hostName = "caribou"; + + deuxfleurs.network_interface = "eno1"; + deuxfleurs.lan_ip = "192.168.1.23"; + deuxfleurs.ipv6 = "2a01:e0a:c:a720::23"; + + deuxfleurs.cluster_ip = "10.14.181.82"; + deuxfleurs.is_raft_server = true; + + # Enable netdata monitoring + services.netdata.enable = true; +} diff --git a/os/host_TOREWORK/cluster/staging/node/caribou.site.nix b/os/host_TOREWORK/cluster/staging/node/caribou.site.nix new file mode 120000 index 0000000..04ee36c --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/caribou.site.nix @@ -0,0 +1 @@ +../site/neptune.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/staging/node/spoutnik.nix b/os/host_TOREWORK/cluster/staging/node/spoutnik.nix new file mode 100644 index 0000000..060d77d --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/spoutnik.nix @@ -0,0 +1,58 @@ +# Edit this configuration file to define what should be installed on +# your system. Help is available in the configuration.nix(5) man page +# and in the NixOS manual (accessible by running ‘nixos-help’). + +{ config, pkgs, ... }: + +{ + boot.loader.grub.enable = true; + boot.loader.grub.version = 2; + boot.loader.grub.device = "/dev/sda"; # or "nodev" for efi only + + networking.hostName = "spoutnik"; + services.openssh.ports = [ 220 ]; + + deuxfleurs.network_interface = "enp0s25"; + deuxfleurs.lan_ip = "192.168.0.40"; + deuxfleurs.ipv6 = "::"; #TODO + + deuxfleurs.cluster_ip = "10.14.0.0"; + deuxfleurs.is_raft_server = false; #TODO + + # Nginx configuration: + + services.nginx = { + enable = true; + + # Use recommended settings + recommendedGzipSettings = true; + recommendedOptimisation = true; + recommendedProxySettings = true; + recommendedTlsSettings = true; + + # Add any further config to match your needs, e.g.: + virtualHosts = let + base = locations: { + inherit locations; + + forceSSL = true; + enableACME = true; + }; + proxy = addr: port: base { + "/".proxyPass = "http://" + addr + ":" + toString(port); + }; + in { + "axl.deuxfleurs.fr" = proxy "192.168.0.60" 80; + "warez.luxeylab.net" = proxy "192.168.0.50" 80; + }; + }; + + + # ACME: + + security.acme = { + acceptTerms = true; + email = "adrien@luxeylab.net"; + }; +} + diff --git a/os/host_TOREWORK/cluster/staging/node/spoutnik.site.nix b/os/host_TOREWORK/cluster/staging/node/spoutnik.site.nix new file mode 120000 index 0000000..87c7991 --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/node/spoutnik.site.nix @@ -0,0 +1 @@ +../site/pluton.nix \ No newline at end of file diff --git a/os/host_TOREWORK/cluster/staging/site/neptune.nix b/os/host_TOREWORK/cluster/staging/site/neptune.nix new file mode 100644 index 0000000..38a4bab --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/site/neptune.nix @@ -0,0 +1,20 @@ +{ config, pkgs, ... 
}: + +{ + deuxfleurs.site_name = "neptune"; + deuxfleurs.lan_default_gateway = "192.168.1.254"; + deuxfleurs.lan_ip_prefix_length = 24; + deuxfleurs.ipv6_prefix_length = 64; + + networking.nameservers = [ "192.168.1.254" ]; + + networking.firewall.allowedTCPPorts = [ 80 443 ]; + + services.cron = { + enable = true; + systemCronJobs = [ + "0 2 * * * root nix-collect-garbage --delete-older-than 10d >> /root/nix_gc_log 2>&1" + "30 2 1 * * root docker run --rm -v /var/lib/drone/nix:/nix nixpkgs/nix:nixos-21.05 nix-collect-garbage --delete-older-than 30d >> /root/drone_nix_gc_log 2>&1" + ]; + }; +} diff --git a/os/host_TOREWORK/cluster/staging/site/pluton.nix b/os/host_TOREWORK/cluster/staging/site/pluton.nix new file mode 100644 index 0000000..9f972c0 --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/site/pluton.nix @@ -0,0 +1,13 @@ +{ config, pkgs, ... }: + +{ + networking.defaultGateway = { + address = "192.168.0.1"; + interface = "enp0s25"; + }; + networking.nameservers = [ "213.186.33.99" "172.104.136.243" ]; + + deuxfleurs.site_name = "pluton"; + + networking.firewall.allowedTCPPorts = [ 80 443 ]; +} diff --git a/os/host_TOREWORK/cluster/staging/ssh_config b/os/host_TOREWORK/cluster/staging/ssh_config new file mode 100644 index 0000000..9bc4e6e --- /dev/null +++ b/os/host_TOREWORK/cluster/staging/ssh_config @@ -0,0 +1,14 @@ +UserKnownHostsFile ./ssh_known_hosts + +Host caribou + HostName 2a01:e0a:c:a720::23 + +Host carcajou + HostName 2a01:e0a:c:a720::22 + +Host cariacou + HostName 2a01:e0a:c:a720::21 + +Host spoutnik + HostName 10.42.0.2 + Port 220 diff --git a/os/host_TOREWORK/configuration.nix b/os/host_TOREWORK/configuration.nix new file mode 100644 index 0000000..984307c --- /dev/null +++ b/os/host_TOREWORK/configuration.nix @@ -0,0 +1,94 @@ +# Edit this configuration file to define what should be installed on +# your system. Help is available in the configuration.nix(5) man page +# and in the NixOS manual (accessible by running ‘nixos-help’). + +{ config, pkgs, ... } @ args: + +# Configuration local for this cluster node (hostname, IP, etc) +{ + imports = + [ # Include the results of the hardware scan. + ./hardware-configuration.nix + # Include generic Deuxfleurs module + ./deuxfleurs.nix + # Wesher module + ./wesher_service.nix + # Configuration for this deployment (a cluster) + ./cluster.nix + # Configuration local for this Deuxfleurs site (set of nodes) + ./site.nix + # Configuration local for this cluster node (hostname, IP, etc) + ./node.nix + ]; + + nixpkgs.overlays = [ + (import ./wesher.nix) + ]; + + # The global useDHCP flag is deprecated, therefore explicitly set to false here. + # Per-interface useDHCP will be mandatory in the future, so this generated config + # replicates the default behaviour. + networking.useDHCP = false; + + # Set your time zone. + time.timeZone = "Europe/Paris"; + + # Select internationalisation properties. + # i18n.defaultLocale = "en_US.UTF-8"; + console = { + font = "sun12x22"; + keyMap = "fr"; + }; + + boot.kernel.sysctl = { + "vm.max_map_count" = 262144; + }; + + services.journald.extraConfig = '' +SystemMaxUse=1G + ''; + + # List packages installed in system profile. 
To search, run: + # $ nix search wget + environment.systemPackages = with pkgs; [ + nmap + bind + inetutils + pciutils + vim + tmux + ncdu + iotop + jnettop + nethogs + wget + htop + smartmontools + links + git + rclone + docker + docker-compose + wireguard + wesher + ]; + + programs.vim.defaultEditor = true; + + # Enable network time + services.ntp.enable = true; + + # Enable the OpenSSH daemon and disable password login. + services.openssh.enable = true; + services.openssh.passwordAuthentication = false; + + + # This value determines the NixOS release from which the default + # settings for stateful data, like file locations and database versions + # on your system were taken. It's perfectly fine and recommended to leave + # this value at the release version of the first install of this system. + # Before changing this value read the documentation for this option + # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html). + system.stateVersion = "21.05"; # Did you read the comment? +} + diff --git a/os/modules/deuxfleurs.nix b/os/modules/deuxfleurs.nix new file mode 100644 index 0000000..2050776 --- /dev/null +++ b/os/modules/deuxfleurs.nix @@ -0,0 +1,234 @@ +{ config, pkgs, ... }: + +let + cfg = config.deuxfleurs; +in + with builtins; + with pkgs.lib; +{ + options.deuxfleurs = + { + # Parameters for individual nodes + network_interface = mkOption { + description = "Network interface name to configure"; + type = types.str; + }; + lan_ip = mkOption { + description = "IP address of this node on the local network interface"; + type = types.str; + }; + lan_ip_prefix_length = mkOption { + description = "Prefix length associated with lan_ip"; + type = types.int; + }; + ipv6 = mkOption { + description = "Public IPv6 address of this node"; + type = types.str; + }; + ipv6_prefix_length = mkOption { + description = "Prefix length associated with ipv6"; + type = types.int; + }; + + wesher_cluster_prefix = mkOption { + description = "IP address prefix for the Wesher overlay network"; + type = types.str; + }; + wesher_cluster_prefix_length = mkOption { + description = "IP address prefix length for the Wesher overlay network"; + type = types.int; + default = 16; + }; + + cluster_ip = mkOption { + description = "IP address of this node on the Wesher mesh network"; + type = types.str; + }; + is_raft_server = mkOption { + description = "Make this node a RAFT server for the Nomad and Consul deployments"; + type = types.bool; + default = false; + }; + + + # Parameters that generally vary between sites + lan_default_gateway = mkOption { + description = "IP address of the default route on the local network interface"; + type = types.str; + }; + site_name = mkOption { + description = "Site (availability zone) on which this node is deployed"; + type = types.str; + }; + + # Parameters common to all nodes + cluster_name = mkOption { + description = "Name of this Deuxfleurs deployment"; + type = types.str; + }; + admin_accounts = mkOption { + description = "List of users having an admin account on cluster nodes; maps user names to a list of authorized SSH keys"; + type = types.attrsOf (types.listOf types.str); + }; + }; + + config = { + # Configure admin accounts on all nodes + users.users = builtins.mapAttrs (name: publicKeys: { + isNormalUser = true; + extraGroups = [ "wheel" ]; + openssh.authorizedKeys.keys = publicKeys; + }) cfg.admin_accounts; + + # Configure network interfaces + networking.interfaces = attrsets.setAttrByPath [ cfg.network_interface ] { + useDHCP = false; + ipv4.addresses
= [ + { + address = cfg.lan_ip; + prefixLength = cfg.lan_ip_prefix_length; + } + ]; + ipv6.addresses = [ + { + address = cfg.ipv6; + prefixLength = cfg.ipv6_prefix_length; + } + ]; + }; + networking.defaultGateway = { + address = cfg.lan_default_gateway; + interface = cfg.network_interface; + }; + + # wesher overlay network + services.wesher = { + enable = true; + bindAddr = cfg.ipv6; + overlayNet = "${cfg.wesher_cluster_prefix}/${toString cfg.wesher_cluster_prefix_length}"; + interface = "wg0"; + logLevel = "debug"; + }; + + # Configure /etc/hosts to link all hostnames to their Wireguard IP + #networking.extraHosts = builtins.concatStringsSep "\n" (map + # ({ hostname, IP, ...}: "${IP} ${hostname}") + # (cfg.cluster_nodes ++ cfg.admin_nodes)); + + # Enable Hashicorp Consul & Nomad + services.consul.enable = true; + services.consul.extraConfig = + (if cfg.is_raft_server + then { + server = true; + bootstrap_expect = 3; + } + else {}) // + { + datacenter = cfg.cluster_name; + node_meta = { + "site" = cfg.site_name; + }; + ui = true; + bind_addr = "${cfg.cluster_ip}"; + + ports.http = -1; + addresses.https = "0.0.0.0"; + ports.https = 8501; + + ca_file = "/var/lib/consul/pki/consul-ca.crt"; + cert_file = "/var/lib/consul/pki/consul2022.crt"; + key_file = "/var/lib/consul/pki/consul2022.key"; + verify_incoming = true; + verify_outgoing = true; + verify_server_hostname = true; + }; + + services.nomad.enable = true; + services.nomad.package = pkgs.nomad_1_1; + services.nomad.settings = + (if cfg.is_raft_server + then { server = { + enabled = true; + bootstrap_expect = 3; + }; } + else {}) // + { + region = cfg.cluster_name; + datacenter = cfg.site_name; + advertise = { + rpc = "${cfg.cluster_ip}"; + http = "${cfg.cluster_ip}"; + serf = "${cfg.cluster_ip}"; + }; + consul = { + address = "localhost:8501"; + ca_file = "/var/lib/nomad/pki/consul2022.crt"; + cert_file = "/var/lib/nomad/pki/consul2022-client.crt"; + key_file = "/var/lib/nomad/pki/consul2022-client.key"; + ssl = true; + }; + client = { + enabled = true; + network_interface = "wg0"; + meta = { + "site" = cfg.site_name; + }; + }; + tls = { + http = true; + rpc = true; + ca_file = "/var/lib/nomad/pki/nomad-ca.crt"; + cert_file = "/var/lib/nomad/pki/nomad2022.crt"; + key_file = "/var/lib/nomad/pki/nomad2022.key"; + verify_server_hostname = true; + verify_https_client = true; + }; + plugin = [ + { + docker = [ + { + config = [ + { + volumes.enabled = true; + allow_privileged = true; + } + ]; + } + ]; + } + ]; + }; + + # ---- Firewall config ---- + + # Open ports in the firewall. 
+ networking.firewall = { + enable = true; + + # Allow anyone to connect on SSH port + allowedTCPPorts = [ + (builtins.head ({ openssh.ports = [22]; } // config.services).openssh.ports) + ]; + + # Allow specific hosts access to specific things in the cluster + extraCommands = '' + # Allow everything from router (useful for UPnP/IGD) + iptables -A INPUT -s 192.168.1.254 -j ACCEPT + + # Allow docker containers to access all ports + iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT + + # Allow other nodes on VPN to access all ports + iptables -A INPUT -s ${cfg.wesher_cluster_prefix}/${toString cfg.wesher_cluster_prefix_length} -j ACCEPT + ''; + + # When stopping the firewall, delete all rules that were configured manually above + extraStopCommands = '' + iptables -D INPUT -s 192.168.1.254 -j ACCEPT + iptables -D INPUT -s 172.17.0.0/16 -j ACCEPT + iptables -D INPUT -s ${cfg.wesher_cluster_prefix}/${toString cfg.wesher_cluster_prefix_length} -j ACCEPT + ''; + }; + }; +} diff --git a/os/modules/remote-unlock.nix b/os/modules/remote-unlock.nix new file mode 100644 index 0000000..2975a94 --- /dev/null +++ b/os/modules/remote-unlock.nix @@ -0,0 +1,26 @@ +{ config, pkgs, ... }: + + with builtins; + with pkgs.lib; +{ + config = { + boot.initrd.availableKernelModules = [ "pps_core" "ptp" "e1000e" ]; + boot.initrd.network.enable = true; + boot.initrd.network.ssh = { + enable = true; + port = 222; + authorizedKeys = concatLists (mapAttrsToList (name: user: user) config.deuxfleurs.admin_accounts); + hostKeys = [ "/var/lib/deuxfleurs/remote-unlock/ssh_host_ed25519_key" ]; + }; + boot.initrd.network.postCommands = '' + ip addr add ${config.deuxfleurs.lan_ip}/${toString config.deuxfleurs.lan_ip_prefix_length} dev ${config.deuxfleurs.network_interface} + ip link set dev ${config.deuxfleurs.network_interface} up + ip route add default via ${config.deuxfleurs.lan_default_gateway} dev ${config.deuxfleurs.network_interface} + ip a + ip route + ping -c 4 ${config.deuxfleurs.lan_default_gateway} + echo 'echo run cryptsetup-askpass to unlock drives' >> /root/.profile + ''; + }; +} + diff --git a/os/modules/wesher_service.nix b/os/modules/wesher_service.nix new file mode 100644 index 0000000..d269a2f --- /dev/null +++ b/os/modules/wesher_service.nix @@ -0,0 +1,137 @@ +{ config, lib, pkgs, ... }: +with lib; +let + keysPath = "/var/lib/wesher/secrets"; + cfg = config.services.wesher; +in { + options = with types; { + services.wesher = { + enable = mkEnableOption "wesher wireguard overlay mesh network manager"; + + package = mkOption { + type = package; + default = pkgs.wesher; + defaultText = literalExpression "pkgs.wesher"; + description = "Wesher package to use."; + }; + + clusterKey = mkOption { + type = nullOr str; + default = null; + description = "shared key for cluster membership to use on first initialization, if no key was previously used by Wesher. Must be 32 bytes base64 encoded; will be generated if not provided. 
Setting this parameter value will not overwrite an existing cluster key; to do so please delete ${keysPath}"; + }; + + bindAddr = mkOption { + type = nullOr str; + default = null; + description = "IP address to bind to for cluster membership (cannot be used with --bind-iface)"; + }; + + bindIface = mkOption { + type = nullOr str; + default = null; + description = "Interface to bind to for cluster membership (cannot be used with --bind-addr)"; + }; + + join = mkOption { + type = listOf str; + default = []; + description = "list of hostnames or IP addresses of existing cluster members; if not provided, will attempt resuming any known state or otherwise wait for further members"; + }; + + clusterPort = mkOption { + type = port; + default = 7946; + description = "port used for membership gossip traffic (both TCP and UDP); must be the same across the cluster"; + }; + + wireguardPort = mkOption { + type = port; + default = 51820; + description = "port used for wireguard traffic (UDP); must be the same across the cluster"; + }; + + overlayNet = mkOption { + type = str; + default = "10.0.0.0/8"; + description = "the network in which to allocate addresses for the overlay mesh network (CIDR format); smaller networks increase the chance of IP collision"; + }; + + interface = mkOption { + type = str; + default = "wgoverlay"; + description = "name of the wireguard interface to create and manage"; + }; + + logLevel = mkOption { + type = str; + default = "warn"; + description = "set the verbosity (one of debug/info/warn/error)"; + }; + + }; + }; + + config = mkIf cfg.enable (let binWesher = cfg.package + "/bin/wesher"; + in { + system.activationScripts.wesher = if (cfg.clusterKey != null) then '' + if [ ! -e ${keysPath} ] + then + mkdir --mode=700 -p ${builtins.dirOf keysPath} + echo "WESHER_CLUSTER_KEY=${cfg.clusterKey}" > ${keysPath} + fi + '' else '' + if [ ! 
-e ${keysPath} ] + then + mkdir --mode=700 -p ${builtins.dirOf keysPath} + echo "WESHER_CLUSTER_KEY=$(head -c 32 /dev/urandom | base64)" > ${keysPath} + fi + ''; + + systemd.services.wesher = { + description = "wesher wireguard overlay mesh network manager"; + bindsTo = [ "network-online.target" ]; + after = [ "network-online.target" ]; + wantedBy = [ "multi-user.target" ]; + + environment = { + WESHER_JOIN = builtins.concatStringsSep "," cfg.join; + WESHER_CLUSTER_PORT = builtins.toString cfg.clusterPort; + WESHER_WIREGUARD_PORT = builtins.toString cfg.wireguardPort; + WESHER_OVERLAY_NET = cfg.overlayNet; + WESHER_INTERFACE = cfg.interface; + WESHER_LOG_LEVEL = cfg.logLevel; + WESHER_NO_ETC_HOSTS = "true"; + } + // (if (cfg.bindAddr != null) then { WESHER_BIND_ADDR = cfg.bindAddr; } else {}) + // (if (cfg.bindIface != null) then { WESHER_BIND_IFACE = cfg.bindIface; } else {}) + ; + + serviceConfig = { + ExecStart = "${binWesher}"; + Restart = "always"; + + EnvironmentFile = keysPath; + + User = "wesher"; + DynamicUser = true; + StateDirectory = "wesher"; + + AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE"; + CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE"; + MemoryDenyWriteExecute = true; + ProtectControlGroups = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK"; + RestrictNamespaces = true; + RestrictRealtime = true; + SystemCallArchitectures = "native"; + SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @resources"; + }; + }; + + networking.firewall.allowedUDPPorts = mkIf cfg.enable [ cfg.clusterPort cfg.wireguardPort ]; + networking.firewall.allowedTCPPorts = mkIf cfg.enable [ cfg.clusterPort ]; + }); +} -- cgit v1.2.3