{ config, pkgs, ... }:
let
cfg = config.deuxfleurs;
in
with builtins;
with pkgs.lib;
{
options.deuxfleurs =
  let
    # Shape of one entry of `cluster_nodes`: identity and Wireguard
    # connectivity information for a single cluster member.
    wireguardNode = with types; submodule {
      options = {
        hostname = mkOption {
          type = str;
          description = "Host name";
        };
        site_name = mkOption {
          type = nullOr str;
          description = "Site where the node is located";
          default = null;
        };
        IP = mkOption {
          type = str;
          description = "IP Address in the Wireguard network";
        };
        publicKey = mkOption {
          type = str;
          description = "Public key";
        };
        endpoint = mkOption {
          type = nullOr str;
          default = null;
          description = "Wireguard endpoint on the public Internet";
        };
      };
    };
  in
  {
    # ---- Parameters for individual nodes ----
    ipv6 = mkOption {
      description = "Static public IPv6 address of this node";
      type = types.str;
    };
    # Both sub-options must be set for static IPv4 configuration to be
    # used; otherwise the node falls back to DHCP (see config below).
    staticIPv4 = {
      address = mkOption {
        description = "IP address (with prefix length) of this node on the local network interface";
        type = types.nullOr types.str;
        default = null;
      };
      defaultGateway = mkOption {
        description = "IPv4 address of the default route on the local network interface";
        type = types.nullOr types.str;
        default = null;
      };
    };
    cluster_ip = mkOption {
      description = "IP address of this node on the Wesher mesh network";
      type = types.str;
    };
    is_raft_server = mkOption {
      description = "Make this node a RAFT server for the Nomad and Consul deployments";
      type = types.bool;
      default = false;
    };
    # ---- Parameters that generally vary between sites ----
    site_name = mkOption {
      description = "Site (availability zone) on which this node is deployed";
      type = types.str;
    };
    public_ipv4 = mkOption {
      description = "Public IPv4 through which this node is accessible (possibly after port opening using DiploNAT), for domain names that are updated by D53";
      type = types.nullOr types.str;
      default = null;
    };
    cname_target = mkOption {
      description = "DNS CNAME target to use for services hosted in this site, for domain names that are updated by D53";
      type = types.nullOr types.str;
      default = null;
    };
    # ---- Parameters common to all nodes ----
    cluster_name = mkOption {
      description = "Name of this Deuxfleurs deployment";
      type = types.str;
    };
    cluster_prefix = mkOption {
      description = "IP address prefix for the Wireguard overlay network";
      type = types.str;
    };
    cluster_prefix_length = mkOption {
      description = "IP address prefix length for the Wireguard overlay network";
      type = types.int;
      default = 16;
    };
    cluster_nodes = mkOption {
      description = "Nodes that are part of the cluster";
      type = types.listOf wireguardNode;
    };
    admin_accounts = mkOption {
      description = "List of users having an admin account on cluster nodes, maps user names to a list of authorized SSH keys";
      type = types.attrsOf (types.listOf types.str);
    };
    bootstrap = mkOption {
      description = "Whether to enable bootstrapping for Nomad and Consul";
      type = types.bool;
      default = false;
    };
    # ---- Options that generally stay at their default value ----
    wireguardPort = mkOption {
      description = "Port for incoming Wireguard VPN connections";
      type = types.port;
      default = 33799;
    };
  };
# Sub-modules pulled into this configuration; ./wgautomesh.nix provides the
# services.wgautomesh options used in `config` below.
imports = [
./wgautomesh.nix
];
config =
# node_meta: metadata attached to this node, used both by Consul
# (services.consul.extraConfig) and by the Nomad client
# (services.nomad.settings.client.meta).  The optional entries are only
# present when the corresponding option is non-null.
let node_meta = {
"site" = cfg.site_name;
"public_ipv6" = cfg.ipv6;
} //
(if cfg.public_ipv4 != null
then { "public_ipv4" = cfg.public_ipv4; }
else {}) //
(if cfg.cname_target != null
then { "cname_target" = cfg.cname_target; }
else {});
in
{
# Create a normal user account with sudo rights (group "wheel") for every
# entry of admin_accounts, authorizing the listed SSH public keys.
users.users = mapAttrs
  (username: sshKeys: {
    isNormalUser = true;
    extraGroups = [ "wheel" ];
    openssh.authorizedKeys.keys = sshKeys;
  })
  cfg.admin_accounts;
# Configure network interfaces through systemd-networkd
networking.useDHCP = false;
networking.useNetworkd = true;
systemd.network.networks = {
  "10-uplink" =
    let
      # Settings shared by the DHCP and the static IPv4 configurations:
      # match wired uplink interfaces and derive a stable IPv6 address
      # from router advertisements using the configured static token.
      common = {
        matchConfig = {
          Name = "en* eth*";
        };
        ipv6AcceptRAConfig = {
          Token = "static:${cfg.ipv6}";
          UseDNS = false;
        };
      };
      # Static IPv4 is only used when both the address and the default
      # gateway are provided; otherwise fall back to DHCP.
      haveStaticIPv4 =
        cfg.staticIPv4.address != null && cfg.staticIPv4.defaultGateway != null;
      dhcpSettings = {
        networkConfig = {
          DHCP = "ipv4";
        };
        dhcpV4Config = {
          UseDNS = false;
        };
      };
      staticSettings = {
        address = [
          "${cfg.staticIPv4.address}"
        ];
        routes = [
          {
            routeConfig = {
              Gateway = cfg.staticIPv4.defaultGateway;
              # GatewayOnLink: tell the kernel the gateway is directly
              # reachable on this link, so the route can be installed even
              # if the gateway is outside the configured prefix.
              GatewayOnLink = true;
            };
          }
        ];
      };
    in
    common // (if haveStaticIPv4 then staticSettings else dhcpSettings);
};
# Configure Unbound as a central DNS server for everything
# - is its own recursor (applies DNSSec) for everything,
# no need to declare an outside nameserver
# - redirects to Consul queries under .consul
services.unbound = {
enable = true;
settings = {
server = {
# Listen on loopback and on 172.17.0.1 (presumably the Docker bridge
# address, cf. the 172.17.0.0/16 firewall rule below — TODO confirm).
interface = [ "127.0.0.1" "172.17.0.1" ];
# The .consul zone is served by Consul and is not DNSSEC-signed, so it
# must be exempted from validation and from the default local zones.
domain-insecure = [ "consul." ];
local-zone = [ "consul. nodefault" ];
log-servfail = true;
# Only local processes and Docker containers may query this resolver.
access-control = [
"127.0.0.0/8 allow"
"172.17.0.0/16 allow"
];
};
stub-zone = [
# Forward .consul queries to Consul daemon
{
name = "consul.";
# Consul answers DNS queries on port 8600 on the cluster address.
stub-addr = "${cfg.cluster_ip}@8600";
stub-no-cache = true;
stub-tcp-upstream = false;
stub-tls-upstream = false;
}
];
};
resolveLocalQueries = true;
};
# Disable systemd-resolved in favor of Unbound as the local resolver.
services.resolved.enable = false;
# Configure Wireguard VPN between all nodes
networking.wireguard.interfaces.wg0 = {
  # Use the configured overlay prefix length rather than a hard-coded /16,
  # keeping this in sync with cluster_prefix_length (default 16, so the
  # default behavior is unchanged) and with the firewall rules below.
  ips = [ "${cfg.cluster_ip}/${toString cfg.cluster_prefix_length}" ];
  listenPort = cfg.wireguardPort;
  # The private key is generated on first activation by the
  # generate_df_wg_key activation script below.
  privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
  mtu = 1420;
};
# wgautomesh discovers the other nodes' Wireguard endpoints and keeps the
# mesh of peers up to date on interface wg0.
services.wgautomesh = {
enable = true;
interface = "wg0";
gossipPort = 1666;
gossipSecretFile = "/var/lib/wgautomesh/gossip_secret";
persistFile = "/var/lib/wgautomesh/state";
# If this node has a public endpoint of the form "host:port", ask
# wgautomesh to forward that port via UPnP/IGD.  NOTE(review):
# builtins.split returns matched groups interleaved with the unmatched
# parts, so `lists.last (split ":" endpoint)` is the text after the last
# ":", i.e. the port number.
upnpForwardPublicPort =
let
# The cluster_nodes entry describing this very machine, if any
# (matched by hostname).
us = filter ({ hostname, ...}: hostname == config.networking.hostName) cfg.cluster_nodes;
in
if length us > 0 && (head us).endpoint != null then
strings.toInt (lists.last (split ":" (head us).endpoint))
else null;
# Advertise every cluster node as a wgautomesh peer.
peers = map ({ publicKey, endpoint, IP, ... }: {
address = IP;
pubkey = publicKey;
endpoint = endpoint;
}) cfg.cluster_nodes;
};
# Old code for wg-quick, we can use this as a fallback if we fail to make wgautomesh work
# systemd.services."wg-quick-wg0".after = [ "unbound.service" ];
# networking.wg-quick.interfaces.wg0 = {
# address = [ "${cfg.cluster_ip}/16" ];
# listenPort = cfg.wireguardPort;
# privateKeyFile = "/var/lib/deuxfleurs/wireguard-keys/private";
# mtu = 1420;
# peers = map ({ publicKey, endpoint, IP, ... }: {
# inherit publicKey endpoint;
# allowedIPs = [ "${IP}/32" ];
# persistentKeepalive = 25;
# };
# On first activation, generate the node's Wireguard private key (with
# umask 077 so the file is not world-readable) and print the derived
# public key so it can be added to cluster_nodes.  Subsequent activations
# leave an existing key untouched.
system.activationScripts.generate_df_wg_key = ''
if [ ! -f /var/lib/deuxfleurs/wireguard-keys/private ]; then
mkdir -p /var/lib/deuxfleurs/wireguard-keys
(umask 077; ${pkgs.wireguard-tools}/bin/wg genkey > /var/lib/deuxfleurs/wireguard-keys/private)
echo "New Wireguard key was generated."
echo "This node's Wireguard public key is: $(${pkgs.wireguard-tools}/bin/wg pubkey < /var/lib/deuxfleurs/wireguard-keys/private)"
fi
'';
# Configure /etc/hosts so that every cluster hostname resolves to its
# Wireguard overlay IP (one "IP hostname" line per node).
networking.extraHosts = concatMapStringsSep "\n"
  ({ hostname, IP, ... }: "${IP} ${hostname}")
  cfg.cluster_nodes;
# Enable Hashicorp Consul & Nomad
services.consul.enable = true;
# Start Consul only once the overlay network is up.  The VPN is managed by
# networking.wireguard.interfaces.wg0 above, whose systemd unit is
# "wireguard-wg0.service"; the previous "wg-quick-wg0.service" referred to
# the retired wg-quick configuration (commented out above) and never
# matched an existing unit, making the ordering dependency a no-op.
systemd.services.consul.after = [ "wireguard-wg0.service" ];
services.consul.extraConfig =
  # Raft servers additionally enable server mode and, during initial
  # cluster bootstrap, wait for a quorum of 3 servers.
  (if cfg.is_raft_server
    then { server = true; }
      // (if cfg.bootstrap then { bootstrap_expect = 3; } else {})
    else {}) //
  {
    inherit node_meta;
    datacenter = cfg.cluster_name;
    ui_config = {
      enabled = true;
    };
    # Bind agent communication to the overlay address, but expose the
    # HTTPS API and DNS on all addresses.
    bind_addr = "${cfg.cluster_ip}";
    addresses = {
      https = "0.0.0.0";
      dns = "0.0.0.0";
    };
    ports = {
      http = -1; # plain-HTTP API disabled, TLS only
      https = 8501;
    };
    performance = {
      rpc_hold_timeout = "70s";
    };
    # Mutual TLS between all Consul agents.
    ca_file = "/var/lib/consul/pki/consul-ca.crt";
    cert_file = "/var/lib/consul/pki/consul.crt";
    key_file = "/var/lib/consul/pki/consul.key";
    verify_incoming = true;
    verify_outgoing = true;
    verify_server_hostname = true;
  };
services.nomad.enable = true;
# Start Nomad only once the overlay network is up.  The VPN is managed by
# networking.wireguard.interfaces.wg0 above, whose systemd unit is
# "wireguard-wg0.service"; the previous "wg-quick-wg0.service" referred to
# the retired wg-quick configuration (commented out above) and never
# matched an existing unit, making the ordering dependency a no-op.
systemd.services.nomad.after = [ "wireguard-wg0.service" ];
services.nomad.package = pkgs.nomad_1_4;
# Extra packages made available to Nomad and its task drivers.
services.nomad.extraPackages = [
  pkgs.glibc
  pkgs.zstd
];
services.nomad.settings =
  # Raft servers additionally enable server mode and, during initial
  # cluster bootstrap, wait for a quorum of 3 servers.
  (if cfg.is_raft_server
    then {
      server = { enabled = true; }
        // (if cfg.bootstrap then { bootstrap_expect = 3; } else {});
    } else {}) //
  {
    region = cfg.cluster_name;
    datacenter = cfg.site_name;
    # Advertise the overlay (Wireguard) address to the other nodes.
    advertise = {
      rpc = "${cfg.cluster_ip}";
      http = "${cfg.cluster_ip}";
      serf = "${cfg.cluster_ip}";
    };
    # Talk to the local Consul agent over its TLS port (cf. Consul's
    # ports.https = 8501 above), with a dedicated client certificate.
    consul = {
      address = "localhost:8501";
      ca_file = "/var/lib/nomad/pki/consul.crt";
      cert_file = "/var/lib/nomad/pki/consul-client.crt";
      key_file = "/var/lib/nomad/pki/consul-client.key";
      ssl = true;
      checks_use_advertise = true;
    };
    # Every node runs a Nomad client bound to the overlay interface and
    # carrying the shared node_meta attributes.
    client = {
      enabled = true;
      network_interface = "wg0";
      meta = node_meta;
    };
    telemetry = {
      publish_allocation_metrics = true;
      publish_node_metrics = true;
      prometheus_metrics = true;
    };
    # Mutual TLS for the Nomad HTTP API and RPC.
    tls = {
      http = true;
      rpc = true;
      ca_file = "/var/lib/nomad/pki/nomad-ca.crt";
      cert_file = "/var/lib/nomad/pki/nomad.crt";
      key_file = "/var/lib/nomad/pki/nomad.key";
      verify_server_hostname = true;
      verify_https_client = true;
    };
    # Docker driver: allow volume mounts and privileged containers.
    plugin = [
      {
        docker = [
          {
            config = [
              {
                volumes.enabled = true;
                allow_privileged = true;
              }
            ];
          }
        ];
      }
    ];
  };
# ---- Firewall config ----
# Open ports in the firewall.
networking.firewall = {
  enable = true;
  allowedTCPPorts = [
    # Allow anyone to connect on the SSH port: use the OpenSSH
    # configuration when present, falling back to the default port 22.
    (head
      (if config.services ? openssh
        then config.services.openssh.ports
        else [ 22 ]))
  ];
  allowedUDPPorts = [
    # Allow peers to connect to Wireguard
    cfg.wireguardPort
  ];
  # Allow specific hosts access to specific things in the cluster
  extraCommands = ''
    # Allow UDP packets comming from port 1900 from a local address,
    # these are necessary for UPnP/IGD
    iptables -A INPUT -s 192.168.0.0/16 -p udp --sport 1900 -j ACCEPT
    # Allow docker containers to access all ports
    iptables -A INPUT -s 172.17.0.0/16 -j ACCEPT
    # Allow other nodes on VPN to access all ports
    iptables -A INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
  '';
  # When stopping firewall, delete all rules that were configured manually above
  extraStopCommands = ''
    iptables -D INPUT -s 192.168.0.0/16 -p udp --sport 1900 -j ACCEPT
    iptables -D INPUT -s 172.17.0.0/16 -j ACCEPT
    iptables -D INPUT -s ${cfg.cluster_prefix}/${toString cfg.cluster_prefix_length} -j ACCEPT
  '';
};
};
}