Remove old infra

This commit is contained in:
Paul-Henri Froidmont 2024-12-24 03:10:32 +01:00
parent 20c62089d7
commit a8de96fb43
Signed by: phfroidmont
GPG key ID: BE948AFD7E7873BE
12 changed files with 0 additions and 643 deletions

View file

@ -1,8 +1,6 @@
keys:
- &admin 3AC6F170F01133CE393BCD94BE948AFD7E7873BE
- &elios 0C143D8AFF5FBCD2293897658E66EDB0546158DF
- &server ebdabf42731801d79db14c893639d8f0c7ff61ed
- &storage1 7675e1c632a9a0644c6ab828dbcc48a5300773a8
- &hel1 0f0c4c2f9877cb8a53efadacb90613a2af502673
creation_rules:
- path_regex: secrets.enc.yml$
@ -10,6 +8,4 @@ creation_rules:
- pgp:
- *admin
- *elios
- *server
- *storage1
- *hel1

View file

@ -53,66 +53,6 @@
};
nixosConfigurations = {
db1 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = {
inherit nixpkgs inputs;
};
modules = [
sops-nix.nixosModules.sops
foundryvtt.nixosModules.foundryvtt
./profiles/db.nix
{
sops.defaultSopsFile = ./secrets.enc.yml;
networking.hostName = "db1";
networking.domain = "banditlair.com";
nix.registry.nixpkgs.flake = nixpkgs;
system.stateVersion = "21.05";
}
];
};
backend1 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = {
inherit nixpkgs inputs;
};
modules = [
defaultModuleArgs
sops-nix.nixosModules.sops
foundryvtt.nixosModules.foundryvtt
./profiles/backend.nix
{
sops.defaultSopsFile = ./secrets.enc.yml;
networking.hostName = "backend1";
networking.domain = "banditlair.com";
nix.registry.nixpkgs.flake = nixpkgs;
system.stateVersion = "21.05";
}
];
};
storage1 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = {
inherit nixpkgs inputs;
};
modules = [
defaultModuleArgs
sops-nix.nixosModules.sops
simple-nixos-mailserver.nixosModule
foundryvtt.nixosModules.foundryvtt
./profiles/storage.nix
{
sops.defaultSopsFile = ./secrets.enc.yml;
networking.hostName = "storage1";
networking.domain = "banditlair.com";
nix.registry.nixpkgs.flake = nixpkgs;
system.stateVersion = "21.05";
}
];
};
hel1 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = {
@ -147,18 +87,6 @@
};
in
{
db1 = {
hostname = "db1.banditlair.com";
profiles.system = createSystemProfile self.nixosConfigurations.db1;
};
backend1 = {
hostname = "backend1.banditlair.com";
profiles.system = createSystemProfile self.nixosConfigurations.backend1;
};
storage1 = {
hostname = "78.46.96.243";
profiles.system = createSystemProfile self.nixosConfigurations.storage1;
};
hel1 = {
hostname = "37.27.138.62";
profiles.system = createSystemProfile self.nixosConfigurations.hel1;

View file

@ -1,33 +0,0 @@
{ modulesPath, config, pkgs, ... }: {
  # Hardware profile for a Hetzner Cloud VM (QEMU guest, legacy BIOS boot).
  imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];

  # GRUB in the MBR of the single virtual disk; root is its first partition.
  boot.loader.grub.device = "/dev/sda";
  fileSystems."/" = {
    device = "/dev/sda1";
    fsType = "ext4";
  };

  time.timeZone = "Europe/Amsterdam";
  boot.tmp.cleanOnBoot = true;

  networking.firewall.allowPing = true;

  # Use classic ethN names and let systemd-networkd do DHCP; the scripted
  # dhcpcd-based NixOS networking is disabled entirely.
  networking.usePredictableInterfaceNames = false;
  networking.useDHCP = false;
  networking.dhcpcd.enable = false;
  systemd.network = {
    enable = true;
    # eth0: public interface.
    networks."10-wan" = {
      matchConfig.Name = "eth0";
      networkConfig.DHCP = "ipv4";
      # make routing on this interface a dependency for network-online.target
      linkConfig.RequiredForOnline = "routable";
    };
    # eth1: second interface, also DHCP-configured.
    # NOTE(review): presumably the Hetzner Cloud private network — confirm.
    networks."20-lan" = {
      matchConfig.Name = "eth1";
      networkConfig.DHCP = "ipv4";
      linkConfig.RequiredForOnline = "routable";
    };
  };
}

View file

@ -1,74 +0,0 @@
{ modulesPath, config, lib, pkgs, ... }:
# Hardware profile for a Hetzner dedicated server ("storage1"):
# 4 disks in a software RAID, legacy BIOS boot, static public addressing
# plus a Hetzner vSwitch VLAN for the private network.
{
  imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];

  boot.initrd.availableKernelModules = [ "ahci" "sd_mod" ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];

  # Assemble mdadm arrays at boot; HOMEHOST <ignore> matches the
  # "--homehost" handling used when the arrays were created, so device
  # names stay stable regardless of the machine's hostname.
  boot.swraid.enable = true;
  boot.swraid.mdadmConf = ''
    HOMEHOST <ignore>
    PROGRAM true
  '';

  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  # Legacy BIOS boot: install GRUB into the MBR of all four member disks
  # so the box still boots if one of them fails.
  boot.loader.systemd-boot.enable = false;
  boot.loader.grub = {
    enable = true;
    efiSupport = false;
    devices = [ "/dev/sda" "/dev/sdb" "/dev/sdc" "/dev/sdd" ];
  };

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/e5c27021-ce34-4680-ba6f-233070cb944f";
    fsType = "ext4";
  };
  swapDevices = [ ];

  time.timeZone = "Europe/Amsterdam";
  nix.settings.max-jobs = lib.mkDefault 8;
  powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";

  networking = {
    useDHCP = false;
    # Static public addressing on enp2s0; fe80::1 is the standard Hetzner
    # IPv6 gateway.
    defaultGateway = "78.46.96.225";
    defaultGateway6 = {
      address = "fe80::1";
      interface = "enp2s0";
    };
    # NOTE(review): these look like Hetzner's recursive resolvers — confirm.
    nameservers = [ "213.133.100.100" "213.133.99.99" "213.133.98.98" ];
    interfaces = {
      enp2s0 = {
        ipv4.addresses = [{
          address = "78.46.96.243";
          prefixLength = 24;
        }];
        ipv6.addresses = [{
          address = "2a01:4f8:120:8233::1";
          prefixLength = 64;
        }];
      };
      # Private network over VLAN 4001 (Hetzner vSwitch); MTU is reduced
      # to 1400 and 10.0.0.0/16 is routed via the vSwitch gateway.
      vlan4001 = {
        mtu = 1400;
        ipv4 = {
          addresses = [{
            address = "10.0.2.3";
            prefixLength = 24;
          }];
          routes = [{
            address = "10.0.0.0";
            prefixLength = 16;
            via = "10.0.2.1";
          }];
        };
      };
    };
    vlans.vlan4001 = {
      id = 4001;
      interface = "enp2s0";
    };
  };
}

View file

@ -1,28 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
xsFNBAAAAAABEADADktn4dr3tJuDBGF/DFn8bFWhSTdRjtguMV9zhgSug3OVyZpD
Vpz+Isud+fACIDRlOPlgw31hwbZX+qiwrO82636oiToawC1W8RNO1XWXsRv77MUT
aszON9t0LYQoGbp8Vg/5Lv3opuLigWVWf7V9GreBX68Q5QakDe2luliFxl53rQVm
vEXGXHl6JFPPRmEPeq2Axcz5kjcyz5wa7yYlmc9V2gNQ4UvUxhVWe7q9OnmogEbh
DAVdouWDiwJDBuGFcaenmP5p8f9gS/K9nzVBSxY7Ng8X0/VINuvAhn6Nw54rXYsa
J6w5XAoAvdCtt8k+smiDtaUId86ClARI3VzWgLgvSeHDheZwwOCzafAyfqymagXU
o/yIDDvpVImk5BScALFj2cBf4DIT6mftjBNCAxzE50Ze0oFHtjeKXrEu2Wq8MiCo
M6FHSuFKJs1N0oED7pPp3qosXykAU3fk1P1cGt1QVsG0vuvyClJczDOBG2OBE5+b
R0uGy2XQehdaAjG+dP2UprRlICVS0nQ3jBk6KcpIhBGYAidiSPdZrn4K0CJmmgde
iAWjvbarD2QTY9wTMWG4SDqVvy/59C57u+QOh9iEPsD8jOojnxPYJBKYHfC55xZk
lnaSmyfH0vVWPKk2sXCVDyh568g03tGqJ0hwyTDM8dFwSaweTNh0s7MnKQARAQAB
zSlyb290IChJbXBvcnRlZCBmcm9tIFNTSCkgPHJvb3RAbG9jYWxob3N0PsLBYgQT
AQgAFgUCAAAAAAkQNjnY8Mf/Ye0CGw8CGQEAAMZiEAC0x6gNgLVxUBQ4yXrO7fFi
EpIGWSX6AJhee53mZ0xLw+3RVBImCSwOKVPKWl0rTKQ9BpNsr2KEa5JHUGFSkr24
9isttUwunhVpTvRuPVsjOHD0LFUA53lVU8n3pqNuFPi3bt1wH89bn1Yjj6jhLahv
3/k6PhLpiIMp6lyE3KOik2eY9+KYbpZPqpKQ054Hrq+PAUug/A8KYY+/WU3i0ncl
1DWkRsuHNnV3JEXqMubxt9zPpyqhG+OtVwgf82+v18/DvcaU7SLEowBLXgf6hmF5
7zVMujr7NSqqR8v/5HuhNW7QNiQKKHSee3oCe2kdm9abPFGmIl613WH0lUncFOjN
iNYlGw0dNLJYb70zfOf0LSd0RAIkmzIF+UfFhpqv7PHYlJmld+P3AbpjdXGrB2rQ
a0tdchiP9qgyo9f4rXFybCykt9Bu/qnh3lHepm+6/lm7uA9RPiWN3U0qSaCAu0eR
JFfBviz7Kqvz3cxiamP7j6li/dyd9ONrfp7dJsf/yxypv7stKUQWhpMTytU+230j
+zR+7Gv0f/0sDPK46x/NF8BLW8dkTaGfrApOQahjGt1sy1fTwpCFqvIce716yjo8
K72JF5q6WeJWtdC35ZAbJ43GuM94CITOXYt3QJFIsnPf8jSJcJ0axxm9zoMCDra0
jDbjKwt16i8sc2nsdL/T+A==
=wX/N
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -1,28 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
xsFNBAAAAAABEACUt404dF2T1wYrbIyeCBAudXx/2TBCjaO/PO1fZJbX+Dcb2fqi
kU+w1j1DcMtF2Q/4k3oeEFGbJr/foxfgsC74dJ7QKVNSXR4uFH5Br1RpmhMvqwbB
bQOFjrWQHF3f5s2y3Dkc4cPtv3+BBUGdqIEZK+7PXBieJnzHIagz7uRb5hX1zje8
Q+tbyqrpDYitWLyLLlWGQ38Ur9yxGrspyHPFhPZkIigqWJjfmxYYl4sOGjoOm18m
ohu8CijRP6R7qsjf2oOOvCl1RZCDmnPlx5ukIyaVPQswacGKWAMYFv9GHler/1sI
V2Kkbbm0tVpdaeKmKgZQGsbX4WDkrEtyZD193RkpaqAOT8ebL5ZMn+rlXoPFfG/B
3qciVj6gVd4S5EbjvMUviilbuD3rZPzP7hA+QVyMI01eJYGwaYUCGSRRyC9cHR7R
8RQW9q6SE+JQCoWR1/YnwDeD3TmBVZUWsGtca8LfiKgGWn1/8I7OrRW/xLvW2c/X
b9g2h41zv0WmQEd1qI4OYti7Iq0pNYNXFUz4a4ePggQtlgLnv+AJ7poXDoyPMRGn
npRsnTiKSuxlzwHiPkqeDvTyho3c91AE3Id0RyiQDdwjUlsMNxYKaWBc3wN+ZIJa
SmpucCped1+UuqSWA0kLOwM46UKkiIkN7+C7fdudZXhIPIXOWSS7L0ef6wARAQAB
zSlyb290IChJbXBvcnRlZCBmcm9tIFNTSCkgPHJvb3RAbG9jYWxob3N0PsLBYgQT
AQgAFgUCAAAAAAkQ28xIpTAHc6gCGw8CGQEAAO0TEAA/SUCWVWIlm2DNfu1yJcFT
950dY916f+GDl0+em3JiPx0Fd0Rv5bC3GMJGx9FiJ5Q6mxDADrcTpUikFPHvpMCn
KXj3W1Ou49EPF5OzrxsRjY1ODP/tlStEDa/e8myg8RBEhbLbR7k1h5xlg4S9wGaX
0xT1GrJWX7cXIVqFHMp+EkF/Oyz8PKF1hi5l+x6d2iTS9xOza/QYgpUpo48FzizH
W4y+75XK3MsJ++qhSDiMIc6UFngotK87vbBb1SacMxa/lwu2nf060d1AZ4nSg+NN
Mt0/mO78k7tZVWMlNPvojvwdKnJXdXjfFvJe/yhLF2AXc1D6N4TIMCnUWlSoFL3j
yZPptxkCdy7nEvGkiLq0Ek9TNzD5cSmovxpB85SYdUBo+HqRX0sC6CR6iI455p+e
QRyWPs5PE8aBQY6PQQLg5Fp311rya8Zy6Zp8uYouvaH6cka16zBpRurhJ61oKUdB
VBG9eA43rPJ49o+QQFYvVC63wU4rQ2gSjhxkgDCq2porJ1Hl5R/9BfjEv3MYaxjO
HIhhVhQz4ZQ5oawF1SfyLpkAPFzhn09IRNy5xeW7TAl9W6UNcEY5CVjdGXnkrqjW
ZHLBBBrKpmAO0eCRHvAn9u6L4d/0mspw0v0CEMTultgXmOABCv5aaeLeNS/HDAL9
hilGDJyHAQjo/6xEVVzCKQ==
=Vlbm
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -1,25 +0,0 @@
{ config, ... }:
# Profile for an hcloud host serving web traffic: nginx plus the usual
# ssh/monitoring baseline, with exporter ports opened on the private NIC.
{
  imports = [
    ../environment.nix
    ../hardware/hcloud.nix
    ../modules
  ];

  # Baseline services from the local `custom` module set.
  custom.services.nginx.enable = true;
  custom.services.openssh.enable = true;
  custom.services.monitoring-exporters.enable = true;

  # Root login via key only.
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDQKmE04ZeXN65PTt5cc0YAgBeFukwhP39Ccq9ZxlCkovUMcm9q1Gqgb1tw0hfHCUYK9D6In/qLgNQ6h0Etnesi9HUncl6GC0EE89kNOANZVLuPir0V9Rm7zo55UUUM/qlZe1L7b19oO4qT5tIUlM1w4LfduZuyaag2RDpJxh4xBontftZnCS6O2OI4++/6OKLkn4qtsepxPWb9M6lY/sb6w75LqyUXyjxxArrQMHpE4RQHTCEJiK9t+z5xpfI4WfTnIRQaCw6LxZhE9Kh/pOSVbLU6c5VdBHfCOPk6xrB3TbuUvMpR0cRtn5q0nJQHGhL0A709UXR1fnPm7Xs4GTIf2LWXch6mcrjkTocz8qmKDuMxQzY76QXy6A+rvghhOxnrZTEhLKExZxNqag72MIeippPFNbyOJgke3htHy74b9WjM1vZJ9VRYnmhxpGz0af//GF6LZQy7gOxBasSOv5u5r//1Ow7FNf2K5xYPGYzWRIDx+abMa+JwOyPHdZ9bR+jmB5R9VohFECFLgjm+O5Ed1LJgRX/6vYlB+8gZeeflbZpYYsSY/EcpsUKgtOmIBJT1svdjVTDdplihdFUzWfjL+n2O30K7yniNz6dGbXhxfqOVlp9R6ZsEdbGTX0IGpG+0ZgkUkLrgROAH1xiOYNhpXuD3l6rNXLw4HP3Mqjp3Fw== root@hel1"
  ];

  # Expose the node exporter and port 9000 on the private interface only.
  networking.firewall.interfaces."eth1".allowedTCPPorts = [
    config.services.prometheus.exporters.node.port
    9000
  ];
}

View file

@ -1,27 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# Profile for an hcloud host with the ssh/monitoring baseline and the
# node exporter reachable over the private NIC.
{
  imports = [
    ../environment.nix
    ../hardware/hcloud.nix
    ../modules
  ];

  # Baseline services from the local `custom` module set.
  custom.services.openssh.enable = true;
  custom.services.monitoring-exporters.enable = true;

  # Root login via key only.
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDQKmE04ZeXN65PTt5cc0YAgBeFukwhP39Ccq9ZxlCkovUMcm9q1Gqgb1tw0hfHCUYK9D6In/qLgNQ6h0Etnesi9HUncl6GC0EE89kNOANZVLuPir0V9Rm7zo55UUUM/qlZe1L7b19oO4qT5tIUlM1w4LfduZuyaag2RDpJxh4xBontftZnCS6O2OI4++/6OKLkn4qtsepxPWb9M6lY/sb6w75LqyUXyjxxArrQMHpE4RQHTCEJiK9t+z5xpfI4WfTnIRQaCw6LxZhE9Kh/pOSVbLU6c5VdBHfCOPk6xrB3TbuUvMpR0cRtn5q0nJQHGhL0A709UXR1fnPm7Xs4GTIf2LWXch6mcrjkTocz8qmKDuMxQzY76QXy6A+rvghhOxnrZTEhLKExZxNqag72MIeippPFNbyOJgke3htHy74b9WjM1vZJ9VRYnmhxpGz0af//GF6LZQy7gOxBasSOv5u5r//1Ow7FNf2K5xYPGYzWRIDx+abMa+JwOyPHdZ9bR+jmB5R9VohFECFLgjm+O5Ed1LJgRX/6vYlB+8gZeeflbZpYYsSY/EcpsUKgtOmIBJT1svdjVTDdplihdFUzWfjL+n2O30K7yniNz6dGbXhxfqOVlp9R6ZsEdbGTX0IGpG+0ZgkUkLrgROAH1xiOYNhpXuD3l6rNXLw4HP3Mqjp3Fw== root@hel1"
  ];

  # Expose the node exporter on the private interface only.
  networking.firewall.interfaces."eth1".allowedTCPPorts = [
    config.services.prometheus.exporters.node.port
  ];
}

View file

@ -1,22 +0,0 @@
{
  config,
  pkgs,
  pkgs-unstable,
  ...
}:
# Profile for the dedicated storage server: hardware-specific config plus
# the ssh baseline; no exporter ports are opened here.
{
  imports = [
    ../environment.nix
    ../hardware/hetzner-dedicated-storage1.nix
    ../modules
  ];

  # Baseline services from the local `custom` module set.
  custom.services.openssh.enable = true;

  # Root login via key only.
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDQKmE04ZeXN65PTt5cc0YAgBeFukwhP39Ccq9ZxlCkovUMcm9q1Gqgb1tw0hfHCUYK9D6In/qLgNQ6h0Etnesi9HUncl6GC0EE89kNOANZVLuPir0V9Rm7zo55UUUM/qlZe1L7b19oO4qT5tIUlM1w4LfduZuyaag2RDpJxh4xBontftZnCS6O2OI4++/6OKLkn4qtsepxPWb9M6lY/sb6w75LqyUXyjxxArrQMHpE4RQHTCEJiK9t+z5xpfI4WfTnIRQaCw6LxZhE9Kh/pOSVbLU6c5VdBHfCOPk6xrB3TbuUvMpR0cRtn5q0nJQHGhL0A709UXR1fnPm7Xs4GTIf2LWXch6mcrjkTocz8qmKDuMxQzY76QXy6A+rvghhOxnrZTEhLKExZxNqag72MIeippPFNbyOJgke3htHy74b9WjM1vZJ9VRYnmhxpGz0af//GF6LZQy7gOxBasSOv5u5r//1Ow7FNf2K5xYPGYzWRIDx+abMa+JwOyPHdZ9bR+jmB5R9VohFECFLgjm+O5Ed1LJgRX/6vYlB+8gZeeflbZpYYsSY/EcpsUKgtOmIBJT1svdjVTDdplihdFUzWfjL+n2O30K7yniNz6dGbXhxfqOVlp9R6ZsEdbGTX0IGpG+0ZgkUkLrgROAH1xiOYNhpXuD3l6rNXLw4HP3Mqjp3Fw== root@hel1"
  ];
}

View file

@ -1,303 +0,0 @@
#!/usr/bin/env bash
# Installs NixOS on a Hetzner server, wiping the server.
#
# This is for a specific server configuration; adjust where needed.
#
# Prerequisites:
# * Update the script wherever FIXME is present
#
# Usage:
# ssh root@YOUR_SERVERS_IP bash -s < hetzner-dedicated-wipe-and-install-nixos.sh
#
# When the script is done, make sure to boot the server from HD, not rescue mode again.
# Explanations:
#
# * Adapted from https://gist.github.com/nh2/78d1c65e33806e7728622dbe748c2b6a
# * Following largely https://nixos.org/nixos/manual/index.html#sec-installing-from-other-distro.
# * **Important:** We boot in legacy-BIOS mode, not UEFI, because that's what Hetzner uses.
# * NVMe devices aren't supported for booting (those require EFI boot)
# * We set a custom `configuration.nix` so that we can connect to the machine afterwards,
# inspired by https://nixos.wiki/wiki/Install_NixOS_on_Hetzner_Online
# * This server has 4 HDDs (sda-sdd).
# We put everything on a single 4-disk RAID5 array.
# Storage scheme: `partitions -> RAID -> LVM -> ext4`.
# * A root user with empty password is created, so that you can just login
# as root and press enter when using the Hetzner spider KVM.
# Of course that empty-password login isn't exposed to the Internet.
# Change the password afterwards to avoid anyone with physical access
# being able to login without any authentication.
# * The script reboots at the end.
set -eu
set -o pipefail
set -x
# Inspect existing disks
lsblk
# Undo existing setups to allow running the script multiple times to iterate on it.
# We allow these operations to fail for the case the script runs the first time.
set +e
umount /mnt
vgchange -an
set -e
# Stop all mdadm arrays that the boot may have activated.
mdadm --stop --scan
# Prevent mdadm from auto-assembling arrays.
# Otherwise, as soon as we create the partition tables below, it will try to
# re-assemble a previous RAID if any remaining RAID signatures are present,
# before we even get the chance to wipe them.
# From:
# https://unix.stackexchange.com/questions/166688/prevent-debian-from-auto-assembling-raid-at-boot/504035#504035
# We use `>` because the file may already contain some detected RAID arrays,
# which would take precedence over our `<ignore>`.
echo 'AUTO -all
ARRAY <ignore> UUID=00000000:00000000:00000000:00000000' > /etc/mdadm/mdadm.conf
# Create partition tables (--script to not ask)
parted --script /dev/sda mklabel gpt
parted --script /dev/sdb mklabel gpt
parted --script /dev/sdc mklabel gpt
parted --script /dev/sdd mklabel gpt
# Create partitions (--script to not ask)
#
# We create the 1MB BIOS boot partition at the front.
#
# Note we use "MB" instead of "MiB" because otherwise `--align optimal` has no effect;
# as per documentation https://www.gnu.org/software/parted/manual/html_node/unit.html#unit:
# > Note that as of parted-2.4, when you specify start and/or end values using IEC
# > binary units like "MiB", "GiB", "TiB", etc., parted treats those values as exact
#
# Note: When using `mkpart` on GPT, as per
# https://www.gnu.org/software/parted/manual/html_node/mkpart.html#mkpart
# the first argument to `mkpart` is not a `part-type`, but the GPT partition name:
# ... part-type is one of 'primary', 'extended' or 'logical', and may be specified only with 'msdos' or 'dvh' partition tables.
# A name must be specified for a 'gpt' partition table.
# GPT partition names are limited to 36 UTF-16 chars, see https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries_(LBA_2-33).
parted --script --align optimal /dev/sda -- mklabel gpt mkpart 'BIOS-boot-partition' 1MB 2MB set 1 bios_grub on mkpart 'data-partition' 2MB '100%'
parted --script --align optimal /dev/sdb -- mklabel gpt mkpart 'BIOS-boot-partition' 1MB 2MB set 1 bios_grub on mkpart 'data-partition' 2MB '100%'
parted --script --align optimal /dev/sdc -- mklabel gpt mkpart 'BIOS-boot-partition' 1MB 2MB set 1 bios_grub on mkpart 'data-partition' 2MB '100%'
parted --script --align optimal /dev/sdd -- mklabel gpt mkpart 'BIOS-boot-partition' 1MB 2MB set 1 bios_grub on mkpart 'data-partition' 2MB '100%'
# Reload partitions
partprobe
# Wait for all devices to exist
udevadm settle --timeout=5 --exit-if-exists=/dev/sda1
udevadm settle --timeout=5 --exit-if-exists=/dev/sda2
udevadm settle --timeout=5 --exit-if-exists=/dev/sdb1
udevadm settle --timeout=5 --exit-if-exists=/dev/sdb2
udevadm settle --timeout=5 --exit-if-exists=/dev/sdc1
udevadm settle --timeout=5 --exit-if-exists=/dev/sdc2
udevadm settle --timeout=5 --exit-if-exists=/dev/sdd1
udevadm settle --timeout=5 --exit-if-exists=/dev/sdd2
# Wipe any previous RAID signatures
mdadm --zero-superblock --force /dev/sda2
mdadm --zero-superblock --force /dev/sdb2
mdadm --zero-superblock --force /dev/sdc2
mdadm --zero-superblock --force /dev/sdd2
# Create RAIDs
# Note that during creation and boot-time assembly, mdadm cares about the
# host name, and the existence and contents of `mdadm.conf`!
# This also affects the names appearing in /dev/md/ being different
# before and after reboot in general (but we take extra care here
# to pass explicit names, and set HOMEHOST for the rebooting system further
# down, so that the names appear the same).
# Almost all details of this are explained in
# https://bugzilla.redhat.com/show_bug.cgi?id=606481#c14
# and the followup comments by Doug Ledford.
mdadm --create --run --verbose /dev/md0 --level=5 --raid-devices=4 --homehost=hetzner --name=root0 /dev/sda2 /dev/sdb2 /dev/sdc2 /dev/sdd2
# Assembling the RAID can result in auto-activation of previously-existing LVM
# groups, preventing the RAID block device wiping below with
# `Device or resource busy`. So disable all VGs first.
vgchange -an
# Wipe filesystem signatures that might be on the RAID from some
# possibly existing older use of the disks (RAID creation does not do that).
# See https://serverfault.com/questions/911370/why-does-mdadm-zero-superblock-preserve-file-system-information
wipefs -a /dev/md0
# Disable RAID recovery. We don't want this to slow down machine provisioning
# in the rescue mode. It can run in normal operation after reboot.
echo 0 > /proc/sys/dev/raid/speed_limit_max
# LVM
# PVs
pvcreate /dev/md0
# VGs
vgcreate vg0 /dev/md0
# LVs (--yes to automatically wipe detected file system signatures)
lvcreate --yes --extents 95%FREE -n root0 vg0 # 5% slack space
# Filesystems (-F to not ask on preexisting FS)
mkfs.ext4 -F -L root /dev/mapper/vg0-root0
# Creating file systems changes their UUIDs.
# Trigger udev so that the entries in /dev/disk/by-uuid get refreshed.
# `nixos-generate-config` depends on those being up-to-date.
# See https://github.com/NixOS/nixpkgs/issues/62444
udevadm trigger
# Wait for FS labels to appear
udevadm settle --timeout=5 --exit-if-exists=/dev/disk/by-label/root
# NixOS pre-installation mounts
# Mount target root partition
mount /dev/disk/by-label/root /mnt
# Installing nix
# Installing nix requires `sudo`; the Hetzner rescue mode doesn't have it.
apt-get install -y sudo
# Allow installing nix as root, see
# https://github.com/NixOS/nix/issues/936#issuecomment-475795730
mkdir -p /etc/nix
echo "build-users-group =" > /etc/nix/nix.conf
curl -L https://nixos.org/nix/install | sh
set +u +x # sourcing this may refer to unset variables that we have no control over
. $HOME/.nix-profile/etc/profile.d/nix.sh
set -u -x
# FIXME Keep in sync with `system.stateVersion` set below!
nix-channel --add https://nixos.org/channels/nixos-21.05 nixpkgs
nix-channel --update
# Getting NixOS installation tools
nix-env -iE "_: with import <nixpkgs/nixos> { configuration = {}; }; with config.system.build; [ nixos-generate-config nixos-install nixos-enter manual.manpages ]"
nixos-generate-config --root /mnt
# Find the name of the network interface that connects us to the Internet.
# Inspired by https://unix.stackexchange.com/questions/14961/how-to-find-out-which-interface-am-i-using-for-connecting-to-the-internet/302613#302613
RESCUE_INTERFACE=$(ip route get 8.8.8.8 | grep -Po '(?<=dev )(\S+)')
# Find what its name will be under NixOS, which uses stable interface names.
# See https://major.io/2015/08/21/understanding-systemds-predictable-network-device-names/#comment-545626
# NICs for most Hetzner servers are not onboard, which is why we use
# `ID_NET_NAME_PATH`; otherwise it would be `ID_NET_NAME_ONBOARD`.
INTERFACE_DEVICE_PATH=$(udevadm info -e | grep -Po "(?<=^P: )(.*${RESCUE_INTERFACE})")
UDEVADM_PROPERTIES_FOR_INTERFACE=$(udevadm info --query=property "--path=$INTERFACE_DEVICE_PATH")
NIXOS_INTERFACE=$(echo "$UDEVADM_PROPERTIES_FOR_INTERFACE" | grep -o -E 'ID_NET_NAME_PATH=\w+' | cut -d= -f2)
echo "Determined NIXOS_INTERFACE as '$NIXOS_INTERFACE'"
IP_V4=$(ip route get 8.8.8.8 | grep -Po '(?<=src )(\S+)')
echo "Determined IP_V4 as $IP_V4"
# Determine Internet IPv6 by checking route, and using ::1
# (because Hetzner rescue mode uses ::2 by default).
# The `ip -6 route get` output on Hetzner looks like:
# # ip -6 route get 2001:4860:4860:0:0:0:0:8888
# 2001:4860:4860::8888 via fe80::1 dev eth0 src 2a01:4f8:151:62aa::2 metric 1024 pref medium
IP_V6="$(ip route get 2001:4860:4860:0:0:0:0:8888 | head -1 | cut -d' ' -f7 | cut -d: -f1-4)::1"
echo "Determined IP_V6 as $IP_V6"
# From https://stackoverflow.com/questions/1204629/how-do-i-get-the-default-gateway-in-linux-given-the-destination/15973156#15973156
read _ _ DEFAULT_GATEWAY _ < <(ip route list match 0/0); echo "$DEFAULT_GATEWAY"
echo "Determined DEFAULT_GATEWAY as $DEFAULT_GATEWAY"
# Generate `configuration.nix`. Note that we splice in shell variables.
# (Unquoted EOF delimiter: everything below up to EOF is subject to shell
# expansion, so $NIXOS_INTERFACE, $IP_V4, etc. are substituted into the file.)
cat > /mnt/etc/nixos/configuration.nix <<EOF
{ config, pkgs, ... }:
{
imports =
[ # Include the results of the hardware scan.
./hardware-configuration.nix
];
# Use GRUB2 as the boot loader.
# We don't use systemd-boot because Hetzner uses BIOS legacy boot.
boot.loader.systemd-boot.enable = false;
boot.loader.grub = {
enable = true;
efiSupport = false;
devices = [ "/dev/sda" "/dev/sdb" "/dev/sdc" "/dev/sdd" ];
};
networking.hostName = "hetzner";
# The mdadm RAID1s were created with 'mdadm --create ... --homehost=hetzner',
# but the hostname for each machine may be different, and mdadm's HOMEHOST
# setting defaults to '<system>' (using the system hostname).
# This results mdadm considering such disks as "foreign" as opposed to
# "local", and showing them as e.g. '/dev/md/hetzner:root0'
# instead of '/dev/md/root0'.
# This is mdadm's protection against accidentally putting a RAID disk
# into the wrong machine and corrupting data by accidental sync, see
# https://bugzilla.redhat.com/show_bug.cgi?id=606481#c14 and onward.
# We do not worry about plugging disks into the wrong machine because
# we will never exchange disks between machines, so we tell mdadm to
# ignore the homehost entirely.
environment.etc."mdadm.conf".text = ''
HOMEHOST <ignore>
'';
# The RAIDs are assembled in stage1, so we need to make the config
# available there.
boot.initrd.mdadmConf = config.environment.etc."mdadm.conf".text;
# Network (Hetzner uses static IP assignments, and we don't use DHCP here)
networking.useDHCP = false;
networking.interfaces."$NIXOS_INTERFACE".ipv4.addresses = [
{
address = "$IP_V4";
# FIXME: The prefix length is commonly, but not always, 24.
# You should check what the prefix length is for your server
# by inspecting the netmask in the "IPs" tab of the Hetzner UI.
# For example, a netmask of 255.255.255.0 means prefix length 24
# (24 leading 1s), and 255.255.255.192 means prefix length 26
# (26 leading 1s).
prefixLength = 24;
}
];
networking.interfaces."$NIXOS_INTERFACE".ipv6.addresses = [
{
address = "$IP_V6";
prefixLength = 64;
}
];
networking.defaultGateway = "$DEFAULT_GATEWAY";
networking.defaultGateway6 = { address = "fe80::1"; interface = "$NIXOS_INTERFACE"; };
networking.nameservers = [ "8.8.8.8" ];
# Initial empty root password for easy login:
users.users.root.initialHashedPassword = "";
services.openssh.permitRootLogin = "prohibit-password";
users.users.root.openssh.authorizedKeys.keys = [
# FIXME Replace this by your SSH pubkey!
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDMPhCld0dsDzpdkMvPRdiwd6IX8HF8Mb2V6uQzBl8/syeny8FbZxlZR8gk39RGxNYcLaZ+nA50DS6mOIplXCGdtozfw0Vm+FdITN3apMufWIdobG7Igs1vxKBBbkAb5lwxkEFUCUMzPdCLFHd5zabVH0WE42Be8+hYPLd5W/ikPCOgxRaGwryHHroxRMdkD3PcNE8upSEMdGl51pzgXhO6Fcig8UokOYHxV92SiQ0KEsCbc+oe8e9Gkr7g78tz+6YcTYLY2p2ygR7Vrh/WyTaUVnrNNqL8NIqp+Lc2kVtnqGXHFBJ0Wggaly+AeKWygy+dnOMEGSirhQ6/dUcB/Phz phfroidmont@archdesktop-2017-07-31"
];
services.openssh.enable = true;
# FIXME
# This value determines the NixOS release with which your system is to be
# compatible, in order to avoid breaking some software such as database
# servers. You should change this only after NixOS release notes say you
# should.
system.stateVersion = "21.05"; # Did you read the comment?
}
EOF
# Install NixOS
NIX_CONFIG="experimental-features = nix-command flakes" PATH="$PATH" `which nixos-install` --no-root-passwd --root /mnt --max-jobs 40
# NIX_PATH="$NIX_PATH"
umount /mnt
reboot

View file

@ -8,11 +8,6 @@ terraform {
username = "phfroidmont"
}
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = ">=1.24.1"
}
hetznerdns = {
source = "timohirt/hetznerdns"
version = ">= 2.2.0"
@ -30,20 +25,7 @@ data "sops_file" "secrets" {
}
provider "hcloud" {
token = data.sops_file.secrets.data["hcloud.token"]
}
provider "hetznerdns" {
apitoken = data.sops_file.secrets.data["hcloud.dns_token"]
}
resource "hcloud_ssh_key" "froidmpa-desktop" {
name = "froidmpa-desktop"
public_key = file("../ssh_keys/froidmpa-desktop.pub")
}
resource "hcloud_ssh_key" "froidmpa-laptop" {
name = "froidmpa-laptop"
public_key = file("../ssh_keys/froidmpa-laptop.pub")
}

View file

@ -1,6 +1,5 @@
locals {
dmarc_value = "\"v=DMARC1; p=none; rua=mailto:failed-dmarc@banditlair.com; ruf=mailto:dmarc@banditlair.com\""
storage1_ip = "78.46.96.243"
hel1_ip = "37.27.138.62"
hel1_ipv6 = "2a01:4f9:3100:1202::2"
}
@ -45,14 +44,6 @@ resource "hetznerdns_record" "mail_aaaa" {
ttl = 600
}
resource "hetznerdns_record" "storage1_a" {
zone_id = data.hetznerdns_zone.banditlair_zone.id
name = "storage1"
value = local.storage1_ip
type = "A"
ttl = 600
}
resource "hetznerdns_record" "hel1_a" {
zone_id = data.hetznerdns_zone.banditlair_zone.id
name = "hel1"