From bcbcc8b17b582e7fcc7bafe0f97408c047c2b0be Mon Sep 17 00:00:00 2001 From: plasmagoat Date: Mon, 28 Jul 2025 02:05:13 +0200 Subject: [PATCH 1/2] homelab framework module init (everything is a mess) --- .forgejo/workflows/colmena-apply.yml | 5 - colmena.nix | 59 +++ flake.lock | 6 +- flake.nix | 56 ++- hive.nix | 37 -- hosts/default.nix | 106 ++++ hosts/photos/default.nix | 28 ++ hosts/photos/ente.nix | 73 +++ hosts/photos/minio.nix | 35 ++ hosts/sandbox/default.nix | 57 +++ infrastructure/nixos-cloud-init/README.md | 5 +- infrastructure/proxmox/main.tf | 10 +- infrastructure/proxmox/providers.tf | 6 +- infrastructure/proxmox/terraform.tfvars | 3 + infrastructure/proxmox/versions.tf | 2 +- machines/_default/configuration.nix | 10 +- machines/auth/authelia.nix | 306 ++++++------ machines/auth/bootstrap/service-accounts.nix | 11 + machines/auth/configuration.nix | 1 + machines/auth/postgres.nix | 1 + machines/mail/configuration.nix | 1 + machines/mail/mailserver.nix | 17 +- machines/mail/roundcube.nix | 22 + machines/modules/README.md | 11 + machines/modules/pgbackrest.nix | 43 ++ machines/photos/configuration.nix | 1 + machines/photos/ente.nix | 60 ++- machines/photos/minio.nix | 31 +- machines/sandbox/configuration.nix | 15 +- modules/homelab/backup-config.nix | 116 +++++ modules/homelab/backup/restic.nix | 105 ++++ modules/homelab/default.nix | 133 +++++ modules/homelab/lib/node-aggregation.nix | 226 +++++++++ modules/homelab/lib/service-interface.nix | 295 +++++++++++ modules/homelab/monitoring-config.nix | 214 ++++++++ modules/homelab/motd/default.nix | 397 +++++++++++++++ modules/homelab/proxy-config.nix | 53 ++ modules/homelab/services/default.nix | 7 + modules/homelab/services/example-service.nix | 161 ++++++ modules/homelab/services/jellyfin.nix | 125 +++++ modules/homelab/services/minio.nix | 66 +++ .../services/monitoring/alertmanager.nix | 237 +++++++++ .../services/monitoring/alertmanager_new.nix | 326 ++++++++++++ .../homelab/services/monitoring/example.nix | 148 ++++++ modules/homelab/services/monitoring/gatus.nix | 244 +++++++++ .../homelab/services/monitoring/grafana.nix | 416 ++++++++++++++++ .../services/monitoring/grafana_new.nix | 369 ++++++++++++++ .../homelab/services/monitoring/influxdb.nix | 0 modules/homelab/services/monitoring/loki.nix | 0 .../services/monitoring/monitoring-stack.nix | 60 +++ .../services/monitoring/prometheus.nix | 203 ++++++++ .../homelab/services/monitoring/promtail.nix | 0 modules/homelab/services/monitoring/tempo.nix | 0 modules/homelab/services/postgres.nix | 0 modules/homelab/services/prometheus_old.nix | 208 ++++++++ modules/lib/helpers.nix | 126 +++++ modules/nixos/backup-manager.nix | 187 +++++++ modules/nixos/default.nix | 5 + modules/nixos/ente.nix | 47 +- modules/nixos/global-config.nix | 462 ++++++++++++++++++ modules/nixos/motd/default.nix | 304 ++++++++++++ modules/nixos/services/default.nix | 4 + modules/nixos/services/forgejo-runner.nix | 0 modules/nixos/services/forgejo.nix | 1 + modules/nixos/services/grafana.nix | 72 +++ modules/nixos/services/jellyfin.nix | 125 +++++ modules/nixos/services/postgres.nix | 0 modules/nixos/services/prometheus.nix | 208 ++++++++ modules/nixos/system/backups/backrest.nix | 4 + .../nixos/system/backups/backups-option.nix | 95 ++++ modules/nixos/system/backups/default.nix | 6 + modules/nixos/system/backups/restic.nix | 234 +++++++++ modules/nixos/system/backups/root.nix | 66 +++ nixos/README.md | 2 +- nixos/hosts/forgejo/README.md | 17 - nixos/hosts/forgejo/forgejo.nix | 14 +- 
nixos/hosts/forgejo/sops.nix | 7 +- .../traefik/configuration/infra/routers.nix | 13 +- .../traefik/configuration/infra/services.nix | 3 +- .../traefik/configuration/photos/routers.nix | 48 ++ .../traefik/configuration/photos/services.nix | 8 + nixos/hosts/vpn/README.md | 127 ----- nixos/hosts/warpgate/warpgate.nix | 14 - nixos/secrets/forgejo/secrets.yml | 5 +- pkgs/ente-web.nix | 8 +- profiles/proxmox-vm.nix | 43 ++ scripts/config.nix | 18 + scripts/deploy-homelab.sh | 115 +++++ scripts/generate-docs.sh | 41 ++ scripts/validate-config.nix | 79 +++ secrets/.gitignore | 1 + secrets/default.nix | 42 ++ secrets/secrets.yaml | 16 +- users/plasmagoat.nix | 31 ++ 94 files changed, 7289 insertions(+), 436 deletions(-) create mode 100644 colmena.nix delete mode 100644 hive.nix create mode 100644 hosts/default.nix create mode 100644 hosts/photos/default.nix create mode 100644 hosts/photos/ente.nix create mode 100644 hosts/photos/minio.nix create mode 100644 hosts/sandbox/default.nix create mode 100644 machines/mail/roundcube.nix create mode 100644 machines/modules/README.md create mode 100644 machines/modules/pgbackrest.nix create mode 100644 modules/homelab/backup-config.nix create mode 100644 modules/homelab/backup/restic.nix create mode 100644 modules/homelab/default.nix create mode 100644 modules/homelab/lib/node-aggregation.nix create mode 100644 modules/homelab/lib/service-interface.nix create mode 100644 modules/homelab/monitoring-config.nix create mode 100644 modules/homelab/motd/default.nix create mode 100644 modules/homelab/proxy-config.nix create mode 100644 modules/homelab/services/default.nix create mode 100644 modules/homelab/services/example-service.nix create mode 100644 modules/homelab/services/jellyfin.nix create mode 100644 modules/homelab/services/minio.nix create mode 100644 modules/homelab/services/monitoring/alertmanager.nix create mode 100644 modules/homelab/services/monitoring/alertmanager_new.nix create mode 100644 modules/homelab/services/monitoring/example.nix create mode 100644 modules/homelab/services/monitoring/gatus.nix create mode 100644 modules/homelab/services/monitoring/grafana.nix create mode 100644 modules/homelab/services/monitoring/grafana_new.nix rename nixos/hosts/warpgate/host.nix => modules/homelab/services/monitoring/influxdb.nix (100%) create mode 100644 modules/homelab/services/monitoring/loki.nix create mode 100644 modules/homelab/services/monitoring/monitoring-stack.nix create mode 100644 modules/homelab/services/monitoring/prometheus.nix create mode 100644 modules/homelab/services/monitoring/promtail.nix create mode 100644 modules/homelab/services/monitoring/tempo.nix create mode 100644 modules/homelab/services/postgres.nix create mode 100644 modules/homelab/services/prometheus_old.nix create mode 100644 modules/lib/helpers.nix create mode 100644 modules/nixos/backup-manager.nix create mode 100644 modules/nixos/global-config.nix create mode 100644 modules/nixos/motd/default.nix create mode 100644 modules/nixos/services/default.nix create mode 100644 modules/nixos/services/forgejo-runner.nix create mode 100644 modules/nixos/services/forgejo.nix create mode 100644 modules/nixos/services/grafana.nix create mode 100644 modules/nixos/services/jellyfin.nix create mode 100644 modules/nixos/services/postgres.nix create mode 100644 modules/nixos/services/prometheus.nix create mode 100644 modules/nixos/system/backups/backrest.nix create mode 100644 modules/nixos/system/backups/backups-option.nix create mode 100644 modules/nixos/system/backups/default.nix 
create mode 100644 modules/nixos/system/backups/restic.nix create mode 100644 modules/nixos/system/backups/root.nix delete mode 100644 nixos/hosts/forgejo/README.md delete mode 100644 nixos/hosts/vpn/README.md delete mode 100644 nixos/hosts/warpgate/warpgate.nix create mode 100644 profiles/proxmox-vm.nix create mode 100644 scripts/config.nix create mode 100755 scripts/deploy-homelab.sh create mode 100755 scripts/generate-docs.sh create mode 100644 scripts/validate-config.nix create mode 100644 secrets/.gitignore create mode 100644 secrets/default.nix create mode 100644 users/plasmagoat.nix diff --git a/.forgejo/workflows/colmena-apply.yml b/.forgejo/workflows/colmena-apply.yml index 6882556..dbfd93d 100644 --- a/.forgejo/workflows/colmena-apply.yml +++ b/.forgejo/workflows/colmena-apply.yml @@ -41,11 +41,6 @@ jobs: ssh-keyscan -H "$NIXOS_BUILER_HOST" >> ~/.ssh/known_hosts chmod 600 ~/.ssh/known_hosts - - name: Test SSH connection to NixOS Builder - run: | - echo "Testing SSH connection to $NIXOS_BUILER_HOST..." - ssh -o StrictHostKeyChecking=yes "$NIXOS_BUILER_USER"@"$NIXOS_BUILER_HOST" "echo 'SSH success. Hostname:' && hostname" - - name: Apply Colmena id: apply run: colmena apply diff --git a/colmena.nix b/colmena.nix new file mode 100644 index 0000000..bb1d099 --- /dev/null +++ b/colmena.nix @@ -0,0 +1,59 @@ +# colmena.nix - Separate file to keep flake.nix clean +{ + inputs, + outputs, +}: let + inherit (inputs.nixpkgs) lib; + + # Helper to create a host configuration + mkHost = { + hostname, + profile ? "proxmox-vm", + modules ? [], + specialArgs ? {}, + }: { + imports = + [ + # Base profile (determines hardware/platform specifics) + (./. + "/profiles/${profile}.nix") + # Host-specific configuration + (./. + "/hosts/${hostname}") + # Additional modules + ] + ++ modules; + + # Pass through special args and our outputs + _module.args = + specialArgs + // { + inherit inputs outputs; + }; + }; +in { + meta = { + nixpkgs = import inputs.nixpkgs { + system = "x86_64-linux"; + overlays = [ + outputs.overlays.additions + outputs.overlays.modifications + outputs.overlays.unstable-packages + inputs.colmena.overlays.default + ]; + }; + + specialArgs = {inherit inputs outputs;}; + }; + + defaults = import ./hosts/default.nix; + + # Define your hosts + sandbox = mkHost { + hostname = "sandbox"; + profile = "proxmox-vm"; + }; + + photos = mkHost { + hostname = "photos"; + profile = "proxmox-vm"; + }; +} diff --git a/flake.lock b/flake.lock index 39e8693..61e80ac 100644 --- a/flake.lock +++ b/flake.lock @@ -156,11 +156,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1750134718, - "narHash": "sha256-v263g4GbxXv87hMXMCpjkIxd/viIF7p3JpJrwgKdNiI=", + "lastModified": 1753429684, + "narHash": "sha256-9h7+4/53cSfQ/uA3pSvCaBepmZaz/dLlLVJnbQ+SJjk=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9e83b64f727c88a7711a2c463a7b16eedb69a84c", + "rev": "7fd36ee82c0275fb545775cc5e4d30542899511d", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 8918622..9e6905f 100644 --- a/flake.nix +++ b/flake.nix @@ -27,18 +27,18 @@ # systems, sops-nix, # home-manager, - colmena, simple-nixos-mailserver, ... } @ inputs: let inherit (self) outputs; + lib = nixpkgs.lib; # Supported systems for your flake packages, shell, etc. 
systems = [ "x86_64-linux" ]; # This is a function that generates an attribute by calling a function you # pass to it, with each system as an argument - forAllSystems = nixpkgs.lib.genAttrs systems; + forAllSystems = lib.genAttrs systems; in { # Custom packages # Accessible through 'nix build', 'nix shell', etc @@ -54,33 +54,35 @@ # Reusable nixos modules nixosModules = import ./modules/nixos; - colmenaHive = colmena.lib.makeHive self.outputs.colmena; - colmena = { - meta = { - nixpkgs = import nixpkgs { - system = "x86_64-linux"; - overlays = [ - outputs.overlays.additions - outputs.overlays.modifications - outputs.overlays.unstable-packages + colmenaHive = inputs.colmena.lib.makeHive self.outputs.colmena; + colmena = import ./colmena.nix {inherit inputs outputs;}; - colmena.overlays.default + # Development shells + devShells = forAllSystems ( + system: let + inherit (inputs.colmena.packages."${pkgs.system}") colmena; + pkgs = nixpkgs.legacyPackages.${system}; + in { + default = pkgs.mkShell { + packages = with pkgs; [ + # colmena + sops + age + nix-output-monitor + jq + ssh-to-age # For converting SSH keys to age keys ]; - config.allowUnfree = true; + + shellHook = '' + echo "🏠 Homelab Development Environment" + echo "Available commands:" + echo " colmena apply - Deploy all hosts" + echo " colmena apply --on HOST - Deploy specific host" + echo " sops secrets/secrets.yaml - Edit secrets" + echo "" + ''; }; - - specialArgs = { - inherit inputs outputs; - }; - }; - - defaults = import ./machines/_default/configuration.nix; - - sandbox = import ./machines/sandbox/configuration.nix; - auth = import ./machines/auth/configuration.nix; - mail = import ./machines/mail/configuration.nix; - monitor = import ./machines/monitor/configuration.nix; - photos = import ./machines/photos/configuration.nix; - }; + } + ); }; } diff --git a/hive.nix b/hive.nix deleted file mode 100644 index e0fc390..0000000 --- a/hive.nix +++ /dev/null @@ -1,37 +0,0 @@ -inputs @ { - self, - nixpkgs, - sops-nix, - simple-nixos-mailserver, - # home-manager, - outputs, - ... -}: { - sandbox = {name, ...}: { - imports = [./machines/${name}/definition.nix]; - deployment.tags = ["sandbox"]; - }; - - monitor = {name, ...}: { - imports = [./machines/${name}/definition.nix]; - deployment.tags = ["grafana" "prometheus"]; - }; - - auth = {name, ...}: { - imports = [./machines/${name}/definition.nix]; - deployment.tags = ["zitadel" "sso" "ldap"]; - }; - - mail = {name, ...}: { - imports = [ - ./machines/${name}/definition.nix - simple-nixos-mailserver.nixosModule - ]; - deployment.tags = ["mail"]; - }; - - photos = {name, ...}: { - imports = [./machines/${name}/definition.nix]; - deployment.tags = ["ente"]; - }; -} diff --git a/hosts/default.nix b/hosts/default.nix new file mode 100644 index 0000000..57da4f5 --- /dev/null +++ b/hosts/default.nix @@ -0,0 +1,106 @@ +{ + config, + lib, + pkgs, + inputs, + outputs, + ... 
+}: { + imports = [ + # Essential modules for all systems + inputs.sops-nix.nixosModules.sops + ../modules/homelab + # User configurations + ../users/plasmagoat.nix + + # Secrets management + ../secrets + ]; + + # Colmena deployment defaults + deployment = { + targetHost = lib.mkDefault "${config.homelab.hostname}.${config.homelab.domain}"; + tags = [config.nixpkgs.system config.networking.hostName]; + replaceUnknownProfiles = lib.mkDefault true; + buildOnTarget = lib.mkDefault false; + }; + + # Basic system configuration that applies to ALL systems + nix = { + settings = { + experimental-features = ["nix-command" "flakes"]; + auto-optimise-store = true; + allowed-users = ["@wheel"]; + trusted-users = ["root" "@wheel"]; + }; + + gc = { + automatic = true; + options = "--delete-older-than 15d"; + dates = "daily"; + }; + + optimise.automatic = true; + + extraOptions = '' + keep-outputs = true + keep-derivations = true + ''; + }; + + # Basic security + security.sudo.wheelNeedsPassword = false; + + # SSH configuration + services.openssh = { + enable = true; + openFirewall = true; + settings = { + PasswordAuthentication = false; + PermitRootLogin = "prohibit-password"; + KbdInteractiveAuthentication = false; + }; + }; + + services.sshguard.enable = true; + programs.ssh.startAgent = true; + + # Basic packages for all systems + environment.systemPackages = with pkgs; [ + dig + nmap + traceroute + vim + git + curl + python3 + htop + tree + ]; + + # Timezone and locale + time.timeZone = lib.mkDefault "Europe/Copenhagen"; + console.keyMap = lib.mkDefault "dk-latin1"; + i18n.defaultLocale = lib.mkDefault "en_US.UTF-8"; + + # System backup job (applies to all systems) + # homelab.global.backups.jobs = [ + # { + # name = "system-config"; + # backend = "restic"; + # paths = [ + # "/etc/nixos" + # "/etc/sops" + # "/var/lib/nixos" + # ]; + # schedule = "daily"; + # excludePatterns = [ + # "*/cache/*" + # "*/tmp/*" + # ]; + # } + # ]; + + # Default state version + system.stateVersion = lib.mkDefault "25.05"; +} diff --git a/hosts/photos/default.nix b/hosts/photos/default.nix new file mode 100644 index 0000000..d3b3038 --- /dev/null +++ b/hosts/photos/default.nix @@ -0,0 +1,28 @@ +{ + outputs, + name, + ... +}: let +in { + imports = [ + outputs.nixosModules.ente + ./ente.nix + # ./minio.nix + ]; + + homelab = { + enable = true; + hostname = name; + tags = [name]; + + monitoring.enable = true; + motd.enable = true; + services = { + minio.enable = true; + }; + }; + + deployment.tags = ["ente"]; + + system.stateVersion = "25.05"; +} diff --git a/hosts/photos/ente.nix b/hosts/photos/ente.nix new file mode 100644 index 0000000..e1e9a0c --- /dev/null +++ b/hosts/photos/ente.nix @@ -0,0 +1,73 @@ +{ + config, + pkgs, + ... +}: { + sops.secrets."ente/minio/root_password".owner = "ente"; + sops.secrets."ente/minio/root_user".owner = "ente"; + sops.secrets."service_accounts/ente/password".owner = "ente"; + + environment.systemPackages = with pkgs; [ + ente-cli + ]; + + services.ente.api = { + enable = true; + enableLocalDB = true; + + domain = "ente-museum.procopius.dk"; + settings = { + # apps = { + # accounts = "https://accounts.procopius.dk"; + # cast = "https://cast.procopius.dk"; + # public-albums = "https://albums.procopius.dk"; + # }; + + smtp = { + host = "mail.procopius.dk"; + port = "465"; + username = "ente@procopius.dk"; + password._secret = config.sops.secrets."service_accounts/ente/password".path; + # The email address from which to send the email. 
Set this to an email + # address whose credentials you're providing. + email = "ente@procopius.dk"; + # Optional override for the sender name in the emails. If specified, it will + # be used for all emails sent by the instance (default is email specific). + sender-name = "ente"; + }; + internal.admins = [ + 1580559962386438 + ]; + s3 = { + use_path_style_urls = true; + b2-eu-cen = { + endpoint = "https://ente-minio-api.procopius.dk"; + region = "us-east-1"; + bucket = "ente"; + key._secret = config.sops.secrets."ente/minio/root_user".path; + secret._secret = config.sops.secrets."ente/minio/root_password".path; + }; + }; + }; + }; + services.ente.web = { + enable = true; + domains = { + api = "ente-museum.procopius.dk"; + accounts = "ente-accounts.procopius.dk"; + albums = "ente-albums.procopius.dk"; + cast = "ente-cast.procopius.dk"; + photos = "ente-photos.procopius.dk"; + auth = "ente-auth.procopius.dk"; + }; + }; + + networking.firewall.allowedTCPPorts = [ + 3000 + 3001 + 3002 + 3003 + 3004 + 8080 + ]; +} diff --git a/hosts/photos/minio.nix b/hosts/photos/minio.nix new file mode 100644 index 0000000..1326b15 --- /dev/null +++ b/hosts/photos/minio.nix @@ -0,0 +1,35 @@ +{ + config, + pkgs, + lib, + ... +}: { + sops.secrets."ente/minio/root_user" = {}; + sops.secrets."ente/minio/root_password" = {}; + + sops.templates."minio-root-credentials".content = '' + MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"} + MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"} + ''; + + services.minio = { + enable = true; + rootCredentialsFile = config.sops.templates."minio-root-credentials".path; + }; + + systemd.services.minio = { + environment.MINIO_SERVER_URL = "https://ente-minio-api.procopius.dk"; + postStart = '' + # Wait until minio is up + ${lib.getExe pkgs.curl} --retry 5 --retry-connrefused --fail --no-progress-meter -o /dev/null "http://localhost:9000/minio/health/live" + + # Make sure bucket exists + mkdir -p ${lib.escapeShellArg config.services.minio.dataDir}/ente + ''; + }; + + networking.firewall.allowedTCPPorts = [ + 9000 + 9001 + ]; +} diff --git a/hosts/sandbox/default.nix b/hosts/sandbox/default.nix new file mode 100644 index 0000000..2782e30 --- /dev/null +++ b/hosts/sandbox/default.nix @@ -0,0 +1,57 @@ +{ + config, + name, + ... 
+}: {
+  sops.secrets."restic/default-password" = {};
+
+  homelab = {
+    enable = true;
+    hostname = name;
+    tags = [name];
+
+    monitoring.enable = true;
+    motd.enable = true;
+
+    backups = {
+      enable = true;
+      backends = {
+        restic = {
+          enable = true;
+          repository = "/srv/restic-repo";
+          passwordFile = config.sops.secrets."restic/default-password".path;
+        };
+      };
+      jobs = [
+        {
+          name = "sandbox-home";
+          backend = "restic";
+          backendOptions = {
+            paths = ["/home/plasmagoat"];
+            repository = "/srv/restic-repo";
+            pruneOpts = [
+              "--keep-daily 7"
+              "--keep-weekly 4"
+              "--keep-monthly 6"
+              "--keep-yearly 3"
+            ];
+          };
+        }
+      ];
+    };
+
+    services.prometheus = {
+      enable = true;
+    };
+
+    services.gatus = {
+      enable = true;
+      ui = {
+        title = "Homelab Status Dashboard";
+        header = "My Homelab Services";
+      };
+    };
+  };
+
+  system.stateVersion = "25.05";
+}
diff --git a/infrastructure/nixos-cloud-init/README.md b/infrastructure/nixos-cloud-init/README.md
index f2b880b..f733ea9 100644
--- a/infrastructure/nixos-cloud-init/README.md
+++ b/infrastructure/nixos-cloud-init/README.md
@@ -5,7 +5,7 @@
 nix run github:nix-community/nixos-generators -- -f proxmox -c configuration.nix
 ```
 
-## Update to proxmox
+## Upload to proxmox
 ```
 scp /nix/store/jvwxp7agny9979fglf76s0ca9m2h6950-proxmox-nixos-cloud-init/vzdump-qemu-nixos-cloud-init.vma.zst root@192.168.1.206:/var/lib/vz/dump
 ```
@@ -16,3 +16,6 @@
 qmrestore /var/lib/vz/dump/vzdump-qemu-nixos-cloud-init.vma.zst 9000 --unique true
 qm template 9000
 ```
+
+## Future
+Maybe look into nixos-anywhere, as done here: https://github.com/solomon-b/nixos-config
diff --git a/infrastructure/proxmox/main.tf b/infrastructure/proxmox/main.tf
index ca08aca..2741f89 100644
--- a/infrastructure/proxmox/main.tf
+++ b/infrastructure/proxmox/main.tf
@@ -1,11 +1,11 @@
 module "sandbox_vm" {
   source = "./modules/nixos-vm"
 
-  vmid = 123
-  name = "sandbox"
-  target_node = var.pm_node
-  sshkeys = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air"
-  cipassword = "$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/"
+  vmid        = 123
+  name        = "sandbox"
+  target_node = var.pm_node
+  sshkeys     = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air"
+  cipassword  = "$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/"
   # You can override any default variable here:
   # cpu_cores = 4
   # memory = 2048
diff --git a/infrastructure/proxmox/providers.tf b/infrastructure/proxmox/providers.tf
index 07ff31b..2d5b30c 100644
--- a/infrastructure/proxmox/providers.tf
+++ b/infrastructure/proxmox/providers.tf
@@ -1,6 +1,6 @@
 provider "proxmox" {
-  pm_tls_insecure = true
-  pm_api_url = var.pm_api_url
-  pm_api_token_id = var.pm_api_token_id
+  pm_tls_insecure     = true
+  pm_api_url          = var.pm_api_url
+  pm_api_token_id     = var.pm_api_token_id
   pm_api_token_secret = var.pm_api_token_secret
 }
diff --git a/infrastructure/proxmox/terraform.tfvars b/infrastructure/proxmox/terraform.tfvars
index 32006a4..2460b7a 100644
--- a/infrastructure/proxmox/terraform.tfvars
+++ b/infrastructure/proxmox/terraform.tfvars
@@ -1,3 +1,6 @@
+pm_api_url = "https://192.168.1.205:8006/api2/json"
+pm_api_token_id = "root@pam!opentofu"
+pm_api_token_secret = "7660e962-9240-44ea-b1dc-e5176caba450"
 pm_node = "proxmox-01"
 
 # nixos_template_id = 9100
diff --git a/infrastructure/proxmox/versions.tf
b/infrastructure/proxmox/versions.tf index a2341f9..3330c3b 100644 --- a/infrastructure/proxmox/versions.tf +++ b/infrastructure/proxmox/versions.tf @@ -1,7 +1,7 @@ terraform { required_providers { proxmox = { - source = "Telmate/proxmox" + source = "Telmate/proxmox" version = "3.0.2-rc01" } } diff --git a/machines/_default/configuration.nix b/machines/_default/configuration.nix index 3d38005..6489a29 100644 --- a/machines/_default/configuration.nix +++ b/machines/_default/configuration.nix @@ -18,11 +18,17 @@ replaceUnknownProfiles = lib.mkDefault true; buildOnTarget = lib.mkDefault false; targetHost = lib.mkDefault "${name}.lab"; - tags = lib.mkDefault [config.nixpkgs.system name "homelab"]; + tags = [config.nixpkgs.system name "homelab"]; + keys = { + "age.key" = { + destDir = "/run/keys"; + keyFile = "/home/plasmagoat/.config/age/age.key"; + }; + }; }; sops = { - age.keyFile = "/etc/sops/age.key"; + age.keyFile = "/run/keys/age.key"; defaultSopsFile = ../../secrets/secrets.yaml; }; diff --git a/machines/auth/authelia.nix b/machines/auth/authelia.nix index dc57c96..01a5a04 100644 --- a/machines/auth/authelia.nix +++ b/machines/auth/authelia.nix @@ -9,166 +9,164 @@ in { 9091 ]; - services = { - authelia.instances.procopius = { - enable = true; - settings = { - theme = "auto"; - server = { - buffers = { - read = 16384; - write = 16384; - }; + services.authelia.instances.procopius = { + enable = true; + settings = { + theme = "auto"; + server = { + buffers = { + read = 16384; + write = 16384; }; - authentication_backend.ldap = { - implementation = "lldap"; - address = "ldap://localhost:3890"; - base_dn = "dc=procopius,dc=dk"; - user = "uid=authelia,ou=people,dc=procopius,dc=dk"; + }; + authentication_backend.ldap = { + implementation = "lldap"; + address = "ldap://localhost:3890"; + base_dn = "dc=procopius,dc=dk"; + user = "uid=authelia,ou=people,dc=procopius,dc=dk"; + }; + definitions = { + network = { + internal = [ + "192.168.1.0/24" + ]; }; - definitions = { - network = { - internal = [ - "192.168.1.0/24" + }; + access_control = { + default_policy = "deny"; + # We want this rule to be low priority so it doesn't override the others + rules = lib.mkAfter [ + { + domain = [ + "proxmox.procopius.dk" + "traefik.procopius.dk" + "prometheus.procopius.dk" + "alertmanager.procopius.dk" ]; - }; - }; - access_control = { - default_policy = "deny"; - # We want this rule to be low priority so it doesn't override the others - rules = lib.mkAfter [ - { - domain = [ - "proxmox.procopius.dk" - "traefik.procopius.dk" - "prometheus.procopius.dk" - "alertmanager.procopius.dk" - ]; - policy = "one_factor"; - subject = [ - ["group:server-admin"] - ]; - } - # bypass /api and /ping - { - domain = ["*.procopius.dk"]; - policy = "bypass"; - resources = [ - "^/api$" - "^/api/" - "^/ping$" - ]; - } - # media - { - domain = [ - "sonarr.procopius.dk" - "radarr.procopius.dk" - "readarr.procopius.dk" - "lidarr.procopius.dk" - "bazarr.procopius.dk" - "prowlarr.procopius.dk" - ]; - policy = "one_factor"; - subject = [ - ["group:media-admin"] - ]; - } - # authenticated - { - domain = [ - "gatus.procopius.dk" - ]; - policy = "one_factor"; - } - # bypass auth internally - # { - # domain = [ - # "gatus.procopius.dk" - # "prometheus.procopius.dk" - # "alertmanager.procopius.dk" - # "sonarr.procopius.dk" - # "radarr.procopius.dk" - # "readarr.procopius.dk" - # "lidarr.procopius.dk" - # "bazarr.procopius.dk" - # "prowlarr.procopius.dk" - # ]; - # policy = "bypass"; - # networks = [ - # "internal" - # ]; - # } - ]; - }; - 
storage.postgres = { - address = "unix:///run/postgresql"; - database = authelia; - username = authelia; - # I'm using peer authentication, so this doesn't actually matter, but Authelia - # complains if I don't have it. - # https://github.com/authelia/authelia/discussions/7646 - password = authelia; - }; - session = { - redis.host = "/var/run/redis-procopius/redis.sock"; - cookies = [ - { - domain = "procopius.dk"; - authelia_url = "https://authelia.procopius.dk"; - # The period of time the user can be inactive for before the session is destroyed - inactivity = "1M"; - # The period of time before the cookie expires and the session is destroyed - expiration = "3M"; - # The period of time before the cookie expires and the session is destroyed - # when the remember me box is checked - remember_me = "1y"; - } - ]; - }; - notifier.smtp = { - address = "smtp://mail.procopius.dk"; - username = "authelia@procopius.dk"; - sender = "authelia@procopius.dk"; - }; - log.level = "info"; - # identity_providers.oidc = { - # # https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter - # claims_policies = { - # # karakeep.id_token = ["email"]; - # }; - # cors = { - # endpoints = ["token"]; - # allowed_origins_from_client_redirect_uris = true; - # }; - # authorization_policies.default = { - # default_policy = "one_factor"; - # rules = [ - # { - # policy = "deny"; - # subject = "group:lldap_strict_readonly"; - # } - # ]; - # }; - # }; - # Necessary for Traefik integration - # See https://www.authelia.com/integration/proxies/traefik/#implementation - server.endpoints.authz.forward-auth.implementation = "ForwardAuth"; + policy = "one_factor"; + subject = [ + ["group:server-admin"] + ]; + } + # bypass /api and /ping + { + domain = ["*.procopius.dk"]; + policy = "bypass"; + resources = [ + "^/api$" + "^/api/" + "^/ping$" + ]; + } + # media + { + domain = [ + "sonarr.procopius.dk" + "radarr.procopius.dk" + "readarr.procopius.dk" + "lidarr.procopius.dk" + "bazarr.procopius.dk" + "prowlarr.procopius.dk" + ]; + policy = "one_factor"; + subject = [ + ["group:media-admin"] + ]; + } + # authenticated + { + domain = [ + "gatus.procopius.dk" + ]; + policy = "one_factor"; + } + # bypass auth internally + # { + # domain = [ + # "gatus.procopius.dk" + # "prometheus.procopius.dk" + # "alertmanager.procopius.dk" + # "sonarr.procopius.dk" + # "radarr.procopius.dk" + # "readarr.procopius.dk" + # "lidarr.procopius.dk" + # "bazarr.procopius.dk" + # "prowlarr.procopius.dk" + # ]; + # policy = "bypass"; + # networks = [ + # "internal" + # ]; + # } + ]; }; - # Templates don't work correctly when parsed from Nix, so our OIDC clients are defined here - # settingsFiles = [./oidc_clients.yaml]; - secrets = with config.sops; { - jwtSecretFile = secrets."authelia/jwt_secret".path; - # oidcIssuerPrivateKeyFile = secrets."authelia/jwks".path; - # oidcHmacSecretFile = secrets."authelia/hmac_secret".path; - sessionSecretFile = secrets."authelia/session_secret".path; - storageEncryptionKeyFile = secrets."authelia/storage_encryption_key".path; + storage.postgres = { + address = "unix:///run/postgresql"; + database = authelia; + username = authelia; + # I'm using peer authentication, so this doesn't actually matter, but Authelia + # complains if I don't have it. 
+ # https://github.com/authelia/authelia/discussions/7646 + password = authelia; }; - environmentVariables = with config.sops; { - AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = - secrets."authelia/lldap_authelia_password".path; - AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = secrets.smtp-password_authelia.path; + session = { + redis.host = "/var/run/redis-procopius/redis.sock"; + cookies = [ + { + domain = "procopius.dk"; + authelia_url = "https://authelia.procopius.dk"; + # The period of time the user can be inactive for before the session is destroyed + inactivity = "1M"; + # The period of time before the cookie expires and the session is destroyed + expiration = "3M"; + # The period of time before the cookie expires and the session is destroyed + # when the remember me box is checked + remember_me = "1y"; + } + ]; }; + notifier.smtp = { + address = "smtp://mail.procopius.dk"; + username = "authelia@procopius.dk"; + sender = "authelia@procopius.dk"; + }; + log.level = "info"; + # identity_providers.oidc = { + # # https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter + # claims_policies = { + # # karakeep.id_token = ["email"]; + # }; + # cors = { + # endpoints = ["token"]; + # allowed_origins_from_client_redirect_uris = true; + # }; + # authorization_policies.default = { + # default_policy = "one_factor"; + # rules = [ + # { + # policy = "deny"; + # subject = "group:lldap_strict_readonly"; + # } + # ]; + # }; + # }; + # Necessary for Traefik integration + # See https://www.authelia.com/integration/proxies/traefik/#implementation + server.endpoints.authz.forward-auth.implementation = "ForwardAuth"; + }; + # Templates don't work correctly when parsed from Nix, so our OIDC clients are defined here + # settingsFiles = [./oidc_clients.yaml]; + secrets = with config.sops; { + jwtSecretFile = secrets."authelia/jwt_secret".path; + # oidcIssuerPrivateKeyFile = secrets."authelia/jwks".path; + # oidcHmacSecretFile = secrets."authelia/hmac_secret".path; + sessionSecretFile = secrets."authelia/session_secret".path; + storageEncryptionKeyFile = secrets."authelia/storage_encryption_key".path; + }; + environmentVariables = with config.sops; { + AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = + secrets."authelia/lldap_authelia_password".path; + AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = secrets.smtp-password_authelia.path; }; }; diff --git a/machines/auth/bootstrap/service-accounts.nix b/machines/auth/bootstrap/service-accounts.nix index dc37282..e4a295e 100644 --- a/machines/auth/bootstrap/service-accounts.nix +++ b/machines/auth/bootstrap/service-accounts.nix @@ -3,6 +3,7 @@ sops.secrets."service_accounts/forgejo/password" = {}; sops.secrets."service_accounts/jellyfin/password" = {}; sops.secrets."service_accounts/mail/password" = {}; + sops.secrets."service_accounts/ente/password" = {}; sops.templates."service-accounts.json" = { content = '' { @@ -44,6 +45,16 @@ "mail" ] } + { + "id": "ente", + "email": "ente@procopius.dk", + "password": "${config.sops.placeholder."service_accounts/ente/password"}", + "displayName": "ente", + "groups": [ + "lldap_password_manager", + "mail" + ] + } ''; path = "/bootstrap/user-configs/service-accounts.json"; owner = "lldap"; diff --git a/machines/auth/configuration.nix b/machines/auth/configuration.nix index 9f51678..37900b9 100644 --- a/machines/auth/configuration.nix +++ b/machines/auth/configuration.nix @@ -4,6 +4,7 @@ ./authelia.nix ./postgres.nix ./redis.nix + ../modules/pgbackrest.nix ]; 
deployment.tags = ["authelia" "sso" "ldap" "lldap"]; diff --git a/machines/auth/postgres.nix b/machines/auth/postgres.nix index f73a57a..5b28cbe 100644 --- a/machines/auth/postgres.nix +++ b/machines/auth/postgres.nix @@ -18,6 +18,7 @@ authentication = lib.mkForce '' # TYPE DATABASE USER ADDRESS METHOD local all all trust + host all all 127.0.0.1/32 trust ''; }; } diff --git a/machines/mail/configuration.nix b/machines/mail/configuration.nix index 15700a3..9582a67 100644 --- a/machines/mail/configuration.nix +++ b/machines/mail/configuration.nix @@ -2,6 +2,7 @@ imports = [ ./mailserver.nix ./networking.nix + ./roundcube.nix inputs.simple-nixos-mailserver.nixosModule ]; diff --git a/machines/mail/mailserver.nix b/machines/mail/mailserver.nix index 5b0563a..6cb0872 100644 --- a/machines/mail/mailserver.nix +++ b/machines/mail/mailserver.nix @@ -1,10 +1,14 @@ {config, ...}: { sops.secrets."service_accounts/mail/password" = {}; + sops.secrets."cloudflare/dns-api-token" = {}; + sops.secrets."cloudflare/zone-api-token" = {}; + mailserver = { enable = true; stateVersion = 3; fqdn = "mail.procopius.dk"; domains = ["procopius.dk"]; + dmarcReporting.enable = true; localDnsResolver = false; ldap = { enable = true; @@ -28,10 +32,17 @@ searchBase = "ou=people,dc=procopius,dc=dk"; }; - # Use Let's Encrypt certificates. Note that this needs to set up a stripped - # down nginx and opens port 80. - certificateScheme = "acme-nginx"; + certificateScheme = "acme"; + acmeCertificateName = "mail.procopius.dk"; }; security.acme.acceptTerms = true; security.acme.defaults.email = "david.mikael@proton.me"; + security.acme.defaults = { + dnsProvider = "cloudflare"; + dnsResolver = "1.1.1.1:53"; + credentialFiles = { + "CF_DNS_API_TOKEN_FILE" = config.sops.secrets."cloudflare/dns-api-token".path; + "CF_ZONE_API_TOKEN_FILE" = config.sops.secrets."cloudflare/zone-api-token".path; + }; + }; } diff --git a/machines/mail/roundcube.nix b/machines/mail/roundcube.nix new file mode 100644 index 0000000..447f8b0 --- /dev/null +++ b/machines/mail/roundcube.nix @@ -0,0 +1,22 @@ +{ + lib, + config, + ... +}: { + services.roundcube = { + enable = true; + hostName = "roundcube.procopius.dk"; + extraConfig = '' + # starttls needed for authentication, so the fqdn required to match + # the certificate + $config['smtp_host'] = "tls://${config.mailserver.fqdn}"; + $config['smtp_user'] = "%u"; + $config['smtp_pass'] = "%p"; + ''; + }; + + services.nginx.virtualHosts."roundcube.procopius.dk" = { + forceSSL = lib.mkForce false; + enableACME = lib.mkForce false; + }; +} diff --git a/machines/modules/README.md b/machines/modules/README.md new file mode 100644 index 0000000..b775dd2 --- /dev/null +++ b/machines/modules/README.md @@ -0,0 +1,11 @@ +# Homelab nixos global config + +A global module config for my homelab, where we gather: +* Monitoring endpoints (/metrics + port + host) +* Promtail log files +* Reverse proxy configuration +* Postgres backups (pgbackrest) +* Restic backups +* ...? +* LDAP config +* OIDC configs diff --git a/machines/modules/pgbackrest.nix b/machines/modules/pgbackrest.nix new file mode 100644 index 0000000..4c4cf12 --- /dev/null +++ b/machines/modules/pgbackrest.nix @@ -0,0 +1,43 @@ +{ + lib, + config, + name, + # meta, + ... 
+}: { + fileSystems."/mnt/pgdumps" = { + device = "192.168.1.226:/volume1/database_backups/${name}"; + fsType = "nfs4"; + options = ["x-systemd.automount" "noatime" "_netdev"]; + }; + services.postgresqlBackup = { + enable = true; + # We trigger this through restic + startAt = []; + # startAt = "*-*-* 01:15:00"; + compression = "zstd"; + databases = [ + "authelia-procopius" + "lldap" + ]; + }; + + # services.restic.backups.b2 = { + # environmentFile = config.sops.templates.restic_floofs_env.path; + # repositoryFile = config.sops.secrets.b2_floofs_server_repository.path; + # passwordFile = config.sops.secrets.b2_floofs_server_password.path; + + # paths = ["/var/backup/postgresql"]; + # initialize = true; + # pruneOpts = [ + # "--keep-daily 7" + # "--keep-weekly 3" + # "--keep-monthly 3" + # ]; + # timerConfig = { + # OnCalendar = "04:45"; + # Persistent = true; + # }; + # }; + # systemd.services.restic-backups-b2.wants = ["postgresqlBackup.service"]; +} diff --git a/machines/photos/configuration.nix b/machines/photos/configuration.nix index db54baf..3369b76 100644 --- a/machines/photos/configuration.nix +++ b/machines/photos/configuration.nix @@ -2,6 +2,7 @@ imports = [ outputs.nixosModules.ente ./ente.nix + ./minio.nix ]; deployment.tags = ["ente"]; diff --git a/machines/photos/ente.nix b/machines/photos/ente.nix index abca1d9..e1e9a0c 100644 --- a/machines/photos/ente.nix +++ b/machines/photos/ente.nix @@ -1,25 +1,73 @@ { + config, + pkgs, + ... +}: { + sops.secrets."ente/minio/root_password".owner = "ente"; + sops.secrets."ente/minio/root_user".owner = "ente"; + sops.secrets."service_accounts/ente/password".owner = "ente"; + + environment.systemPackages = with pkgs; [ + ente-cli + ]; + services.ente.api = { enable = true; enableLocalDB = true; - domain = "ente-v2.procopius.dk"; + domain = "ente-museum.procopius.dk"; settings = { # apps = { # accounts = "https://accounts.procopius.dk"; # cast = "https://cast.procopius.dk"; # public-albums = "https://albums.procopius.dk"; # }; + + smtp = { + host = "mail.procopius.dk"; + port = "465"; + username = "ente@procopius.dk"; + password._secret = config.sops.secrets."service_accounts/ente/password".path; + # The email address from which to send the email. Set this to an email + # address whose credentials you're providing. + email = "ente@procopius.dk"; + # Optional override for the sender name in the emails. If specified, it will + # be used for all emails sent by the instance (default is email specific). 
+ sender-name = "ente"; + }; + internal.admins = [ + 1580559962386438 + ]; + s3 = { + use_path_style_urls = true; + b2-eu-cen = { + endpoint = "https://ente-minio-api.procopius.dk"; + region = "us-east-1"; + bucket = "ente"; + key._secret = config.sops.secrets."ente/minio/root_user".path; + secret._secret = config.sops.secrets."ente/minio/root_password".path; + }; + }; }; }; services.ente.web = { enable = true; domains = { - api = "ente-v2.procopius.dk"; - accounts = "accounts.procopius.dk"; - albums = "albums.procopius.dk"; - cast = "cast.procopius.dk"; - photos = "photos.procopius.dk"; + api = "ente-museum.procopius.dk"; + accounts = "ente-accounts.procopius.dk"; + albums = "ente-albums.procopius.dk"; + cast = "ente-cast.procopius.dk"; + photos = "ente-photos.procopius.dk"; + auth = "ente-auth.procopius.dk"; }; }; + + networking.firewall.allowedTCPPorts = [ + 3000 + 3001 + 3002 + 3003 + 3004 + 8080 + ]; } diff --git a/machines/photos/minio.nix b/machines/photos/minio.nix index c3d6ee2..1326b15 100644 --- a/machines/photos/minio.nix +++ b/machines/photos/minio.nix @@ -1,6 +1,35 @@ { + config, + pkgs, + lib, + ... +}: { + sops.secrets."ente/minio/root_user" = {}; + sops.secrets."ente/minio/root_password" = {}; + + sops.templates."minio-root-credentials".content = '' + MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"} + MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"} + ''; + services.minio = { enable = true; - rootCredentialsFile = "/etc/nixos/minio-root-credentials"; + rootCredentialsFile = config.sops.templates."minio-root-credentials".path; }; + + systemd.services.minio = { + environment.MINIO_SERVER_URL = "https://ente-minio-api.procopius.dk"; + postStart = '' + # Wait until minio is up + ${lib.getExe pkgs.curl} --retry 5 --retry-connrefused --fail --no-progress-meter -o /dev/null "http://localhost:9000/minio/health/live" + + # Make sure bucket exists + mkdir -p ${lib.escapeShellArg config.services.minio.dataDir}/ente + ''; + }; + + networking.firewall.allowedTCPPorts = [ + 9000 + 9001 + ]; } diff --git a/machines/sandbox/configuration.nix b/machines/sandbox/configuration.nix index 6c1ca72..62d1715 100644 --- a/machines/sandbox/configuration.nix +++ b/machines/sandbox/configuration.nix @@ -1,5 +1,18 @@ -{ +{outputs, ...}: { deployment.tags = ["sandbox"]; + imports = [ + outputs.nixosModules.global-config + ]; + + homelab.global = { + enable = true; + hostname = "sandbox"; + domain = "sandbox.local"; + environment = "production"; + location = "proxmox"; + tags = ["sandbox"]; + }; + system.stateVersion = "25.05"; } diff --git a/modules/homelab/backup-config.nix b/modules/homelab/backup-config.nix new file mode 100644 index 0000000..e26dcb2 --- /dev/null +++ b/modules/homelab/backup-config.nix @@ -0,0 +1,116 @@ +{ + config, + lib, + ... 
+}: +with lib; let + cfg = config.homelab.backups; + homelabCfg = config.homelab; + + # Get all defined backend names dynamically + backendNames = attrNames cfg.backends or {}; + + backupJobType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Name of the backup job"; + }; + backend = mkOption { + type = types.enum backendNames; + description = "Backend to use for this backup job"; + }; + backendOptions = mkOption { + type = types.attrs; + default = {}; + description = "Backend-specific options to override or extend the backend configuration"; + }; + labels = mkOption { + type = types.attrsOf types.str; + default = {}; + description = "Additional labels for this backup job"; + }; + }; + }; +in { + imports = [ + ./backup/restic.nix + # ./backup/borgbackup.nix + ]; + + options.homelab.backups = { + enable = mkEnableOption "Homelab backup system"; + + jobs = mkOption { + type = types.listOf backupJobType; + default = []; + description = "Backup jobs to execute on this system"; + }; + + defaultLabels = mkOption { + type = types.attrsOf types.str; + default = { + hostname = homelabCfg.hostname; + environment = homelabCfg.environment; + location = homelabCfg.location; + }; + description = "Default labels applied to all backup jobs"; + }; + + monitoring = mkOption { + type = types.bool; + default = true; + description = "Enable backup monitoring and metrics"; + }; + }; + + config = mkIf cfg.enable { + # Validate that all job backends exist + assertions = [ + { + assertion = all (job: cfg.backends.${job.backend} != null) cfg.jobs; + message = "All backup jobs must reference backends that are defined and not null in homelab.backups.backends"; + } + ]; + + # Add backup jobs to monitoring endpoints if monitoring is enabled + # homelab.monitoring.endpoints = + # mkIf (cfg.monitoring && config.homelab.monitoring.enable) + # (map (job: { + # name = "backup-${job.name}"; + # port = 9100; # Assuming node exporter collects backup metrics + # path = "/metrics"; + # jobName = "backup"; + # labels = + # cfg.defaultLabels + # // job.labels + # // { + # backup_job = job.name; + # backup_backend = job.backend; + # }; + # }) + # cfg.jobs); + + # Export backup configuration for external consumption + environment.etc."homelab/backup-config.json".text = builtins.toJSON { + backends = + mapAttrs (name: config: { + inherit name; + enabled = config.enable or false; + }) + cfg.backends; + + jobs = + map (job: { + inherit (job) name backend labels; + allLabels = cfg.defaultLabels // job.labels; + paths = job.backendOptions.paths or []; + schedule = job.backendOptions.timerConfig.OnCalendar or job.backendOptions.startAt or "unknown"; + node = homelabCfg.hostname; + environment = homelabCfg.environment; + location = homelabCfg.location; + }) + cfg.jobs; + }; + }; +} diff --git a/modules/homelab/backup/restic.nix b/modules/homelab/backup/restic.nix new file mode 100644 index 0000000..31e150a --- /dev/null +++ b/modules/homelab/backup/restic.nix @@ -0,0 +1,105 @@ +{ + config, + lib, + ... 
+}: +with lib; let + cfg = config.homelab.backups; + + # Get restic backend config if it exists + resticBackend = cfg.backends.restic or null; + resticEnabled = resticBackend.enable or false; + + # Filter jobs that use the restic backend + resticJobs = filter (job: job.backend == "restic") cfg.jobs; +in { + options.homelab.backups.backends.restic = mkOption { + type = types.nullOr (types.submodule { + options = { + enable = mkEnableOption "Restic backup backend"; + + # Default restic options - these map directly to services.restic.backups. + repository = mkOption { + type = types.str; + description = "Default repository for restic backups"; + }; + + initialize = lib.mkOption { + type = lib.types.bool; + default = true; + description = '' + Create the repository if it doesn't exist. + ''; + }; + + passwordFile = mkOption { + type = types.nullOr types.path; + default = null; + description = "Default password file for restic repository"; + }; + + environmentFile = mkOption { + type = types.nullOr types.path; + default = null; + description = "Default environment file for restic credentials"; + }; + + paths = mkOption { + type = types.listOf types.str; + default = []; + description = "Default paths to backup"; + }; + + exclude = mkOption { + type = types.listOf types.str; + default = []; + description = "Default exclude patterns"; + }; + + timerConfig = mkOption { + type = types.attrs; + default = { + OnCalendar = "daily"; + RandomizedDelaySec = "1h"; + }; + description = "Default timer configuration"; + }; + + pruneOpts = mkOption { + type = types.listOf types.str; + default = [ + "--keep-daily 7" + "--keep-weekly 4" + "--keep-monthly 6" + ]; + description = "Default pruning options"; + }; + + # Allow any other restic options + extraOptions = mkOption { + type = types.attrs; + default = {}; + description = "Additional default restic options"; + }; + }; + }); + default = null; + description = "Restic backend configuration"; + }; + + config = mkIf (cfg.enable && resticEnabled && length resticJobs > 0) { + # Configure restic service for each job using the restic backend + services.restic.backups = listToAttrs (map ( + job: let + # Get base config without the 'enable' field + baseConfig = removeAttrs resticBackend ["enable"]; + # Merge extraOptions into base config + baseWithExtras = recursiveUpdate (removeAttrs baseConfig ["extraOptions"]) (baseConfig.extraOptions or {}); + # Apply job-specific overrides + finalConfig = recursiveUpdate baseWithExtras job.backendOptions; + in + nameValuePair job.name finalConfig + ) + resticJobs); + }; +} diff --git a/modules/homelab/default.nix b/modules/homelab/default.nix new file mode 100644 index 0000000..e56aae9 --- /dev/null +++ b/modules/homelab/default.nix @@ -0,0 +1,133 @@ +{ + config, + lib, + ... 
+}:
+with lib; let
+  cfg = config.homelab;
+
+  nodeAgg = import ./lib/node-aggregation.nix {inherit lib;};
+in {
+  imports = [
+    ./monitoring-config.nix
+    ./proxy-config.nix
+    ./backup-config.nix
+    ./motd
+
+    ./services
+
+    # Global aggregation modules
+    (nodeAgg.mkGlobalModule "monitoring" nodeAgg.aggregators.monitoring)
+    # (nodeAgg.mkGlobalModule "logs" nodeAgg.aggregators.logs)
+    (nodeAgg.mkGlobalModule "reverseProxy" nodeAgg.aggregators.reverseProxy)
+    (nodeAgg.mkGlobalModule "backups" nodeAgg.aggregators.backups)
+  ];
+
+  options.homelab = {
+    enable = mkEnableOption "Homelab fleet configuration";
+    hostname = mkOption {
+      type = types.str;
+      description = "Hostname for this system";
+    };
+    domain = mkOption {
+      type = types.str;
+      default = "lab";
+      description = "Base domain for the homelab";
+    };
+    externalDomain = mkOption {
+      type = types.str;
+      default = "procopius.dk";
+      description = "External domain for the homelab";
+    };
+    environment = mkOption {
+      type = types.enum ["production" "staging" "development"];
+      default = "production";
+      description = "Environment type";
+    };
+    location = mkOption {
+      type = types.str;
+      default = "homelab";
+      description = "Physical location identifier";
+    };
+    tags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = "Tags for this system";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Set hostname
+    networking.hostName = cfg.hostname;
+
+    # Export configuration for external consumption
+    environment.etc."homelab/config.json".text = builtins.toJSON {
+      inherit (cfg) hostname domain environment location tags;
+
+      monitoring = {
+        # Metrics endpoints (Prometheus, etc.)
+        metrics =
+          map (endpoint: {
+            inherit (endpoint) name host port path jobName scrapeInterval labels;
+            url = "http://${endpoint.host}:${toString endpoint.port}${endpoint.path}";
+          })
+          cfg.global.monitoring.allMetrics or [];
+
+        # Health check endpoints
+        healthChecks =
+          map (check: let
+            # Determine the host based on useExternalDomain
+            actualHost =
+              if check.useExternalDomain
+              then "${check.subdomain}.${cfg.externalDomain}"
+              else check.host;
+
+            # Build the URL
+            portPart =
+              if check.port != null
+              then ":${toString check.port}"
+              else "";
+            url = "${check.protocol}://${actualHost}${portPart}${check.path}";
+          in {
+            inherit (check) name protocol method interval timeout conditions alerts group labels enabled;
+            host = actualHost;
+            port = check.port;
+            path = check.path;
+            url = url;
+            useExternalDomain = check.useExternalDomain;
+            subdomain = check.subdomain;
+            sourceNode = cfg.hostname;
+          })
+          cfg.global.monitoring.allHealthChecks or [];
+      };
+
+      reverseProxy = {
+        entries =
+          map (entry: {
+            inherit (entry) subdomain host port path enableAuth enableSSL;
+            internalHost = "${cfg.hostname}:${toString entry.port}${entry.path}";
+            externalHost = "${entry.subdomain}.${cfg.externalDomain}";
+          })
+          cfg.global.reverseProxy.all;
+      };
+
+      backups = {
+        jobs =
+          map (job: {
+            inherit (job) name backend labels;
+            backupId = job._backupId;
+            sourceNode = job._sourceNode;
+          })
+          cfg.global.backups.all;
+
+        backends = cfg.global.backups.allBackends;
+
+        summary = {
+          totalJobs = length cfg.global.backups.all;
+          jobsByBackend = mapAttrs (backend: jobs: length jobs) cfg.global.backups.byBackend;
+          jobsByNode = mapAttrs (node: jobs: length jobs) cfg.global.backups.byNode;
+        };
+      };
+    };
+  };
+}
diff --git a/modules/homelab/lib/node-aggregation.nix b/modules/homelab/lib/node-aggregation.nix
new file mode 100644
index 0000000..1719012
--- /dev/null
+++ 
b/modules/homelab/lib/node-aggregation.nix @@ -0,0 +1,226 @@ +{lib}: let + inherit (lib) flatten mapAttrs mapAttrsToList filter groupBy length unique attrByPath splitString; + + # Generic function to aggregate any attribute across nodes + aggregateFromNodes = { + nodes, + attributePath, # e.g. "homelab.monitoring.endpoints" or "homelab.backups.jobs" + enhancer ? null, # optional function to enhance each item with node context + }: let + # Extract the attribute from each node using the path + getNestedAttr = path: config: let + pathList = splitString "." path; + in + attrByPath pathList [] config; + + # Get all items from all nodes + allItems = flatten (mapAttrsToList + (nodeName: nodeConfig: let + items = getNestedAttr attributePath nodeConfig.config; + baseEnhancer = item: + item + // { + _nodeName = nodeName; + _nodeConfig = nodeConfig; + _nodeAddress = nodeConfig.config.networking.hostName or nodeName; + }; + finalEnhancer = + if enhancer != null + then (item: enhancer (baseEnhancer item)) + else baseEnhancer; + in + map finalEnhancer items) + nodes); + in { + # Raw aggregated data + all = allItems; + + # Common grouping patterns + byNode = groupBy (item: item._nodeName) allItems; + byType = groupBy (item: item.type or "unknown") allItems; + byService = groupBy (item: item.service or "unknown") allItems; + + # Utility functions for filtering + filterBy = predicate: filter predicate allItems; + ofType = type: filter (item: (item.type or "") == type) allItems; + + count = length allItems; + countBy = fn: mapAttrs (key: items: length items) (groupBy fn allItems); + }; + + # Specialized aggregators for common use cases + aggregators = { + monitoring = nodes: let + # Aggregate metrics endpoints + metricsAgg = aggregateFromNodes { + inherit nodes; + attributePath = "homelab.monitoring.metrics"; + enhancer = endpoint: + endpoint + // { + _fullAddress = "${endpoint.host or endpoint._nodeAddress}:${toString endpoint.port}"; + _metricsUrl = "http://${endpoint.host or endpoint._nodeAddress}:${toString endpoint.port}${endpoint.path or "/metrics"}"; + _type = "metrics"; + }; + }; + # Aggregate health checks + healthChecksAgg = aggregateFromNodes { + inherit nodes; + attributePath = "homelab.monitoring.healthChecks"; + enhancer = check: let + # Compute the actual host and URL + actualHost = + if check.useExternalDomain or false + then "${check.subdomain}.${check._nodeConfig.config.homelab.externalDomain or "example.com"}" + else check.host or check._nodeAddress; + portPart = + if check.port != null + then ":${toString check.port}" + else ""; + url = "${check.protocol or "http"}://${actualHost}${portPart}${check.path or "/"}"; + in + check + // { + _actualHost = actualHost; + _url = url; + _type = "health-check"; + # Merge default labels with node context + labels = + (check.labels or {}) + // { + node = check._nodeName; + environment = check._nodeConfig.config.homelab.environment or "unknown"; + }; + }; + }; + in + metricsAgg + // healthChecksAgg + // { + # Metrics-specific aggregations + allMetrics = metricsAgg.all; + metricsByNode = metricsAgg.byNode; + metricsByJobName = groupBy (m: m.jobName or "unknown") metricsAgg.all; + + # Health checks-specific aggregations + allHealthChecks = healthChecksAgg.all; + healthChecksByNode = healthChecksAgg.byNode; + healthChecksByGroup = groupBy (hc: hc.group or "default") healthChecksAgg.all; + healthChecksByProtocol = groupBy (hc: hc.protocol or "http") healthChecksAgg.all; + + # Filtered health checks + externalHealthChecks = filter (hc: hc.useExternalDomain 
or false) healthChecksAgg.all; + internalHealthChecks = filter (hc: !(hc.useExternalDomain or false)) healthChecksAgg.all; + enabledHealthChecks = filter (hc: hc.enabled or true) healthChecksAgg.all; + + # Summary statistics + summary = { + totalMetrics = length metricsAgg.all; + totalHealthChecks = length healthChecksAgg.all; + healthChecksByGroup = + mapAttrs (group: checks: length checks) + (groupBy (hc: hc.group or "default") healthChecksAgg.all); + healthChecksByProtocol = + mapAttrs (protocol: checks: length checks) + (groupBy (hc: hc.protocol or "http") healthChecksAgg.all); + externalChecksCount = length (filter (hc: hc.useExternalDomain or false) healthChecksAgg.all); + internalChecksCount = length (filter (hc: !(hc.useExternalDomain or false)) healthChecksAgg.all); + }; + }; + + # Promtail log configurations + # logs = nodes: + # aggregateFromNodes { + # inherit nodes; + # attributePath = "homelab.logging.sources"; + # enhancer = logSource: + # logSource + # // { + # # Add log-specific computed fields + # _logPath = logSource.path or "/var/log/${logSource.service}.log"; + # _labels = + # (logSource.labels or {}) + # // { + # node = logSource._nodeName; + # service = logSource.service or "unknown"; + # }; + # }; + # }; + + # Reverse proxy configurations + reverseProxy = nodes: + aggregateFromNodes { + inherit nodes; + attributePath = "homelab.reverseProxy.entries"; + enhancer = entry: + entry + // { + # Add proxy-specific computed fields + _upstream = "http://${entry.host or entry._nodeAddress}:${toString entry.port}"; + _fqdn = "${entry.subdomain or entry.service}.${entry.domain or "local"}"; + }; + }; + + # Backup jobs with enhanced aggregation + backups = nodes: let + baseAgg = aggregateFromNodes { + inherit nodes; + attributePath = "homelab.backups.jobs"; + enhancer = backup: + backup + // { + _sourceNode = backup._nodeName; + _backupId = "${backup._nodeName}-${backup.name}"; + _jobFqdn = "${backup.name}.${backup._nodeName}"; + }; + }; + + # Get all unique backends across all nodes + allBackends = let + allBackendConfigs = + mapAttrsToList + (nodeName: nodeConfig: + attrByPath ["homelab" "backups" "backends"] {} nodeConfig.config) + nodes; + enabledBackends = flatten (map (backends: + filter (name: backends.${name} != null) (lib.attrNames backends)) + allBackendConfigs); + in + unique enabledBackends; + in + baseAgg + // { + # Backup-specific aggregations + byBackend = groupBy (job: job.backend) baseAgg.all; + allBackends = allBackends; + + # Enhanced summary + summary = { + totalJobs = length baseAgg.all; + jobsByBackend = + mapAttrs (backend: jobs: length jobs) + (groupBy (job: job.backend) baseAgg.all); + jobsByNode = baseAgg.countBy (job: job._nodeName); + availableBackends = allBackends; + backendsInUse = unique (map (job: job.backend) baseAgg.all); + }; + }; + }; +in { + inherit aggregateFromNodes aggregators; + + # Convenience function to create a module that provides global aggregations + mkGlobalModule = attributeName: aggregatorFn: { + lib, + nodes, + ... 
+ }: { + options.homelab.global.${attributeName} = lib.mkOption { + type = lib.types.attrs; + readOnly = true; + description = "Globally aggregated ${attributeName} from all nodes"; + }; + + config.homelab.global.${attributeName} = aggregatorFn nodes; + }; +} diff --git a/modules/homelab/lib/service-interface.nix b/modules/homelab/lib/service-interface.nix new file mode 100644 index 0000000..2bc7ed8 --- /dev/null +++ b/modules/homelab/lib/service-interface.nix @@ -0,0 +1,295 @@ +# Standard service interface for homelab services +# This provides a consistent contract that all services should follow +{lib}: let + inherit (lib) mkOption mkEnableOption types; + + # Define the standard service interface + mkServiceInterface = { + serviceName, + defaultPort ? null, + defaultSubdomain ? serviceName, + defaultDescription ? "Homelab ${serviceName} service", + monitoringPath ? "/metrics", + healthCheckPath ? "/health", + healthCheckConditions ? ["[STATUS] == 200"], + # Custom options that the service wants to expose + serviceOptions ? {}, + }: + { + # Standard interface options that all services must have + enable = mkEnableOption defaultDescription; + + port = mkOption { + type = types.port; + default = + if defaultPort != null + then defaultPort + else throw "Service ${serviceName} must specify a default port"; + description = "Port for ${serviceName} service"; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = "Whether to automatically open firewall ports"; + }; + + proxy = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable reverse proxy for this service"; + }; + + subdomain = mkOption { + type = types.str; + default = defaultSubdomain; + description = "Subdomain for reverse proxy (${defaultSubdomain}.yourdomain.com)"; + }; + + enableAuth = mkOption { + type = types.bool; + default = false; + description = "Enable authentication for reverse proxy"; + }; + + enableSSL = mkOption { + type = types.bool; + default = true; + description = "Enable SSL for reverse proxy"; + }; + }; + + monitoring = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable monitoring (metrics and health checks)"; + }; + + metricsPath = mkOption { + type = types.str; + default = monitoringPath; + description = "Path for metrics endpoint"; + }; + + jobName = mkOption { + type = types.str; + default = serviceName; + description = "Prometheus job name"; + }; + + scrapeInterval = mkOption { + type = types.str; + default = "30s"; + description = "Prometheus scrape interval"; + }; + + healthCheck = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable health check monitoring"; + }; + + path = mkOption { + type = types.str; + default = healthCheckPath; + description = "Path for health check endpoint"; + }; + + interval = mkOption { + type = types.str; + default = "30s"; + description = "Health check interval"; + }; + + timeout = mkOption { + type = types.str; + default = "10s"; + description = "Health check timeout"; + }; + + conditions = mkOption { + type = types.listOf types.str; + default = healthCheckConditions; + description = "Health check conditions"; + }; + + group = mkOption { + type = types.str; + default = "services"; + description = "Health check group name"; + }; + }; + + extraLabels = mkOption { + type = types.attrsOf types.str; + default = {}; + description = "Additional labels for monitoring"; + }; + }; + + description = mkOption { + type = types.str; + default = 
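+      # `serviceOptions` is merged in last (see `// serviceOptions` below), so a
+      # service may override any of these standard defaults. Minimal consumer
+      # sketch (hypothetical):
+      #   options.homelab.services.foo =
+      #     mkServiceInterface { serviceName = "foo"; defaultPort = 8080; };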
defaultDescription; + description = "Service description"; + }; + + extraOptions = mkOption { + type = types.attrs; + default = {}; + description = "Additional service-specific configuration options"; + }; + + # Merge in service-specific options + } + // serviceOptions; + + # Helper function to implement the standard service behavior + mkServiceConfig = { + config, + cfg, + homelabCfg, + serviceName, + # Function that returns the actual service configuration + serviceConfig, + # Optional: custom monitoring labels + extraMonitoringLabels ? {}, + # Optional: custom health check configuration + customHealthChecks ? [], + # Optional: custom reverse proxy configuration + customProxyConfig ? {}, + }: let + # Standard monitoring labels + standardLabels = + { + service = serviceName; + component = "main"; + instance = "${homelabCfg.hostname}.${homelabCfg.domain}"; + } + // extraMonitoringLabels // cfg.monitoring.extraLabels; + + # Standard reverse proxy entry + standardProxyEntry = + { + subdomain = cfg.proxy.subdomain; + host = homelabCfg.hostname; + port = cfg.port; + enableAuth = cfg.proxy.enableAuth; + enableSSL = cfg.proxy.enableSSL; + } + // customProxyConfig; + + # Standard metrics configuration + standardMetrics = lib.optional cfg.monitoring.enable { + name = "${serviceName}-metrics"; + port = cfg.port; + path = cfg.monitoring.metricsPath; + jobName = cfg.monitoring.jobName; + scrapeInterval = cfg.monitoring.scrapeInterval; + labels = standardLabels; + }; + + # Standard health check configuration + standardHealthCheck = lib.optional (cfg.monitoring.enable && cfg.monitoring.healthCheck.enable) { + name = "${serviceName}-health"; + port = cfg.port; + path = cfg.monitoring.healthCheck.path; + interval = cfg.monitoring.healthCheck.interval; + timeout = cfg.monitoring.healthCheck.timeout; + conditions = cfg.monitoring.healthCheck.conditions; + group = cfg.monitoring.healthCheck.group; + labels = standardLabels; + }; + + # Merge service config with standard behaviors + baseConfig = lib.mkMerge [ + # Service-specific configuration + serviceConfig + + # Standard firewall configuration + (lib.mkIf cfg.openFirewall { + networking.firewall.allowedTCPPorts = [cfg.port]; + }) + + # Standard monitoring configuration + (lib.mkIf cfg.monitoring.enable { + homelab.monitoring.metrics = standardMetrics; + homelab.monitoring.healthChecks = standardHealthCheck ++ customHealthChecks; + }) + + # Standard reverse proxy configuration + (lib.mkIf cfg.proxy.enable { + homelab.reverseProxy.entries = [standardProxyEntry]; + }) + ]; + in + lib.mkIf cfg.enable baseConfig; + + # Validation helper to ensure required options are set + validateServiceConfig = cfg: serviceName: [ + # Validate that if proxy is enabled, subdomain is set + (lib.mkIf (cfg.proxy.enable && cfg.proxy.subdomain == "") + (throw "Service ${serviceName}: proxy.subdomain is required when proxy.enable is true")) + + # Validate that if monitoring is enabled, required paths are set + (lib.mkIf (cfg.monitoring.enable && cfg.monitoring.metricsPath == "") + (throw "Service ${serviceName}: monitoring.metricsPath cannot be empty when monitoring is enabled")) + ]; +in { + inherit mkServiceInterface mkServiceConfig validateServiceConfig; + + # Common service option patterns + commonOptions = { + # Log level option + logLevel = mkOption { + type = types.enum ["debug" "info" "warn" "error"]; + default = "info"; + description = "Log level"; + }; + + # Environment file option (for secrets) + environmentFile = mkOption { + type = types.nullOr types.path; + default = 
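+    # NOTE: example-service.nix calls `commonOptions.dataDir "grafana"`, but no
+    # dataDir helper is defined in this set; something along these lines
+    # (hypothetical) would be needed to keep that example working:
+    #   dataDir = serviceName: mkOption {
+    #     type = types.str;
+    #     default = "/var/lib/${serviceName}";
+    #     description = "State directory for ${serviceName}";
+    #   };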
null; + description = "Environment file for secrets"; + }; + + # External URL option + externalUrl = serviceName: homelabCfg: + mkOption { + type = types.str; + default = "https://${serviceName}.${homelabCfg.externalDomain}"; + description = "External URL for ${serviceName}"; + }; + }; + + # Helper for creating service modules with the interface + mkServiceModule = { + serviceName, + defaultPort, + defaultSubdomain ? serviceName, + serviceOptions ? {}, + ... + } @ args: { + config, + lib, + ... + }: let + cfg = config.homelab.services.${serviceName}; + homelabCfg = config.homelab; + + serviceInterface = mkServiceInterface { + inherit serviceName defaultPort defaultSubdomain serviceOptions; + }; + in { + options.homelab.services.${serviceName} = serviceInterface; + + config = mkServiceConfig { + inherit config cfg homelabCfg serviceName; + # Service implementor must provide this function + serviceConfig = args.serviceConfig or (throw "mkServiceModule requires serviceConfig function"); + }; + }; +} diff --git a/modules/homelab/monitoring-config.nix b/modules/homelab/monitoring-config.nix new file mode 100644 index 0000000..2490467 --- /dev/null +++ b/modules/homelab/monitoring-config.nix @@ -0,0 +1,214 @@ +{ + config, + lib, + ... +}: +with lib; let + cfg = config.homelab.monitoring; + homelabCfg = config.homelab; + + metricsEndpointType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Name of the metrics endpoint"; + }; + host = mkOption { + type = types.str; + description = "Domain name of the host (default: hostname.domain)"; + default = "${homelabCfg.hostname}.${homelabCfg.domain}"; + }; + port = mkOption { + type = types.port; + description = "Port number for the endpoint"; + }; + path = mkOption { + type = types.str; + default = "/metrics"; + description = "Path for the metrics endpoint"; + }; + jobName = mkOption { + type = types.str; + description = "Prometheus job name"; + }; + scrapeInterval = mkOption { + type = types.str; + default = "30s"; + description = "Prometheus scrape interval"; + }; + labels = mkOption { + type = types.attrsOf types.str; + default = {}; + description = "Additional labels for this endpoint"; + }; + }; + }; + + healthCheckEndpointType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Name of the health check endpoint"; + }; + host = mkOption { + type = types.str; + description = "Domain name of the host"; + default = "${homelabCfg.hostname}.${homelabCfg.domain}"; + }; + port = mkOption { + type = types.nullOr types.port; + default = null; + description = "Port number for the endpoint (null for standard HTTP/HTTPS)"; + }; + path = mkOption { + type = types.str; + default = "/"; + description = "Path for the health check endpoint"; + }; + protocol = mkOption { + type = types.enum ["http" "https" "tcp" "icmp"]; + default = "http"; + description = "Protocol to use for health checks"; + }; + method = mkOption { + type = types.str; + default = "GET"; + description = "HTTP method for health checks (only applies to http/https)"; + }; + interval = mkOption { + type = types.str; + default = "30s"; + description = "Health check interval"; + }; + timeout = mkOption { + type = types.str; + default = "10s"; + description = "Health check timeout"; + }; + conditions = mkOption { + type = types.listOf types.str; + default = ["[STATUS] == 200"]; + description = "Health check conditions (Gatus format)"; + example = ["[STATUS] == 200" "[BODY].status == UP" "[RESPONSE_TIME] < 500"]; + }; + alerts 
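+      # Alerts follow the Gatus alert schema; minimal example, assuming a
+      # matching provider (e.g. discord) is configured in the Gatus module's
+      # `alerting` option:
+      #   alerts = [ { type = "discord"; failure-threshold = 3; } ];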
= mkOption { + type = types.listOf (types.submodule { + options = { + type = mkOption { + type = types.str; + description = "Alert type"; + example = "discord"; + }; + enabled = mkOption { + type = types.bool; + default = true; + description = "Whether this alert is enabled"; + }; + failure-threshold = mkOption { + type = types.int; + default = 3; + description = "Number of failures before alerting"; + }; + success-threshold = mkOption { + type = types.int; + default = 2; + description = "Number of successes before resolving alert"; + }; + }; + }); + default = []; + description = "Alert configurations"; + }; + group = mkOption { + type = types.str; + default = "default"; + description = "Group name for organizing health checks"; + }; + labels = mkOption { + type = types.attrsOf types.str; + default = {}; + description = "Additional labels for this health check"; + }; + enabled = mkOption { + type = types.bool; + default = true; + description = "Whether this health check is enabled"; + }; + # External domain support + useExternalDomain = mkOption { + type = types.bool; + default = false; + description = "Use external domain instead of internal"; + }; + subdomain = mkOption { + type = types.nullOr types.str; + default = null; + description = "Subdomain for external domain (required if useExternalDomain is true)"; + }; + }; + }; +in { + options.homelab.monitoring = { + enable = mkEnableOption "Homelab monitoring"; + metrics = mkOption { + type = types.listOf metricsEndpointType; + default = []; + description = "Metric endpoints exposed by this system"; + }; + + healthChecks = mkOption { + type = types.listOf healthCheckEndpointType; + default = []; + description = "Health check endpoints for uptime monitoring"; + }; + + nodeExporter = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable node exporter"; + }; + port = mkOption { + type = types.port; + default = 9100; + description = "Node exporter port"; + }; + }; + }; + + config = mkIf cfg.enable { + # Configure node exporter if enabled + services.prometheus.exporters.node = mkIf cfg.nodeExporter.enable { + enable = true; + port = cfg.nodeExporter.port; + enabledCollectors = [ + "systemd" + "textfile" + "filesystem" + "loadavg" + "meminfo" + "netdev" + "stat" + ]; + }; + + # Automatically add node exporter to monitoring endpoints + homelab.monitoring.metrics = mkIf cfg.nodeExporter.enable [ + { + name = "node-exporter"; + port = cfg.nodeExporter.port; + path = "/metrics"; + jobName = "node"; + labels = { + instance = "${homelabCfg.hostname}.${homelabCfg.domain}"; + environment = homelabCfg.environment; + location = homelabCfg.location; + }; + } + ]; + + networking.firewall.allowedTCPPorts = optionals cfg.nodeExporter.enable [ + cfg.nodeExporter.port + ]; + }; +} diff --git a/modules/homelab/motd/default.nix b/modules/homelab/motd/default.nix new file mode 100644 index 0000000..b5f3bb7 --- /dev/null +++ b/modules/homelab/motd/default.nix @@ -0,0 +1,397 @@ +# modules/motd/default.nix +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.homelab.motd; + + homelab-motd = pkgs.writeShellScriptBin "homelab-motd" '' + #! 
/usr/bin/env bash + + # Colors for output + RED="\e[31m" + GREEN="\e[32m" + YELLOW="\e[33m" + BLUE='\e[0;34m' + CYAN='\e[0;36m' + WHITE='\e[1;37m' + NC='\e[0m' # No Color + BOLD='\e[1m' + + # Helper functions + print_header() { + echo -e "''${BOLD}''${BLUE}╔══════════════════════════════════════════════════════════════╗''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC}''${WHITE} 🏠 $(hostname -s) HOMELAB ''${NC}''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•''${NC}" + } + + print_section() { + echo -e "\n''${BOLD}''${CYAN}β–Ά $1''${NC}" + echo -e "''${CYAN}─────────────────────────────────────────────────────────────''${NC}" + } + + get_service_status() { + local service="$1" + if ${pkgs.systemd}/bin/systemctl is-active --quiet "$service" 2>/dev/null; then + echo -e "''${GREEN}●''${NC}" + elif ${pkgs.systemd}/bin/systemctl is-enabled --quiet "$service" 2>/dev/null; then + echo -e "''${YELLOW}β—‹''${NC}" + else + echo -e "''${RED}Γ—''${NC}" + fi + } + + check_backup_issues() { + local issues=0 + # Check for failed backup services in the last 24 hours + if ${pkgs.systemd}/bin/journalctl --since "24 hours ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager -q 2>/dev/null | grep -q .; then + issues=$((issues + 1)) + fi + + # Check for failed backup timers + local failed_timers=$(${pkgs.systemd}/bin/systemctl list-timers --failed --no-pager --no-legend 2>/dev/null | grep -E "(backup|restic|borgbackup)" | wc -l) + issues=$((issues + failed_timers)) + + echo $issues + } + + # Main script + ${optionalString cfg.clearScreen "clear"} + print_header + + # System info + print_section "SYSTEM" + echo -e " ''${BOLD}Uptime:''${NC} $(${pkgs.procps}/bin/uptime -p | sed 's/up //')" + echo -e " ''${BOLD}Load:''${NC} $(${pkgs.procps}/bin/uptime | awk -F'load average:' '{print $2}' | xargs)" + echo -e " ''${BOLD}Memory:''${NC} $(${pkgs.procps}/bin/free -h | awk '/^Mem:/ {printf "%s/%s", $3, $2}')" + echo -e " ''${BOLD}Disk:''${NC} $(${pkgs.coreutils}/bin/df -h / | awk 'NR==2 {printf "%s/%s (%s)", $3, $2, $5}')" + + ${optionalString cfg.showServices '' + # Local homelab services (auto-detected + manual) + print_section "HOMELAB SERVICES" + + # Auto-detect services from homelab configuration + ${optionalString (config.homelab.services.gatus.enable or false) '' + status=$(get_service_status "gatus") + printf " %-20s %b %s\n" "gatus" "$status" "Uptime monitoring" + ''} + + ${optionalString (config.homelab.services.prometheus.enable or false) '' + status=$(get_service_status "prometheus") + printf " %-20s %b %s\n" "prometheus" "$status" "Metrics collection" + ''} + + ${optionalString (config.homelab.services.grafana.enable or false) '' + status=$(get_service_status "grafana") + printf " %-20s %b %s\n" "grafana" "$status" "Monitoring dashboard" + ''} + + ${optionalString (config.homelab.services.alertmanager.enable or false) '' + status=$(get_service_status "alertmanager") + printf " %-20s %b %s\n" "alertmanager" "$status" "Alert routing" + ''} + + ${optionalString (config.services.nginx.enable or false) '' + status=$(get_service_status "nginx") + printf " %-20s %b %s\n" "nginx" "$status" "Web server/proxy" + ''} + + ${optionalString (config.services.postgresql.enable or false) '' + status=$(get_service_status "postgresql") + printf " %-20s %b %s\n" "postgresql" "$status" "Database 
server" + ''} + + ${optionalString (config.services.redis.server.enable or false) '' + status=$(get_service_status "redis") + printf " %-20s %b %s\n" "redis" "$status" "Key-value store" + ''} + + # Manual services from configuration + ${concatStringsSep "\n" (mapAttrsToList (name: service: '' + status=$(get_service_status "${service.systemdService}") + printf " %-20s %b %s\n" "${name}" "$status" "${service.description}" + '') + cfg.services)} + + # Show legend + echo -e "\n ''${GREEN}●''${NC} Active ''${YELLOW}β—‹''${NC} Inactive ''${RED}Γ—''${NC} Disabled" + ''} + + # Quick backup check + backup_issues=$(check_backup_issues) + if [[ $backup_issues -gt 0 ]]; then + echo -e "\n''${BOLD}''${RED}⚠ WARNING: $backup_issues backup issues detected!''${NC}" + echo -e " Run ''${BOLD}homelab-backup-status''${NC} for details" + fi + + # Recent critical issues + error_count=$(${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q 2>/dev/null | wc -l || echo 0) + if [[ "$error_count" -gt 0 ]]; then + echo -e "\n''${BOLD}''${YELLOW}⚠ $error_count system errors in last 24h''${NC}" + echo -e " Run ''${BOLD}journalctl --priority=err --since='24 hours ago' ''${NC} for details" + fi + + # Helpful commands + echo -e "\n''${BOLD}''${BLUE}╔══════════════════════════════════════════════════════════════╗''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${WHITE}Useful commands: ''${NC}''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${CYAN}homelab-monitor-status''${NC} - Monitoring overview ''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${CYAN}homelab-backup-status''${NC} - Backup jobs status ''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${CYAN}homelab-proxy-status''${NC} - Reverse proxy entries ''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${CYAN}systemctl status ''${NC} - Check specific service ''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•''${NC}" + echo + ''; + + # Helper script for monitoring status + homelab-monitor-status = pkgs.writeShellScriptBin "homelab-monitor-status" '' + #! /usr/bin/env bash + + # Colors + RED="\e[31m" + GREEN="\e[32m" + YELLOW="\e[33m" + BLUE='\e[0;34m' + CYAN='\e[0;36m' + WHITE='\e[1;37m' + NC='\e[0m' + BOLD='\e[1m' + + CONFIG_FILE="/etc/homelab/config.json" + if [[ ! -f "$CONFIG_FILE" ]]; then + echo -e "''${RED}❌ Global homelab configuration not found''${NC}" + exit 1 + fi + + echo -e "''${BOLD}''${BLUE}πŸ“Š Homelab Monitoring Status''${NC}" + echo -e "''${BLUE}=============================''${NC}" + + # Show metrics endpoints + echo -e "\n''${BOLD}''${CYAN}Metrics Endpoints:''${NC}" + metrics_count=$(${pkgs.jq}/bin/jq '.monitoring.metrics | length' "$CONFIG_FILE" 2>/dev/null || echo 0) + if [[ $metrics_count -gt 0 ]]; then + ${pkgs.jq}/bin/jq -r '.monitoring.metrics[]? 
| " ''${GREEN}●''${NC} \(.name): ''${BOLD}\(.host):\(.port)''${NC}\(.path) ''${YELLOW}(job: \(.jobName))''${NC}"' "$CONFIG_FILE" 2>/dev/null + echo -e "\n ''${BOLD}Total: ''${metrics_count} endpoints''${NC}" + else + echo -e " ''${YELLOW}No metrics endpoints configured''${NC}" + fi + + # Show health checks by group + echo -e "\n''${BOLD}''${CYAN}Health Checks:''${NC}" + health_count=$(${pkgs.jq}/bin/jq '.monitoring.healthChecks | length' "$CONFIG_FILE" 2>/dev/null || echo 0) + if [[ $health_count -gt 0 ]]; then + # Group health checks + ${pkgs.jq}/bin/jq -r ' + .monitoring.healthChecks | + group_by(.group // "default") | + .[] | + "''${BOLD} \(.[0].group // "default" | ascii_upcase) Group:''${NC}" as $header | + ($header, ( + .[] | + " ''${if .enabled // true then "''${GREEN}●" else "''${YELLOW}β—‹" end}''${NC} \(.name): ''${BOLD}\(.protocol)://\(.host)\(if .port then ":\(.port)" else "" end)''${NC}\(.path)" + )) + ' "$CONFIG_FILE" 2>/dev/null + echo -e "\n ''${BOLD}Total: ''${health_count} health checks''${NC}" + else + echo -e " ''${YELLOW}No health checks configured''${NC}" + fi + + echo -e "\n''${CYAN}Run ''${BOLD}homelab-proxy-status''${NC}''${CYAN} and ''${BOLD}homelab-backup-status''${NC}''${CYAN} for more details.''${NC}" + ''; + + # Helper script for backup status + homelab-backup-status = pkgs.writeShellScriptBin "homelab-backup-status" '' + #! /usr/bin/env bash + + # Colors + RED="\e[31m" + GREEN="\e[32m" + YELLOW="\e[33m" + BLUE='\e[0;34m' + CYAN='\e[0;36m' + WHITE='\e[1;37m' + NC='\e[0m' + BOLD='\e[1m' + + echo -e "''${BOLD}''${BLUE}πŸ’Ύ Backup Status''${NC}" + echo -e "''${BLUE}===============''${NC}" + + # Check backup timers + echo -e "\n''${BOLD}''${CYAN}Backup Timers:''${NC}" + backup_timers=$(${pkgs.systemd}/bin/systemctl list-timers --no-pager --no-legend 2>/dev/null | grep -E "(backup|restic|borgbackup)") + if [[ -n "$backup_timers" ]]; then + while IFS= read -r line; do + if [[ -n "$line" ]]; then + next=$(echo "$line" | awk '{print $1, $2}') + left=$(echo "$line" | awk '{print $3}') + timer=$(echo "$line" | awk '{print $5}') + service=$(echo "$line" | awk '{print $6}') + + # Color code based on time left + if [[ "$left" == "n/a" ]]; then + color="''${RED}" + status="●" + elif echo "$left" | grep -qE "(sec|min|[0-9]h)"; then + color="''${YELLOW}" + status="●" + else + color="''${GREEN}" + status="●" + fi + + printf " %b%s%b %-25s Next: %s (%s)\n" "$color" "$status" "$NC" "$(basename "$timer" .timer)" "$next" "$left" + fi + done <<< "$backup_timers" + else + echo -e " ''${YELLOW}No backup timers found''${NC}" + fi + + # Check recent backup activity (last 3 days, summarized) + echo -e "\n''${BOLD}''${CYAN}Recent Activity (3 days):''${NC}" + + # Count successful vs failed backups + success_count=$(${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --no-pager -q 2>/dev/null | grep -iE "(completed|success|finished)" | wc -l) + error_count=$(${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager -q 2>/dev/null | wc -l) + + if [[ $success_count -gt 0 ]]; then + echo -e " ''${GREEN}βœ… $success_count successful backups''${NC}" + fi + if [[ $error_count -gt 0 ]]; then + echo -e " ''${RED}❌ $error_count failed backups''${NC}" + echo -e "\n''${BOLD}''${RED}Recent Failures:''${NC}" + ${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager --lines=3 2>/dev/null | while 
read -r line; do + # Extract just the important parts + timestamp=$(echo "$line" | awk '{print $1, $2, $3}') + service=$(echo "$line" | grep -oE "(restic-backups-[^[]+|borgbackup-job-[^[]+|[^[]*backup[^[]*)" | head -1) + message=$(echo "$line" | sed -E 's/.*\]: //' | cut -c1-60) + echo -e " ''${YELLOW}$timestamp''${NC} ''${BOLD}$service''${NC}: $message..." + done + elif [[ $success_count -eq 0 ]]; then + echo -e " ''${YELLOW}⚠️ No backup activity in last 3 days''${NC}" + else + echo -e " ''${GREEN}βœ… All backups completed successfully''${NC}" + fi + + # Show backup summary from global config if available + CONFIG_FILE="/etc/homelab/config.json" + if [[ -f "$CONFIG_FILE" ]]; then + total_jobs=$(${pkgs.jq}/bin/jq -r '.backups.summary.totalJobs // 0' "$CONFIG_FILE" 2>/dev/null) + backends=$(${pkgs.jq}/bin/jq -r '.backups.summary.backendsInUse[]?' "$CONFIG_FILE" 2>/dev/null | tr '\n' ' ') + + if [[ $total_jobs -gt 0 ]]; then + echo -e "\n''${BOLD}''${CYAN}Configuration:''${NC}" + echo -e " ''${BOLD}Total jobs:''${NC} $total_jobs" + if [[ -n "$backends" ]]; then + echo -e " ''${BOLD}Backends:''${NC} $backends" + fi + fi + fi + ''; + + # Helper script for proxy status + homelab-proxy-status = pkgs.writeShellScriptBin "homelab-proxy-status" '' + #! /usr/bin/env bash + + # Colors + RED="\e[31m" + GREEN="\e[32m" + YELLOW="\e[33m" + BLUE='\e[0;34m' + CYAN='\e[0;36m' + WHITE='\e[1;37m' + NC='\e[0m' + BOLD='\e[1m' + + CONFIG_FILE="/etc/homelab/config.json" + if [[ ! -f "$CONFIG_FILE" ]]; then + echo -e "''${RED}❌ Global homelab configuration not found''${NC}" + exit 1 + fi + + echo -e "''${BOLD}''${BLUE}πŸ”— Reverse Proxy Status''${NC}" + echo -e "''${BLUE}======================''${NC}" + + proxy_count=$(${pkgs.jq}/bin/jq '.reverseProxy.entries | length' "$CONFIG_FILE" 2>/dev/null || echo 0) + if [[ $proxy_count -gt 0 ]]; then + ${pkgs.jq}/bin/jq -r '.reverseProxy.entries[]? 
| + " ''${GREEN}●''${NC} ''${BOLD}\(.subdomain)''${NC}: \(.externalHost) β†’ \(.internalHost)\(if .enableAuth then " ''${YELLOW}πŸ”''${NC}" else "" end)\(if .enableSSL then " ''${GREEN}πŸ”’''${NC}" else "" end)"' "$CONFIG_FILE" 2>/dev/null + + echo -e "\n''${BOLD}Legend:''${NC} ''${YELLOW}πŸ”''${NC} Auth enabled, ''${GREEN}πŸ”’''${NC} SSL enabled" + echo -e "''${BOLD}Total: ''${proxy_count} proxy entries''${NC}" + else + echo -e " ''${YELLOW}No proxy entries configured''${NC}" + fi + ''; +in { + options.homelab.motd = { + enable = mkEnableOption "Simple homelab MOTD"; + + clearScreen = mkOption { + type = types.bool; + default = true; + description = "Clear screen before showing MOTD"; + }; + + showServices = mkOption { + type = types.bool; + default = true; + description = "Show local homelab services status"; + }; + + services = mkOption { + type = types.attrsOf (types.submodule { + options = { + systemdService = mkOption { + type = types.str; + description = "Name of the systemd service to monitor"; + }; + description = mkOption { + type = types.str; + default = ""; + description = "Human-readable description of the service"; + }; + }; + }); + default = {}; + description = "Local homelab services to show in MOTD"; + example = literalExpression '' + { + "nginx" = { + systemdService = "nginx"; + description = "Web server"; + }; + "grafana" = { + systemdService = "grafana"; + description = "Monitoring dashboard"; + }; + } + ''; + }; + }; + + config = mkIf cfg.enable { + # Create helper commands + environment.systemPackages = with pkgs; [ + jq + homelab-motd + homelab-monitor-status + homelab-backup-status + homelab-proxy-status + ]; + + # Set up MOTD to run on login + programs.bash.interactiveShellInit = '' + # Run homelab MOTD on interactive login (only once per session) + if [[ $- == *i* ]] && [[ -z "$MOTD_SHOWN" ]] && [[ -n "$SSH_CONNECTION" || "$TERM" == "linux" ]]; then + export MOTD_SHOWN=1 + ${homelab-motd}/bin/homelab-motd + fi + ''; + + # Disable default MOTD + users.motd = mkDefault ""; + security.pam.services.login.showMotd = mkDefault false; + }; +} diff --git a/modules/homelab/proxy-config.nix b/modules/homelab/proxy-config.nix new file mode 100644 index 0000000..e7236d8 --- /dev/null +++ b/modules/homelab/proxy-config.nix @@ -0,0 +1,53 @@ +{ + config, + lib, + ... 
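+# NOTE: the homelab-proxy-status script above reads `.externalHost` and
+# `.internalHost` from config.json, but entries declared here only carry
+# subdomain/host/port (the aggregator derives `_upstream` and `_fqdn`), so one
+# of the two sides needs to be aligned. This module only *declares* entries;
+# the node consuming homelab.global.reverseProxy does the actual proxy wiring.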
+}: +with lib; let + cfg = config.homelab.reverseProxy; + homelabCfg = config.homelab; + + reverseProxyEntryType = types.submodule { + options = { + subdomain = mkOption { + type = types.str; + description = "Subdomain for the service"; + }; + host = mkOption { + type = types.str; + description = "Host to proxy to"; + default = "${homelabCfg.hostname}.${homelabCfg.domain}"; + }; + port = mkOption { + type = types.port; + description = "Port to proxy to"; + }; + path = mkOption { + type = types.str; + default = "/"; + description = "Path prefix for the service"; + }; + enableAuth = mkOption { + type = types.bool; + default = false; + description = "Enable authentication for this service"; + }; + enableSSL = mkOption { + type = types.bool; + default = true; + description = "Enable SSL for this service"; + }; + }; + }; +in { + options.homelab.reverseProxy = { + entries = mkOption { + type = types.listOf reverseProxyEntryType; + default = []; + description = "Reverse proxy entries for this system"; + }; + }; + + config = { + }; +} diff --git a/modules/homelab/services/default.nix b/modules/homelab/services/default.nix new file mode 100644 index 0000000..2847a3c --- /dev/null +++ b/modules/homelab/services/default.nix @@ -0,0 +1,7 @@ +{ + imports = [ + ./minio.nix + ./monitoring/gatus.nix + ./monitoring/prometheus.nix + ]; +} diff --git a/modules/homelab/services/example-service.nix b/modules/homelab/services/example-service.nix new file mode 100644 index 0000000..df59348 --- /dev/null +++ b/modules/homelab/services/example-service.nix @@ -0,0 +1,161 @@ +# Example showing how to create a service using the standard interface +{ + config, + lib, + pkgs, + ... +}: +with lib; let + serviceInterface = import ../lib/service-interface.nix {inherit lib;}; + + cfg = config.homelab.services.grafana; + homelabCfg = config.homelab; + + # Service-specific options beyond the standard interface + grafanaServiceOptions = { + domain = mkOption { + type = types.str; + default = "grafana.${homelabCfg.externalDomain}"; + description = "Domain for Grafana"; + }; + + rootUrl = mkOption { + type = types.str; + default = "https://grafana.${homelabCfg.externalDomain}"; + description = "Root URL for Grafana"; + }; + + dataDir = serviceInterface.commonOptions.dataDir "grafana"; + + admin = { + user = mkOption { + type = types.str; + default = "admin"; + description = "Admin username"; + }; + + password = mkOption { + type = types.str; + default = "admin"; + description = "Admin password"; + }; + }; + + datasources = { + prometheus = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable Prometheus datasource"; + }; + + url = mkOption { + type = types.str; + default = "http://localhost:9090"; + description = "Prometheus URL"; + }; + }; + }; + + plugins = mkOption { + type = types.listOf types.package; + default = []; + description = "Grafana plugins to install"; + }; + }; +in { + options.homelab.services.grafana = serviceInterface.mkServiceInterface { + serviceName = "grafana"; + defaultPort = 3000; + defaultSubdomain = "grafana"; + monitoringPath = "/metrics"; + healthCheckPath = "/api/health"; + healthCheckConditions = [ + "[STATUS] == 200" + "[BODY].database == ok" + "[RESPONSE_TIME] < 2000" + ]; + serviceOptions = grafanaServiceOptions; + }; + + config = serviceInterface.mkServiceConfig { + inherit config cfg homelabCfg; + serviceName = "grafana"; + + extraMonitoringLabels = { + component = "dashboard"; + }; + + customHealthChecks = [ + { + name = "grafana-login"; + port = cfg.port; + 
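+        # mkServiceConfig appends these to the auto-generated checks
+        # (standardHealthCheck ++ customHealthChecks), so this login probe runs
+        # alongside the /api/health check defined by the interface defaults.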
path = "/login"; + interval = "60s"; + conditions = [ + "[STATUS] == 200" + "[RESPONSE_TIME] < 3000" + ]; + group = "monitoring"; + labels = { + service = "grafana"; + component = "login"; + }; + } + ]; + + serviceConfig = { + services.grafana = { + enable = true; + dataDir = cfg.dataDir; + declarativePlugins = cfg.plugins; + + settings = { + server = { + http_port = cfg.port; + http_addr = "0.0.0.0"; + domain = cfg.domain; + root_url = cfg.rootUrl; + }; + + security = { + admin_user = cfg.admin.user; + admin_password = cfg.admin.password; + }; + }; + + provision = { + enable = true; + datasources.settings.datasources = mkIf cfg.datasources.prometheus.enable [ + { + name = "Prometheus"; + type = "prometheus"; + url = cfg.datasources.prometheus.url; + isDefault = true; + } + ]; + }; + }; + }; + }; +} +# Usage example in your configuration: +/* +{ + homelab.services.grafana = { + enable = true; + # Standard interface options: + port = 3000; # Optional: defaults to 3000 + openFirewall = true; # Optional: defaults to true + proxy.subdomain = "grafana"; # Optional: defaults to "grafana" + proxy.enableAuth = false; # Optional: defaults to false + monitoring.enable = true; # Optional: defaults to true + + # Service-specific options: + admin.password = "secure-password"; + datasources.prometheus.url = "http://prometheus.lab:9090"; + plugins = with pkgs.grafanaPlugins; [ grafana-piechart-panel ]; + }; +} +*/ + diff --git a/modules/homelab/services/jellyfin.nix b/modules/homelab/services/jellyfin.nix new file mode 100644 index 0000000..1aac7e5 --- /dev/null +++ b/modules/homelab/services/jellyfin.nix @@ -0,0 +1,125 @@ +# modules/services/jellyfin.nix +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.services.jellyfin; +in { + options.services.jellyfin = { + enable = mkEnableOption "Jellyfin media server"; + + port = mkOption { + type = types.port; + default = 8096; + description = "Port for Jellyfin web interface"; + }; + + dataDir = mkOption { + type = types.str; + default = "/var/lib/jellyfin"; + description = "Directory to store Jellyfin data"; + }; + + mediaDir = mkOption { + type = types.str; + default = "/media"; + description = "Directory containing media files"; + }; + + enableMetrics = mkOption { + type = types.bool; + default = true; + description = "Enable Prometheus metrics"; + }; + + exposeWeb = mkOption { + type = types.bool; + default = true; + description = "Expose web interface through reverse proxy"; + }; + }; + + config = mkIf cfg.enable { + # Enable the service + services.jellyfin = { + enable = true; + dataDir = cfg.dataDir; + }; + + # Configure global settings + homelab.global = { + # Add backup job for Jellyfin data + backups.jobs = [ + { + name = "jellyfin-config"; + backend = "restic"; + paths = ["${cfg.dataDir}/config" "${cfg.dataDir}/data"]; + schedule = "0 2 * * *"; # Daily at 2 AM + excludePatterns = [ + "*/cache/*" + "*/transcodes/*" + "*/logs/*" + ]; + preHook = '' + # Stop jellyfin for consistent backup + systemctl stop jellyfin + ''; + postHook = '' + # Restart jellyfin after backup + systemctl start jellyfin + ''; + } + { + name = "jellyfin-media"; + backend = "restic"; + paths = [cfg.mediaDir]; + schedule = "0 3 * * 0"; # Weekly on Sunday at 3 AM + excludePatterns = [ + "*.tmp" + "*/.@__thumb/*" # Synology thumbnails + ]; + } + ]; + + # Add reverse proxy entry if enabled + reverseProxy.entries = mkIf cfg.exposeWeb [ + { + subdomain = "jellyfin"; + port = cfg.port; + enableAuth = false; # Jellyfin has its own auth + websockets = true; + 
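+          # NOTE: this file writes to `homelab.global.*`, which mkGlobalModule
+          # declares readOnly and aggregates from all nodes; the per-node options
+          # (homelab.reverseProxy.entries, homelab.monitoring.metrics,
+          # homelab.backups.jobs) are presumably the intended targets. Also,
+          # `websockets` and `customHeaders` are not declared in
+          # reverseProxyEntryType and would need options added there, e.g.
+          # (hypothetical):
+          #   websockets = mkOption { type = types.bool; default = false; };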
customHeaders = { + "X-Forwarded-Proto" = "$scheme"; + "X-Forwarded-Host" = "$host"; + }; + } + ]; + + # Add monitoring endpoint if metrics enabled + monitoring.endpoints = mkIf cfg.enableMetrics [ + { + name = "jellyfin"; + port = cfg.port; + path = "/metrics"; # Assuming you have a metrics plugin + jobName = "jellyfin"; + scrapeInterval = "60s"; + labels = { + service = "jellyfin"; + type = "media-server"; + }; + } + ]; + }; + + # Open firewall + networking.firewall.allowedTCPPorts = [cfg.port]; + + # Create media directory + systemd.tmpfiles.rules = [ + "d ${cfg.mediaDir} 0755 jellyfin jellyfin -" + ]; + }; +} diff --git a/modules/homelab/services/minio.nix b/modules/homelab/services/minio.nix new file mode 100644 index 0000000..cebdd50 --- /dev/null +++ b/modules/homelab/services/minio.nix @@ -0,0 +1,66 @@ +{ + config, + lib, + pkgs, + ... +}: +with lib; let + service = "minio"; + cfg = config.homelab.services.${service}; + homelabCfg = config.homelab; +in { + options.homelab.services.${service} = { + enable = mkEnableOption "Minio Object Storage"; + + port = mkOption { + default = 9000; + type = types.port; + description = "Port of the server."; + }; + + webPort = mkOption { + default = 9001; + type = types.port; + description = "Port of the web UI (console)."; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = '' + Whether to open the ports specified in `port` and `webPort` in the firewall. + ''; + }; + }; + + config = mkIf cfg.enable { + sops.secrets."ente/minio/root_user" = {}; + sops.secrets."ente/minio/root_password" = {}; + + sops.templates."minio-root-credentials".content = '' + MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"} + MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"} + ''; + + services.minio = { + enable = true; + rootCredentialsFile = config.sops.templates."minio-root-credentials".path; + }; + + networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [cfg.port cfg.webPort]; + + homelab.reverseProxy.entries = [ + { + subdomain = "${service}-api"; + port = cfg.port; + } + { + subdomain = "${service}"; + port = cfg.webPort; + } + ]; + + # https://min.io/docs/minio/linux/operations/monitoring/collect-minio-metrics-using-prometheus.html + # metrics and monitoring... + }; +} diff --git a/modules/homelab/services/monitoring/alertmanager.nix b/modules/homelab/services/monitoring/alertmanager.nix new file mode 100644 index 0000000..b8da33d --- /dev/null +++ b/modules/homelab/services/monitoring/alertmanager.nix @@ -0,0 +1,237 @@ +{ + config, + lib, + pkgs, + ... 
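+# Caveat: `smtp.password` below is a plain module option, so its value ends up
+# in the world-readable Nix store. The environmentFile pattern used by
+# alertmanager_new.nix (placeholders like "$SMTP_PASSWORD" resolved at runtime)
+# is the safer route for secrets.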
+}: +with lib; let + cfg = config.homelab.services.alertmanager; + homelabCfg = config.homelab; + + # Default alertmanager configuration + defaultConfig = { + global = { + smtp_smarthost = cfg.smtp.host; + smtp_from = cfg.smtp.from; + smtp_auth_username = cfg.smtp.username; + smtp_auth_password = cfg.smtp.password; + }; + + # Inhibit rules to prevent spam + inhibit_rules = [ + { + source_match = { + severity = "critical"; + }; + target_match = { + severity = "warning"; + }; + equal = ["alertname" "dev" "instance"]; + } + ]; + + route = { + group_by = ["alertname"]; + group_wait = "10s"; + group_interval = "10s"; + repeat_interval = "1h"; + receiver = "web.hook"; + routes = cfg.routes; + }; + + receivers = + [ + { + name = "web.hook"; + webhook_configs = [ + { + url = "http://127.0.0.1:5001/"; + } + ]; + } + ] + ++ cfg.receivers; + }; + + # Merge with user config + alertmanagerConfig = recursiveUpdate defaultConfig cfg.extraConfig; +in { + options.homelab.services.alertmanager = { + enable = mkEnableOption "Alertmanager for handling alerts"; + + port = mkOption { + type = types.port; + default = 9093; + description = "Port for Alertmanager web interface"; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = "Whether to open firewall ports"; + }; + + dataDir = mkOption { + type = types.str; + default = "/var/lib/alertmanager"; + description = "Directory to store Alertmanager data"; + }; + + smtp = { + host = mkOption { + type = types.str; + default = "localhost:587"; + description = "SMTP server host:port"; + }; + + from = mkOption { + type = types.str; + default = "alertmanager@${homelabCfg.externalDomain}"; + description = "From email address"; + }; + + username = mkOption { + type = types.str; + default = ""; + description = "SMTP username"; + }; + + password = mkOption { + type = types.str; + default = ""; + description = "SMTP password"; + }; + }; + + routes = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional routing rules"; + example = literalExpression '' + [ + { + match = { + service = "gatus"; + }; + receiver = "discord-webhook"; + } + { + match = { + severity = "critical"; + }; + receiver = "email-alerts"; + } + ] + ''; + }; + + receivers = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Alert receivers configuration"; + example = literalExpression '' + [ + { + name = "email-alerts"; + email_configs = [{ + to = "admin@example.com"; + subject = "{{ range .Alerts }}{{ .Annotations.summary }}{{ end }}"; + body = "{{ range .Alerts }}{{ .Annotations.description }}{{ end }}"; + }]; + } + { + name = "discord-webhook"; + webhook_configs = [{ + url = "https://discord.com/api/webhooks/..."; + title = "{{ range .Alerts }}{{ .Annotations.summary }}{{ end }}"; + }]; + } + ] + ''; + }; + + extraConfig = mkOption { + type = types.attrs; + default = {}; + description = "Additional Alertmanager configuration"; + }; + + webExternalUrl = mkOption { + type = types.str; + default = "https://alertmanager.${homelabCfg.externalDomain}"; + description = "External URL for Alertmanager web interface"; + }; + }; + + config = mkIf cfg.enable { + services.prometheus.alertmanager = { + enable = true; + port = cfg.port; + listenAddress = "0.0.0.0"; + webExternalUrl = cfg.webExternalUrl; + dataDir = cfg.dataDir; + + # Write configuration to file + configuration = alertmanagerConfig; + }; + + # Open firewall if requested + networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; + + # Add to 
monitoring endpoints + homelab.monitoring.metrics = [ + { + name = "alertmanager"; + port = cfg.port; + path = "/metrics"; + jobName = "alertmanager"; + labels = { + service = "alertmanager"; + component = "monitoring"; + }; + } + ]; + + # Add health checks + homelab.monitoring.healthChecks = [ + { + name = "alertmanager-web-interface"; + port = cfg.port; + path = "/-/healthy"; + interval = "30s"; + conditions = [ + "[STATUS] == 200" + "[RESPONSE_TIME] < 1000" + ]; + group = "monitoring"; + labels = { + service = "alertmanager"; + component = "web-interface"; + }; + } + { + name = "alertmanager-ready"; + port = cfg.port; + path = "/-/ready"; + interval = "30s"; + conditions = [ + "[STATUS] == 200" + ]; + group = "monitoring"; + labels = { + service = "alertmanager"; + component = "readiness"; + }; + } + ]; + + # Add reverse proxy entry + homelab.reverseProxy.entries = [ + { + subdomain = "alertmanager"; + host = homelabCfg.hostname; + port = cfg.port; + } + ]; + }; +} diff --git a/modules/homelab/services/monitoring/alertmanager_new.nix b/modules/homelab/services/monitoring/alertmanager_new.nix new file mode 100644 index 0000000..f64d7c3 --- /dev/null +++ b/modules/homelab/services/monitoring/alertmanager_new.nix @@ -0,0 +1,326 @@ +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.homelab.services.alertmanager; + homelabCfg = config.homelab; + + # Build alertmanager configuration + alertmanagerConfig = { + route = { + receiver = cfg.defaultReceiver; + group_by = cfg.groupBy; + group_wait = cfg.groupWait; + group_interval = cfg.groupInterval; + repeat_interval = cfg.repeatInterval; + routes = cfg.routes; + }; + + receivers = + [ + {name = cfg.defaultReceiver;} + ] + ++ cfg.receivers; + + inhibit_rules = cfg.inhibitRules; + + templates = cfg.templates; + }; +in { + options.homelab.services.alertmanager = { + enable = mkEnableOption "Alertmanager for handling alerts"; + + port = mkOption { + type = types.port; + default = 9093; + description = "Port for Alertmanager web interface"; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = "Whether to open firewall ports"; + }; + + dataDir = mkOption { + type = types.str; + default = "/var/lib/alertmanager"; + description = "Directory to store Alertmanager data"; + }; + + webExternalUrl = mkOption { + type = types.str; + default = "http://${homelabCfg.hostname}.${homelabCfg.domain}:${toString cfg.port}"; + description = "External URL for Alertmanager web interface"; + }; + + environmentFile = mkOption { + type = types.nullOr types.path; + default = null; + description = "Environment file for secrets (e.g., Telegram bot token)"; + example = "/run/secrets/alertmanager-env"; + }; + + # Routing configuration + defaultReceiver = mkOption { + type = types.str; + default = "null"; + description = "Default receiver for unmatched alerts"; + }; + + groupBy = mkOption { + type = types.listOf types.str; + default = ["alertname"]; + description = "Labels to group alerts by"; + }; + + groupWait = mkOption { + type = types.str; + default = "10s"; + description = "Time to wait before sending initial notification"; + }; + + groupInterval = mkOption { + type = types.str; + default = "5m"; + description = "Time to wait before sending updates for a group"; + }; + + repeatInterval = mkOption { + type = types.str; + default = "4h"; + description = "Time to wait before re-sending an alert"; + }; + + routes = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Alert routing rules"; + example 
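+    # NOTE: in the config section below, receivers are assembled as
+    #   [ (optional cond { ... }) (optional cond { ... }) ]
+    # but lib.optional returns a *list*, so that yields a list of lists.
+    # Concatenation is presumably what is intended, roughly:
+    #   receivers = optional cfg.telegram.enable { ... }
+    #     ++ optional cfg.discord.enable { ... };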
= literalExpression '' + [ + { + receiver = "telegram"; + matchers = ["severity =~ \"warning|critical\""]; + group_wait = "10s"; + continue = true; + } + ] + ''; + }; + + receivers = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Alert receivers configuration"; + example = literalExpression '' + [ + { + name = "telegram"; + telegram_configs = [{ + api_url = "https://api.telegram.org"; + bot_token = "$TELEGRAM_BOT_TOKEN"; + chat_id = -1002642560007; + message_thread_id = 4; + parse_mode = "HTML"; + send_resolved = true; + message = "{{ template \"telegram.message\" . }}"; + }]; + } + ] + ''; + }; + + inhibitRules = mkOption { + type = types.listOf types.attrs; + default = [ + { + source_match = {severity = "critical";}; + target_match = {severity = "warning";}; + equal = ["alertname" "instance"]; + } + ]; + description = "Rules for inhibiting alerts"; + }; + + templates = mkOption { + type = types.listOf types.path; + default = []; + description = "Template files for alert formatting"; + example = literalExpression '' + [ + (pkgs.writeText "telegram.tmpl" ''' + {{- define "telegram.message" -}} + {{- if gt (len .Alerts.Firing) 0 -}} + πŸ”₯ FIRING πŸ”₯ + {{- range .Alerts.Firing }} + {{ .Annotations.summary }} + {{ .Annotations.description }} + {{- end }} + {{- end }} + {{- if gt (len .Alerts.Resolved) 0 -}} + βœ… RESOLVED βœ… + {{- range .Alerts.Resolved }} + {{ .Annotations.summary }} + {{- end }} + {{- end }} + {{- end -}} + ''') + ] + ''; + }; + + # Convenience options for common receivers + telegram = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable Telegram notifications"; + }; + + botToken = mkOption { + type = types.str; + default = "$TELEGRAM_BOT_TOKEN"; + description = "Telegram bot token (use environment variable)"; + }; + + chatId = mkOption { + type = types.int; + description = "Telegram chat ID"; + example = -1002642560007; + }; + + messageThreadId = mkOption { + type = types.nullOr types.int; + default = null; + description = "Telegram message thread ID (for forum groups)"; + }; + + template = mkOption { + type = types.str; + default = "telegram.message"; + description = "Template to use for Telegram messages"; + }; + }; + + discord = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable Discord notifications"; + }; + + webhookUrl = mkOption { + type = types.str; + default = "$DISCORD_WEBHOOK_URL"; + description = "Discord webhook URL (use environment variable)"; + }; + + username = mkOption { + type = types.str; + default = "Alertmanager"; + description = "Discord bot username"; + }; + }; + }; + + config = mkIf cfg.enable { + services.prometheus.alertmanager = { + enable = true; + port = cfg.port; + listenAddress = "0.0.0.0"; + openFirewall = cfg.openFirewall; + webExternalUrl = cfg.webExternalUrl; + dataDir = cfg.dataDir; + environmentFile = cfg.environmentFile; + configuration = alertmanagerConfig; + }; + + # Auto-configure Telegram and Discord receiver if enabled + homelab.services.alertmanager.receivers = [ + (optional cfg.telegram.enable { + name = "telegram"; + telegram_configs = [ + { + api_url = "https://api.telegram.org"; + bot_token = cfg.telegram.botToken; + chat_id = cfg.telegram.chatId; + message_thread_id = cfg.telegram.messageThreadId; + parse_mode = "HTML"; + send_resolved = true; + message = "{{ template \"${cfg.telegram.template}\" . 
}}"; + } + ]; + }) + (optional cfg.discord.enable { + name = "discord"; + discord_configs = [ + { + webhook_url = cfg.discord.webhookUrl; + username = cfg.discord.username; + send_resolved = true; + } + ]; + }) + ]; + + # Auto-configure routes for convenience receivers + homelab.services.alertmanager.routes = + (optional cfg.telegram.enable { + receiver = "telegram"; + matchers = ["severity =~ \"warning|critical\""]; + group_wait = "10s"; + continue = true; + }) + ++ (optional cfg.discord.enable { + receiver = "discord"; + matchers = ["severity =~ \"warning|critical\""]; + group_wait = "10s"; + continue = true; + }); + + # Add to monitoring endpoints + homelab.monitoring.metrics = [ + { + name = "alertmanager"; + port = cfg.port; + path = "/metrics"; + jobName = "alertmanager"; + labels = { + service = "alertmanager"; + component = "monitoring"; + }; + } + ]; + + # Add health checks + homelab.monitoring.healthChecks = [ + { + name = "alertmanager-web-interface"; + port = cfg.port; + path = "/-/healthy"; + interval = "30s"; + conditions = [ + "[STATUS] == 200" + "[RESPONSE_TIME] < 1000" + ]; + group = "monitoring"; + labels = { + service = "alertmanager"; + component = "web-interface"; + }; + } + ]; + + # Add reverse proxy entry + homelab.reverseProxy.entries = [ + { + subdomain = "alertmanager"; + host = homelabCfg.hostname; + port = cfg.port; + } + ]; + }; +} diff --git a/modules/homelab/services/monitoring/example.nix b/modules/homelab/services/monitoring/example.nix new file mode 100644 index 0000000..a1ab301 --- /dev/null +++ b/modules/homelab/services/monitoring/example.nix @@ -0,0 +1,148 @@ +# Example configuration showing how to use the monitoring stack +# with the homelab.global approach for dynamic discovery +{ + config, + pkgs, + ... +}: { + # Import the monitoring services + imports = [ + ./services/prometheus.nix + ./services/alertmanager.nix + ./services/grafana.nix + ./services/monitoring-stack.nix + ]; + + # Enable the full monitoring stack + homelab.services.monitoring-stack.enable = true; + + # Configure Prometheus - it will automatically discover scrape targets + # from homelab.global.monitoring.allMetrics + homelab.services.prometheus = { + enable = true; + port = 9090; + retention = "7d"; + + # Optional: Add custom scrape configs if needed + extraScrapeConfigs = [ + # Any additional manual scrape configs can go here + # but most should be discovered via homelab.monitoring.metrics + ]; + + # Optional: Add custom alerting rules + extraAlertingRules = [ + # Custom alert groups can be added here + ]; + + # Optional: Add external rule files + ruleFiles = [ + # ./path/to/custom-rules.yml + ]; + }; + + # Configure Alertmanager with Telegram support (like your original) + homelab.services.alertmanager = { + enable = true; + port = 9093; + + # Use sops secrets for environment variables + environmentFile = config.sops.secrets."alertmanager/env".path; + + # Enable Telegram notifications + telegram = { + enable = true; + botToken = "$TELEGRAM_BOT_TOKEN"; # From environment file + chatId = -1002642560007; + messageThreadId = 4; + }; + + # Custom templates (similar to your setup) + templates = [ + (pkgs.writeText "telegram.tmpl" '' + {{- define "telegram.message" -}} + {{- if gt (len .Alerts.Firing) 0 -}} + πŸ”₯ FIRING πŸ”₯ + {{- range .Alerts.Firing }} + {{ .Annotations.summary }} + {{ .Annotations.description }} + {{- end }} + {{- end }} + {{- if gt (len .Alerts.Resolved) 0 -}} + βœ… RESOLVED βœ… + {{- range .Alerts.Resolved }} + {{ .Annotations.summary }} + {{- end }} + {{- end }} 
+ {{- end -}} + '') + ]; + }; + + # Configure Grafana with data sources (similar to your setup) + homelab.services.grafana = { + enable = true; + port = 3000; + domain = "grafana.procopius.dk"; + rootUrl = "https://grafana.procopius.dk"; + + # Add grafana user to influxdb2 group for accessing secrets + extraGroups = ["influxdb2"]; + + # Enable data sources + datasources = { + prometheus.enable = true; + loki.enable = true; + influxdb = { + enable = true; + database = "proxmox"; + tokenPath = config.sops.secrets."influxdb/token".path; + }; + }; + + # Provision dashboards (similar to your environment.etc approach) + dashboards.files = [ + { + name = "traefik"; + source = ./dashboards/traefik.json; + } + { + name = "traefik-access"; + source = ./dashboards/traefik-access.json; + } + { + name = "grafana-traefik"; + source = ./dashboards/grafana-traefik.json; + } + { + name = "node-exporter"; + source = ./dashboards/node-exporter.json; + } + { + name = "promtail"; + source = ./dashboards/promtail.json; + } + { + name = "gitea"; + source = ./dashboards/gitea.json; + } + { + name = "postgres"; + source = ./dashboards/postgres.json; + } + { + name = "gatus"; + source = ./dashboards/gatus.json; + } + ]; + }; + + # Configure sops secrets (keep your existing setup) + sops.secrets."alertmanager/env" = { + sopsFile = ../../secrets/secrets.yaml; + mode = "0440"; + }; + + # All services automatically register with homelab.monitoring.metrics + # and homelab.monitoring.healthChecks for Gatus monitoring + # All services automatically get reverse proxy entries +} diff --git a/modules/homelab/services/monitoring/gatus.nix b/modules/homelab/services/monitoring/gatus.nix new file mode 100644 index 0000000..8d1f20f --- /dev/null +++ b/modules/homelab/services/monitoring/gatus.nix @@ -0,0 +1,244 @@ +{ + config, + lib, + ... 
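+# Endpoints below are derived from homelab.global.monitoring.enabledHealthChecks,
+# which is only populated when an aggregation module (lib/node-aggregation.nix,
+# mkGlobalModule) is wired into the deployment; on a standalone nixos-rebuild it
+# falls back to [] and Gatus starts with an empty endpoint list.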
+}: +with lib; let + cfg = config.homelab.services.gatus; + homelabCfg = config.homelab; + + # Convert our health check format to Gatus format + formatHealthCheck = check: let + # Build the URL + url = check._url; + + # Convert conditions to Gatus format (they should already be compatible) + conditions = check.conditions or ["[STATUS] == 200"]; + + # Convert alerts to Gatus format + alerts = map (alert: { + inherit (alert) type enabled; + failure-threshold = alert.failure-threshold or 3; + success-threshold = alert.success-threshold or 2; + description = "Health check alert for ${check.name}"; + }) (check.alerts or []); + in { + name = check.name; + group = check.group or "default"; + url = url; + interval = check.interval or "30s"; + + # Add method and headers for HTTP/HTTPS checks + method = + if (check.protocol == "http" || check.protocol == "https") + then check.method or "GET" + else null; + + conditions = conditions; + + # Add timeout + client = { + timeout = check.timeout or "10s"; + }; + + # Add alerts if configured + alerts = + if alerts != [] + then alerts + else []; + + # Add labels for UI organization + ui = { + hide-hostname = false; + hide-url = false; + description = "Health check for ${check.name} on ${check._nodeName}"; + }; + }; + + # Generate Gatus configuration + gatusConfig = { + # Global Gatus settings + alerting = mkIf (cfg.alerting != {}) cfg.alerting; + + web = { + address = "0.0.0.0"; + port = cfg.port; + }; + + # TODO: Introduce monitor option to toggle monitoring + metrics = true; + + ui = { + title = cfg.ui.title; + header = cfg.ui.header; + link = cfg.ui.link; + buttons = cfg.ui.buttons; + }; + + storage = mkIf (cfg.storage != {}) cfg.storage; + + # Convert all enabled health checks to Gatus endpoints + endpoints = let + # Get all health checks from global config + allHealthChecks = homelabCfg.global.monitoring.enabledHealthChecks or []; + + # Group by group name for better organization + # groupedChecks = homelabCfg.global.monitoring.healthChecksByGroup or {}; + + # Convert to Gatus format + gatusEndpoints = map formatHealthCheck allHealthChecks; + in + gatusEndpoints; + }; +in { + options.homelab.services.gatus = { + enable = mkEnableOption "Gatus uptime monitoring service"; + + port = mkOption { + type = types.port; + default = 8080; + description = "Port for Gatus web interface"; + }; + + openFirewall = lib.mkOption { + type = lib.types.bool; + default = true; + description = '' + Whether to automatically open the specified ports in the firewall. 
+ ''; + }; + + ui = { + title = mkOption { + type = types.str; + default = "Homelab Status"; + description = "Title for the Gatus web interface"; + }; + + header = mkOption { + type = types.str; + default = "Homelab Services Status"; + description = "Header text for the Gatus interface"; + }; + + link = mkOption { + type = types.str; + default = "https://gatus.${homelabCfg.externalDomain}"; + description = "Link in the Gatus header"; + }; + + buttons = mkOption { + type = types.listOf (types.submodule { + options = { + name = mkOption {type = types.str;}; + link = mkOption {type = types.str;}; + }; + }); + default = [ + { + name = "Grafana"; + link = "https://grafana.${homelabCfg.externalDomain}"; + } + { + name = "Prometheus"; + link = "https://prometheus.${homelabCfg.externalDomain}"; + } + ]; + description = "Navigation buttons in the Gatus interface"; + }; + }; + + alerting = mkOption { + type = types.attrs; + default = {}; + description = "Gatus alerting configuration"; + example = literalExpression '' + { + discord = { + webhook-url = "https://discord.com/api/webhooks/..."; + default-alert = { + enabled = true; + description = "Health check failed"; + failure-threshold = 3; + success-threshold = 2; + }; + }; + } + ''; + }; + + storage = mkOption { + type = types.attrs; + default = { + type = "memory"; + }; + description = "Gatus storage configuration"; + example = literalExpression '' + { + type = "postgres"; + path = "postgres://user:password@localhost/gatus?sslmode=disable"; + } + ''; + }; + + extraConfig = mkOption { + type = types.attrs; + default = {}; + description = "Additional Gatus configuration options"; + }; + }; + + config = mkIf cfg.enable { + services.gatus = { + enable = true; + openFirewall = cfg.openFirewall; + settings = gatusConfig; + }; + + # Add to monitoring endpoints + homelab.monitoring.metrics = [ + { + name = "gatus"; + port = cfg.port; + path = "/metrics"; + jobName = "gatus"; + labels = { + service = "gatus"; + component = "monitoring"; + }; + } + ]; + + # Add health check for Gatus itself + homelab.monitoring.healthChecks = [ + { + name = "gatus-web-interface"; + port = cfg.port; + path = "/health"; + interval = "30s"; + conditions = [ + "[STATUS] == 200" + "[BODY].status == UP" + "[RESPONSE_TIME] < 1000" + ]; + group = "monitoring"; + labels = { + service = "gatus"; + component = "web-interface"; + }; + } + ]; + + # Add reverse proxy entry if needed + homelab.reverseProxy.entries = [ + { + subdomain = "status"; + host = homelabCfg.hostname; + port = cfg.port; + # path = "/"; + # enableAuth = false; # Status page should be publicly accessible + # enableSSL = true; + } + ]; + }; +} diff --git a/modules/homelab/services/monitoring/grafana.nix b/modules/homelab/services/monitoring/grafana.nix new file mode 100644 index 0000000..64650cf --- /dev/null +++ b/modules/homelab/services/monitoring/grafana.nix @@ -0,0 +1,416 @@ +{ + config, + lib, + pkgs, + ... 
+}: +with lib; let + cfg = config.homelab.services.grafana; + homelabCfg = config.homelab; + + # Default dashboards for homelab monitoring + defaultDashboards = { + "node-exporter" = pkgs.fetchurl { + url = "https://grafana.com/api/dashboards/1860/revisions/37/download"; + sha256 = "sha256-0000000000000000000000000000000000000000000="; # You'll need to update this + }; + "prometheus-stats" = pkgs.fetchurl { + url = "https://grafana.com/api/dashboards/2/revisions/2/download"; + sha256 = "sha256-0000000000000000000000000000000000000000000="; # You'll need to update this + }; + }; + + # Grafana provisioning configuration + provisioningConfig = { + # Data sources + datasources = + [ + { + name = "Prometheus"; + type = "prometheus"; + access = "proxy"; + url = cfg.datasources.prometheus.url; + isDefault = true; + editable = false; + jsonData = { + timeInterval = "5s"; + queryTimeout = "60s"; + httpMethod = "POST"; + }; + } + ] + ++ cfg.datasources.extra; + + # Dashboard providers + dashboards = [ + { + name = "homelab"; + type = "file"; + disableDeletion = false; + updateIntervalSeconds = 10; + allowUiUpdates = true; + options = { + path = "/var/lib/grafana/dashboards"; + }; + } + ]; + + # Notification channels + notifiers = cfg.notifications; + }; +in { + options.homelab.services.grafana = { + enable = mkEnableOption "Grafana dashboard service"; + + port = mkOption { + type = types.port; + default = 3000; + description = "Port for Grafana web interface"; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = "Whether to open firewall ports"; + }; + + dataDir = mkOption { + type = types.str; + default = "/var/lib/grafana"; + description = "Directory to store Grafana data"; + }; + + domain = mkOption { + type = types.str; + default = "grafana.${homelabCfg.externalDomain}"; + description = "Domain for Grafana"; + }; + + rootUrl = mkOption { + type = types.str; + default = "https://grafana.${homelabCfg.externalDomain}"; + description = "Root URL for Grafana"; + }; + + admin = { + user = mkOption { + type = types.str; + default = "admin"; + description = "Admin username"; + }; + + password = mkOption { + type = types.str; + default = "admin"; + description = "Admin password (change this!)"; + }; + + email = mkOption { + type = types.str; + default = "admin@${homelabCfg.externalDomain}"; + description = "Admin email"; + }; + }; + + datasources = { + prometheus = { + url = mkOption { + type = types.str; + default = "http://localhost:9090"; + description = "Prometheus URL"; + }; + }; + + extra = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional data sources"; + example = literalExpression '' + [ + { + name = "Loki"; + type = "loki"; + url = "http://localhost:3100"; + } + ] + ''; + }; + }; + + notifications = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Notification channels configuration"; + example = literalExpression '' + [ + { + name = "discord-webhook"; + type = "discord"; + settings = { + url = "https://discord.com/api/webhooks/..."; + username = "Grafana"; + }; + } + ] + ''; + }; + + plugins = mkOption { + type = types.listOf types.str; + default = [ + "grafana-piechart-panel" + "grafana-worldmap-panel" + "grafana-clock-panel" + "grafana-simple-json-datasource" + ]; + description = "Grafana plugins to install"; + }; + + smtp = { + enabled = mkOption { + type = types.bool; + default = false; + description = "Enable SMTP for email notifications"; + }; + + host = mkOption { + type = 
types.str; + default = "localhost:587"; + description = "SMTP server host:port"; + }; + + user = mkOption { + type = types.str; + default = ""; + description = "SMTP username"; + }; + + password = mkOption { + type = types.str; + default = ""; + description = "SMTP password"; + }; + + fromAddress = mkOption { + type = types.str; + default = "grafana@${homelabCfg.externalDomain}"; + description = "From email address"; + }; + + fromName = mkOption { + type = types.str; + default = "Homelab Grafana"; + description = "From name"; + }; + }; + + security = { + allowEmbedding = mkOption { + type = types.bool; + default = false; + description = "Allow embedding Grafana in iframes"; + }; + + cookieSecure = mkOption { + type = types.bool; + default = true; + description = "Set secure flag on cookies"; + }; + + secretKey = mkOption { + type = types.str; + default = "change-this-secret-key"; + description = "Secret key for signing (change this!)"; + }; + }; + + auth = { + anonymousEnabled = mkOption { + type = types.bool; + default = false; + description = "Enable anonymous access"; + }; + + disableLoginForm = mkOption { + type = types.bool; + default = false; + description = "Disable login form"; + }; + }; + + extraConfig = mkOption { + type = types.attrs; + default = {}; + description = "Additional Grafana configuration"; + }; + }; + + config = mkIf cfg.enable { + services.grafana = { + enable = true; + settings = + recursiveUpdate { + server = { + http_addr = "0.0.0.0"; + http_port = cfg.port; + domain = cfg.domain; + root_url = cfg.rootUrl; + serve_from_sub_path = false; + }; + + database = { + type = "sqlite3"; + path = "${cfg.dataDir}/grafana.db"; + }; + + security = { + admin_user = cfg.admin.user; + admin_password = cfg.admin.password; + admin_email = cfg.admin.email; + allow_embedding = cfg.security.allowEmbedding; + cookie_secure = cfg.security.cookieSecure; + secret_key = cfg.security.secretKey; + }; + + users = { + allow_sign_up = false; + auto_assign_org = true; + auto_assign_org_role = "Viewer"; + }; + + auth.anonymous = { + enabled = cfg.auth.anonymousEnabled; + org_name = "Homelab"; + org_role = "Viewer"; + }; + + auth.basic = { + enabled = !cfg.auth.disableLoginForm; + }; + + smtp = mkIf cfg.smtp.enabled { + enabled = true; + host = cfg.smtp.host; + user = cfg.smtp.user; + password = cfg.smtp.password; + from_address = cfg.smtp.fromAddress; + from_name = cfg.smtp.fromName; + }; + + analytics = { + reporting_enabled = false; + check_for_updates = false; + }; + + log = { + mode = "console"; + level = "info"; + }; + + paths = { + data = cfg.dataDir; + logs = "${cfg.dataDir}/log"; + plugins = "${cfg.dataDir}/plugins"; + provisioning = "/etc/grafana/provisioning"; + }; + } + cfg.extraConfig; + + dataDir = cfg.dataDir; + }; + + # Install plugins + systemd.services.grafana.preStart = mkIf (cfg.plugins != []) ( + concatStringsSep "\n" (map ( + plugin: "${pkgs.grafana}/bin/grafana-cli --pluginsDir ${cfg.dataDir}/plugins plugins install ${plugin} || true" + ) + cfg.plugins) + ); + + # Provisioning configuration + environment.etc = + { + "grafana/provisioning/datasources/datasources.yaml".text = builtins.toJSON { + apiVersion = 1; + datasources = provisioningConfig.datasources; + }; + + "grafana/provisioning/dashboards/dashboards.yaml".text = builtins.toJSON { + apiVersion = 1; + providers = provisioningConfig.dashboards; + }; + } + // (mkIf (cfg.notifications != []) { + "grafana/provisioning/notifiers/notifiers.yaml".text = builtins.toJSON { + apiVersion = 1; + notifiers = 
provisioningConfig.notifiers; + }; + }); + + # Create dashboard directory + systemd.tmpfiles.rules = [ + "d ${cfg.dataDir}/dashboards 0755 grafana grafana -" + ]; + + # Open firewall if requested + networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; + + # Add to monitoring endpoints + homelab.monitoring.metrics = [ + { + name = "grafana"; + port = cfg.port; + path = "/metrics"; + jobName = "grafana"; + labels = { + service = "grafana"; + component = "monitoring"; + }; + } + ]; + + # Add health checks + homelab.monitoring.healthChecks = [ + { + name = "grafana-web-interface"; + port = cfg.port; + path = "/api/health"; + interval = "30s"; + conditions = [ + "[STATUS] == 200" + "[BODY].database == ok" + "[RESPONSE_TIME] < 2000" + ]; + group = "monitoring"; + labels = { + service = "grafana"; + component = "web-interface"; + }; + } + { + name = "grafana-login-page"; + port = cfg.port; + path = "/login"; + interval = "60s"; + conditions = [ + "[STATUS] == 200" + "[RESPONSE_TIME] < 3000" + ]; + group = "monitoring"; + labels = { + service = "grafana"; + component = "login"; + }; + } + ]; + + # Add reverse proxy entry + homelab.reverseProxy.entries = [ + { + subdomain = "grafana"; + host = homelabCfg.hostname; + port = cfg.port; + } + ]; + }; +} diff --git a/modules/homelab/services/monitoring/grafana_new.nix b/modules/homelab/services/monitoring/grafana_new.nix new file mode 100644 index 0000000..a73eadb --- /dev/null +++ b/modules/homelab/services/monitoring/grafana_new.nix @@ -0,0 +1,369 @@ +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.homelab.services.grafana; + homelabCfg = config.homelab; + + # Dashboard provisioning + provisionDashboard = name: source: { + "grafana-dashboards/${name}.json" = { + inherit source; + user = "grafana"; + group = "grafana"; + mode = "0644"; + }; + }; + + # Generate all dashboard files + dashboardFiles = + fold ( + dashboard: acc: + acc // (provisionDashboard dashboard.name dashboard.source) + ) {} + cfg.dashboards.files; +in { + options.homelab.services.grafana = { + enable = mkEnableOption "Grafana dashboard service"; + + port = mkOption { + type = types.port; + default = 3000; + description = "Port for Grafana web interface"; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = "Whether to open firewall ports"; + }; + + dataDir = mkOption { + type = types.str; + default = "/var/lib/grafana"; + description = "Directory to store Grafana data"; + }; + + domain = mkOption { + type = types.str; + default = "grafana.${homelabCfg.externalDomain}"; + description = "Domain for Grafana"; + }; + + rootUrl = mkOption { + type = types.str; + default = "https://grafana.${homelabCfg.externalDomain}"; + description = "Root URL for Grafana"; + }; + + # Authentication settings + auth = { + disableLoginForm = mkOption { + type = types.bool; + default = false; + description = "Disable the login form"; + }; + + oauthAutoLogin = mkOption { + type = types.bool; + default = false; + description = "Enable OAuth auto-login"; + }; + + genericOauth = { + enabled = mkOption { + type = types.bool; + default = false; + description = "Enable generic OAuth"; + }; + }; + }; + + # Data source configuration + datasources = { + prometheus = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable Prometheus datasource"; + }; + + url = mkOption { + type = types.str; + default = "http://127.0.0.1:9090"; + description = "Prometheus URL"; + }; + + uid = mkOption { + type = types.str; + default 
= "prometheus"; + description = "Unique identifier for Prometheus datasource"; + }; + }; + + loki = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable Loki datasource"; + }; + + url = mkOption { + type = types.str; + default = "http://127.0.0.1:3100"; + description = "Loki URL"; + }; + + uid = mkOption { + type = types.str; + default = "loki"; + description = "Unique identifier for Loki datasource"; + }; + }; + + influxdb = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable InfluxDB datasource"; + }; + + url = mkOption { + type = types.str; + default = "http://127.0.0.1:8086"; + description = "InfluxDB URL"; + }; + + database = mkOption { + type = types.str; + default = "homelab"; + description = "InfluxDB database name"; + }; + + tokenPath = mkOption { + type = types.nullOr types.path; + default = null; + description = "Path to InfluxDB token file"; + }; + + uid = mkOption { + type = types.str; + default = "influxdb"; + description = "Unique identifier for InfluxDB datasource"; + }; + }; + + extra = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional data sources"; + }; + }; + + # Dashboard configuration + dashboards = { + path = mkOption { + type = types.str; + default = "/etc/grafana-dashboards"; + description = "Path to dashboard files"; + }; + + files = mkOption { + type = types.listOf (types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Dashboard name (without .json extension)"; + example = "node-exporter"; + }; + source = mkOption { + type = types.path; + description = "Path to dashboard JSON file"; + }; + }; + }); + default = []; + description = "Dashboard files to provision"; + example = literalExpression '' + [ + { + name = "node-exporter"; + source = ./dashboards/node-exporter.json; + } + { + name = "traefik"; + source = ./dashboards/traefik.json; + } + ] + ''; + }; + }; + + # Extra user groups for accessing secrets + extraGroups = mkOption { + type = types.listOf types.str; + default = []; + description = "Additional groups for the grafana user"; + example = ["influxdb2"]; + }; + + # Additional settings + extraSettings = mkOption { + type = types.attrs; + default = {}; + description = "Additional Grafana settings"; + }; + + plugins = mkOption { + type = types.listOf types.package; + default = []; + description = "Grafana plugins to install"; + example = literalExpression "with pkgs.grafanaPlugins; [ grafana-piechart-panel ]"; + }; + }; + + config = mkIf cfg.enable { + # Add grafana user to extra groups (e.g., for accessing secrets) + users.users.grafana.extraGroups = cfg.extraGroups; + + services.grafana = { + enable = true; + dataDir = cfg.dataDir; + declarativePlugins = cfg.plugins; + + settings = + recursiveUpdate { + server = { + http_port = cfg.port; + http_addr = "0.0.0.0"; + domain = cfg.domain; + root_url = cfg.rootUrl; + oauth_auto_login = cfg.auth.oauthAutoLogin; + }; + + "auth.generic_oauth" = { + enabled = cfg.auth.genericOauth.enabled; + }; + + auth = { + disable_login_form = cfg.auth.disableLoginForm; + }; + } + cfg.extraSettings; + + provision = { + enable = true; + + datasources.settings = { + datasources = let + # Build datasource list + datasources = + [] + ++ optional cfg.datasources.prometheus.enable { + uid = cfg.datasources.prometheus.uid; + name = "Prometheus"; + type = "prometheus"; + url = cfg.datasources.prometheus.url; + } + ++ optional cfg.datasources.loki.enable { + uid = cfg.datasources.loki.uid; + 
name = "Loki"; + type = "loki"; + url = cfg.datasources.loki.url; + } + ++ optional cfg.datasources.influxdb.enable { + uid = cfg.datasources.influxdb.uid; + name = "InfluxDB"; + type = "influxdb"; + url = cfg.datasources.influxdb.url; + access = "proxy"; + jsonData = { + dbName = cfg.datasources.influxdb.database; + httpHeaderName1 = "Authorization"; + }; + secureJsonData = mkIf (cfg.datasources.influxdb.tokenPath != null) { + httpHeaderValue1 = "$__file{${cfg.datasources.influxdb.tokenPath}}"; + }; + } + ++ cfg.datasources.extra; + in + datasources; + }; + + dashboards.settings.providers = mkIf (cfg.dashboards.files != []) [ + { + name = "homelab-dashboards"; + options.path = cfg.dashboards.path; + } + ]; + }; + }; + + # Open firewall if requested + networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; + + # Provision dashboard files + environment.etc = dashboardFiles; + + # Add to monitoring endpoints + homelab.monitoring.metrics = [ + { + name = "grafana"; + port = cfg.port; + path = "/metrics"; + jobName = "grafana"; + labels = { + service = "grafana"; + component = "monitoring"; + }; + } + ]; + + # Add health checks + homelab.monitoring.healthChecks = [ + { + name = "grafana-web-interface"; + port = cfg.port; + path = "/api/health"; + interval = "30s"; + conditions = [ + "[STATUS] == 200" + "[BODY].database == ok" + "[RESPONSE_TIME] < 2000" + ]; + group = "monitoring"; + labels = { + service = "grafana"; + component = "web-interface"; + }; + } + { + name = "grafana-login-page"; + port = cfg.port; + path = "/login"; + interval = "60s"; + conditions = [ + "[STATUS] == 200" + "[RESPONSE_TIME] < 3000" + ]; + group = "monitoring"; + labels = { + service = "grafana"; + component = "login"; + }; + } + ]; + + # Add reverse proxy entry + homelab.reverseProxy.entries = [ + { + subdomain = "grafana"; + host = homelabCfg.hostname; + port = cfg.port; + } + ]; + }; +} diff --git a/nixos/hosts/warpgate/host.nix b/modules/homelab/services/monitoring/influxdb.nix similarity index 100% rename from nixos/hosts/warpgate/host.nix rename to modules/homelab/services/monitoring/influxdb.nix diff --git a/modules/homelab/services/monitoring/loki.nix b/modules/homelab/services/monitoring/loki.nix new file mode 100644 index 0000000..e69de29 diff --git a/modules/homelab/services/monitoring/monitoring-stack.nix b/modules/homelab/services/monitoring/monitoring-stack.nix new file mode 100644 index 0000000..5275460 --- /dev/null +++ b/modules/homelab/services/monitoring/monitoring-stack.nix @@ -0,0 +1,60 @@ +{ + config, + lib, + ... 
+}: +with lib; let + cfg = config.homelab.services.monitoring-stack; +in { + imports = [ + ./prometheus.nix + ./alertmanager.nix + ./grafana.nix + ]; + + options.homelab.services.monitoring-stack = { + enable = mkEnableOption "Complete monitoring stack (Prometheus + Alertmanager + Grafana)"; + + prometheus = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable Prometheus"; + }; + }; + + alertmanager = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable Alertmanager"; + }; + }; + + grafana = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable Grafana"; + }; + }; + }; + + config = mkIf cfg.enable { + # Enable services based on configuration + homelab.services.prometheus.enable = mkDefault cfg.prometheus.enable; + homelab.services.alertmanager.enable = mkDefault cfg.alertmanager.enable; + homelab.services.grafana.enable = mkDefault cfg.grafana.enable; + + # Configure Prometheus to use Alertmanager if both are enabled + homelab.services.prometheus.alertmanager = mkIf (cfg.prometheus.enable && cfg.alertmanager.enable) { + enable = true; + url = "http://localhost:${toString config.homelab.services.alertmanager.port}"; + }; + + # Configure Grafana to use Prometheus if both are enabled + homelab.services.grafana.datasources.prometheus = mkIf (cfg.prometheus.enable && cfg.grafana.enable) { + url = "http://localhost:${toString config.homelab.services.prometheus.port}"; + }; + }; +} diff --git a/modules/homelab/services/monitoring/prometheus.nix b/modules/homelab/services/monitoring/prometheus.nix new file mode 100644 index 0000000..76c30ff --- /dev/null +++ b/modules/homelab/services/monitoring/prometheus.nix @@ -0,0 +1,203 @@ +{ + config, + lib, + pkgs, + ... 
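Because the stack module only applies mkDefault toggles, per-host settings still win. A minimal sketch:

{ ... }: {
  homelab.services.monitoring-stack = {
    enable = true;
    alertmanager.enable = false;  # e.g. skip alerting on a staging node
  };

  # Direct service options remain available alongside the stack toggles:
  homelab.services.prometheus.retention = "30d";
}

With alertmanager.enable = false, the Prometheus-to-Alertmanager wiring above is skipped as well, since it is guarded on both toggles being set.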
+}: +with lib; let + serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; + + cfg = config.homelab.services.prometheus; + homelabCfg = config.homelab; + + # Generate Prometheus scrape configs from global monitoring data + prometheusScrapeConfigs = let + allMetrics = homelabCfg.global.monitoring.allMetrics or []; + jobGroups = groupBy (m: m.jobName) allMetrics; + + scrapeConfigs = + mapAttrsToList (jobName: endpoints: { + job_name = jobName; + # Job-level settings come from the first endpoint in each group + scrape_interval = (head endpoints).scrapeInterval or "30s"; + static_configs = [ + { + targets = map (endpoint: "${endpoint.host}:${toString endpoint.port}") endpoints; + labels = fold (endpoint: acc: acc // endpoint.labels) {} endpoints; + } + ]; + metrics_path = (head endpoints).path or null; + }) + jobGroups; + in + scrapeConfigs; + + # Service-specific options beyond the standard interface + prometheusServiceOptions = { + retention = mkOption { + type = types.str; + default = "15d"; + description = "How long to retain metrics data"; + }; + + alertmanager = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable integration with Alertmanager"; + }; + + url = mkOption { + type = types.str; + default = "${homelabCfg.hostname}.${homelabCfg.domain}:9093"; + description = "Alertmanager URL"; + }; + }; + + extraScrapeConfigs = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional scrape configurations"; + }; + + extraAlertingRules = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional alerting rules"; + }; + + globalConfig = mkOption { + type = types.attrs; + default = { + scrape_interval = "15s"; + evaluation_interval = "15s"; + }; + description = "Global Prometheus configuration"; + }; + + extraFlags = mkOption { + type = types.listOf types.str; + default = []; + description = "Extra command line flags"; + }; + + ruleFiles = mkOption { + type = types.listOf types.path; + default = []; + description = "Additional rule files to load"; + }; + }; + + # Standard alerting rules + alertingRules = [ + { + name = "homelab.rules"; + rules = [ + { + alert = "InstanceDown"; + expr = "up == 0"; + for = "5m"; + labels = {severity = "critical";}; + annotations = { + summary = "Instance {{ $labels.instance }} down"; + description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."; + }; + } + { + alert = "HighCPUUsage"; + expr = "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100) > 80"; + for = "10m"; + labels = {severity = "warning";}; + annotations = { + summary = "High CPU usage on {{ $labels.instance }}"; + description = "CPU usage is above 80% for more than 10 minutes on {{ $labels.instance }}."; + }; + } + { + alert = "HighMemoryUsage"; + expr = "(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100 > 85"; + for = "10m"; + labels = {severity = "warning";}; + annotations = { + summary = "High memory usage on {{ $labels.instance }}"; + description = "Memory usage is above 85% for more than 10 minutes on {{ $labels.instance }}."; + }; + } + { + alert = "DiskSpaceLow"; + expr = "((node_filesystem_size_bytes - node_filesystem_avail_bytes) / node_filesystem_size_bytes) * 100 > 90"; + for = "5m"; + labels = {severity = "critical";}; + annotations = { + summary = "Disk space low on {{ $labels.instance }}"; + description = "Disk usage is above 90% on {{ $labels.instance }} {{ $labels.mountpoint }}."; + }; + } + ]; + } + ]; +in { + 
options.homelab.services.prometheus = serviceInterface.mkServiceInterface { + serviceName = "prometheus"; + defaultPort = 9090; + defaultSubdomain = "prometheus"; + monitoringPath = "/metrics"; + healthCheckPath = "/-/healthy"; + healthCheckConditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; + serviceOptions = prometheusServiceOptions; + }; + + config = serviceInterface.mkServiceConfig { + inherit config cfg homelabCfg; + serviceName = "prometheus"; + + extraMonitoringLabels = { + component = "monitoring-server"; + }; + + customHealthChecks = [ + { + name = "prometheus-ready"; + port = cfg.port; + path = "/-/ready"; + interval = "30s"; + conditions = ["[STATUS] == 200"]; + group = "monitoring"; + labels = { + service = "prometheus"; + component = "readiness"; + }; + } + ]; + + serviceConfig = { + services.prometheus = { + enable = true; + port = cfg.port; + listenAddress = "0.0.0.0"; + retentionTime = cfg.retention; + + globalConfig = cfg.globalConfig; + extraFlags = cfg.extraFlags; + + scrapeConfigs = prometheusScrapeConfigs ++ cfg.extraScrapeConfigs; + + ruleFiles = + map (ruleGroup: + pkgs.writeText "${ruleGroup.name}.yml" (builtins.toJSON { + groups = [ruleGroup]; + })) (alertingRules ++ cfg.extraAlertingRules) + ++ cfg.ruleFiles; + + alertmanagers = mkIf cfg.alertmanager.enable [ + { + static_configs = [ + { + targets = [cfg.alertmanager.url]; + } + ]; + } + ]; + }; + }; + }; +} diff --git a/modules/homelab/services/monitoring/promtail.nix b/modules/homelab/services/monitoring/promtail.nix new file mode 100644 index 0000000..e69de29 diff --git a/modules/homelab/services/monitoring/tempo.nix b/modules/homelab/services/monitoring/tempo.nix new file mode 100644 index 0000000..e69de29 diff --git a/modules/homelab/services/postgres.nix b/modules/homelab/services/postgres.nix new file mode 100644 index 0000000..e69de29 diff --git a/modules/homelab/services/prometheus_old.nix b/modules/homelab/services/prometheus_old.nix new file mode 100644 index 0000000..9485b3a --- /dev/null +++ b/modules/homelab/services/prometheus_old.nix @@ -0,0 +1,208 @@ +# modules/services/prometheus.nix +{ + config, + lib, + pkgs, + ... 
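To make the aggregation concrete: endpoints sharing a jobName collapse into one scrape job with merged labels. A sketch over hypothetical endpoint data:

let
  allMetrics = [
    { jobName = "node"; host = "photos"; port = 9100; path = "/metrics"; scrapeInterval = "30s"; labels = { env = "prod"; }; }
    { jobName = "node"; host = "auth"; port = 9100; path = "/metrics"; scrapeInterval = "30s"; labels = { env = "prod"; }; }
  ];
in
  # groupBy + mapAttrsToList as in prometheusScrapeConfigs yields roughly:
  [
    {
      job_name = "node";
      scrape_interval = "30s";  # taken from the first endpoint in the group
      metrics_path = "/metrics";
      static_configs = [
        {
          targets = ["photos:9100" "auth:9100"];
          labels = { env = "prod"; };  # per-endpoint labels folded together
        }
      ];
    }
  ]

Note that the fold means a later endpoint's labels win on key collisions, so labels should be kept consistent within a job.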
+}: +with lib; let + cfg = config.homelab.services.prometheus; + globalCfg = config.homelab.global; +in { + options.homelab.services.prometheus = { + enable = mkEnableOption "Prometheus monitoring server"; + + port = mkOption { + type = types.port; + default = 9090; + description = "Prometheus server port"; + }; + + webExternalUrl = mkOption { + type = types.str; + default = "http://${globalCfg.hostname}:${toString cfg.port}"; + description = "External URL for Prometheus"; + }; + + retention = mkOption { + type = types.str; + default = "30d"; + description = "Data retention period"; + }; + + scrapeConfigs = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional scrape configurations"; + }; + + alertmanager = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable Alertmanager integration"; + }; + + url = mkOption { + type = types.str; + default = "http://localhost:9093"; + description = "Alertmanager URL"; + }; + }; + }; + + config = mkIf cfg.enable { + # Register service with global homelab config + homelab.global.services.prometheus = { + enable = true; + description = "Metrics collection and monitoring server"; + category = "monitoring"; + ports = [cfg.port]; + tags = ["metrics" "monitoring" "alerting"]; + priority = 20; + dependencies = ["node-exporter"]; + }; + + # Configure the actual Prometheus service + services.prometheus = { + enable = true; + port = cfg.port; + webExternalUrl = cfg.webExternalUrl; + + retentionTime = cfg.retention; + + scrapeConfigs = + [ + # Auto-discover monitoring endpoints from global config + { + job_name = "homelab-auto"; + static_configs = [ + { + targets = + map ( + endpoint: "${globalCfg.hostname}:${toString endpoint.port}" + ) + globalCfg.monitoring.endpoints; + } + ]; + scrape_interval = "30s"; + metrics_path = "/metrics"; + } + ] + ++ cfg.scrapeConfigs; + + # Alertmanager configuration + alertmanagers = mkIf cfg.alertmanager.enable [ + { + static_configs = [ + { + targets = [cfg.alertmanager.url]; + } + ]; + } + ]; + + rules = [ + # Basic homelab alerting rules + (pkgs.writeText "homelab-alerts.yml" '' + groups: + - name: homelab + rules: + - alert: ServiceDown + expr: up == 0 + for: 5m + labels: + severity: critical + annotations: + summary: "Service {{ $labels.instance }} is down" + description: "{{ $labels.job }} on {{ $labels.instance }} has been down for more than 5 minutes." 
+ + - alert: HighMemoryUsage + expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9 + for: 10m + labels: + severity: warning + annotations: + summary: "High memory usage on {{ $labels.instance }}" + description: "Memory usage is above 90% on {{ $labels.instance }}" + + - alert: HighDiskUsage + expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes > 0.85 + for: 5m + labels: + severity: warning + annotations: + summary: "High disk usage on {{ $labels.instance }}" + description: "Disk usage is above 85% on {{ $labels.instance }} for filesystem {{ $labels.mountpoint }}" + '') + ]; + }; + + # Add monitoring endpoint to global config + homelab.global.monitoring.endpoints = [ + { + name = "prometheus"; + port = cfg.port; + path = "/metrics"; + jobName = "prometheus"; + scrapeInterval = "30s"; + labels = { + service = "prometheus"; + role = "monitoring"; + }; + } + ]; + + # Add reverse proxy entry if configured + homelab.global.reverseProxy.entries = mkIf (globalCfg.domain != null) [ + { + subdomain = "prometheus"; + port = cfg.port; + path = "/"; + enableAuth = true; + enableSSL = true; + customHeaders = { + "X-Frame-Options" = "DENY"; + "X-Content-Type-Options" = "nosniff"; + }; + } + ]; + + # Add backup job for Prometheus data + homelab.global.backups.jobs = [ + { + name = "prometheus-data"; + backend = "restic"; + paths = ["/var/lib/prometheus2"]; + schedule = "daily"; + retention = { + daily = "7"; + weekly = "4"; + monthly = "3"; + yearly = "1"; + }; + excludePatterns = [ + "*.tmp" + "*/wal/*" + ]; + preHook = '' + # Stop prometheus temporarily for consistent backup + systemctl stop prometheus + ''; + postHook = '' + # Restart prometheus after backup + systemctl start prometheus + ''; + } + ]; + + # Open firewall port + networking.firewall.allowedTCPPorts = [cfg.port]; + + # Create prometheus configuration directory + systemd.tmpfiles.rules = [ + "d /var/lib/prometheus2 0755 prometheus prometheus -" + "d /etc/prometheus 0755 root root -" + ]; + }; +} diff --git a/modules/lib/helpers.nix b/modules/lib/helpers.nix new file mode 100644 index 0000000..34201dc --- /dev/null +++ b/modules/lib/helpers.nix @@ -0,0 +1,126 @@ +# modules/lib/helpers.nix +{lib, ...}: +with lib; rec { + # Helper to merge global configurations from multiple sources + mergeGlobalConfigs = configs: let + mergeEndpoints = foldl' (acc: cfg: acc ++ cfg.monitoring.endpoints) []; + mergeBackups = foldl' (acc: cfg: acc ++ cfg.backups.jobs) []; + mergeProxyEntries = foldl' (acc: cfg: acc ++ cfg.reverseProxy.entries) []; + in { + monitoring.endpoints = mergeEndpoints configs; + backups.jobs = mergeBackups configs; + reverseProxy.entries = mergeProxyEntries configs; + }; + + # Helper to create a service module template + createServiceModule = { + name, + port, + hasMetrics ? true, + hasWebUI ? true, + dataDir ? "/var/lib/${name}", + }: { + config, + lib, + pkgs, + ... 
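mergeGlobalConfigs is plain list concatenation across hosts; a consuming sketch (the node references are hypothetical, e.g. from a colmena deployment):

{ lib, nodes, ... }: let
  helpers = import ./helpers.nix { inherit lib; };
  merged = helpers.mergeGlobalConfigs [
    nodes.photos.config.homelab.global
    nodes.auth.config.homelab.global
  ];
in
  merged.monitoring.endpoints  # every endpoint from both hosts, in input order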
+ }: + with lib; let + cfg = config.services.${name}; + in { + options.services.${name} = { + enable = mkEnableOption "${name} service"; + port = mkOption { + type = types.port; + default = port; + description = "Port for ${name}"; + }; + dataDir = mkOption { + type = types.str; + default = dataDir; + description = "Data directory for ${name}"; + }; + enableMetrics = mkOption { + type = types.bool; + default = hasMetrics; + description = "Enable metrics endpoint"; + }; + exposeWeb = mkOption { + type = types.bool; + default = hasWebUI; + description = "Expose web interface"; + }; + }; + + config = mkIf cfg.enable { + homelab.global = { + backups.jobs = [ + { + name = "${name}-data"; + backend = "restic"; + paths = [cfg.dataDir]; + schedule = "daily"; + } + ]; + + reverseProxy.entries = mkIf cfg.exposeWeb [ + { + subdomain = name; + port = cfg.port; + } + ]; + + monitoring.endpoints = mkIf cfg.enableMetrics [ + { + name = name; + port = cfg.port; + path = "/metrics"; + jobName = name; + } + ]; + }; + }; + }; + + # Helper to generate nginx configuration from proxy entries + generateNginxConfig = proxyEntries: domain: let + createVHost = entry: { + "${entry.subdomain}.${domain}" = { + enableACME = entry.enableSSL; + forceSSL = entry.enableSSL; + locations."${entry.path}" = { + proxyPass = "http://${entry.targetHost}:${toString entry.port}"; + proxyWebsockets = entry.websockets; + extraConfig = '' + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + ${concatStringsSep "\n" (mapAttrsToList ( + name: value: "proxy_set_header ${name} ${value};" + ) + entry.customHeaders)} + ''; + }; + }; + }; + in + foldl' (acc: entry: acc // (createVHost entry)) {} proxyEntries; + + # Helper to generate Prometheus scrape configs + generatePrometheusConfig = endpoints: let + endpointsByJob = groupBy (e: e.jobName) endpoints; + createJobConfig = jobName: jobEndpoints: { + job_name = jobName; + scrape_interval = (head jobEndpoints).scrapeInterval; + metrics_path = (head jobEndpoints).path; + static_configs = [ + { + targets = map (e: "${e.targetHost}:${toString e.port}") jobEndpoints; + labels = foldl' (acc: e: acc // e.labels) {} jobEndpoints; + } + ]; + }; + in + mapAttrsToList createJobConfig endpointsByJob; +} diff --git a/modules/nixos/backup-manager.nix b/modules/nixos/backup-manager.nix new file mode 100644 index 0000000..cd06883 --- /dev/null +++ b/modules/nixos/backup-manager.nix @@ -0,0 +1,187 @@ +# modules/backup-manager.nix +{ + config, + lib, + pkgs, + ... 
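The nginx helper maps proxy entries directly onto services.nginx.virtualHosts. One caveat: it reads a targetHost field on each entry, which callers must supply themselves. A sketch under that assumption:

{ config, lib, ... }: let
  helpers = import ../lib/helpers.nix { inherit lib; };
  # Assumption: every proxied service listens locally on this host.
  entries = map (e: e // { targetHost = "127.0.0.1"; })
    config.homelab.global.reverseProxy.entries;
in {
  services.nginx = {
    enable = true;
    virtualHosts = helpers.generateNginxConfig entries config.homelab.global.domain;
  };
}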
+}: +with lib; let + cfg = config.homelab.backups; + globalCfg = config.homelab.global; + + # Create systemd services for backup jobs + createBackupService = job: let + serviceName = "backup-${job.name}"; + allExcludes = globalCfg.backups.globalExcludes ++ job.excludePatterns; + excludeArgs = map (pattern: "--exclude '${pattern}'") allExcludes; + + backupScript = + if job.backend == "restic" + then '' + #!/bin/bash + set -euo pipefail + + ${optionalString (job.preHook != null) job.preHook} + + # Restic backup + ${pkgs.restic}/bin/restic backup \ + ${concatStringsSep " " (map (path: "'${path}'") job.paths)} \ + ${concatStringsSep " " excludeArgs} \ + --tag "host:${globalCfg.hostname}" \ + --tag "job:${job.name}" \ + --tag "env:${globalCfg.environment}" + + # Apply retention policy + ${pkgs.restic}/bin/restic forget \ + --keep-daily ${job.retention.daily} \ + --keep-weekly ${job.retention.weekly} \ + --keep-monthly ${job.retention.monthly} \ + --keep-yearly ${job.retention.yearly} \ + --prune + + ${optionalString (job.postHook != null) job.postHook} + '' + else if job.backend == "borg" + then '' + #!/bin/bash + set -euo pipefail + + ${optionalString (job.preHook != null) job.preHook} + + # Borg backup + ${pkgs.borgbackup}/bin/borg create \ + --stats --progress \ + ${concatStringsSep " " excludeArgs} \ + "::${globalCfg.hostname}-${job.name}-{now}" \ + ${concatStringsSep " " (map (path: "'${path}'") job.paths)} + + # Apply retention policy + ${pkgs.borgbackup}/bin/borg prune \ + --keep-daily ${job.retention.daily} \ + --keep-weekly ${job.retention.weekly} \ + --keep-monthly ${job.retention.monthly} \ + --keep-yearly ${job.retention.yearly} + + ${optionalString (job.postHook != null) job.postHook} + '' + else throw "Unsupported backup backend: ${job.backend}"; + in { + ${serviceName} = { + description = "Backup job: ${job.name}"; + after = ["network-online.target"]; + wants = ["network-online.target"]; + serviceConfig = { + Type = "oneshot"; + User = "backup"; + Group = "backup"; + ExecStart = pkgs.writeScript "backup-${job.name}" backupScript; + EnvironmentFile = "/etc/backup/environment"; + }; + }; + }; + + # Create systemd timers for backup jobs + createBackupTimer = job: let + serviceName = "backup-${job.name}"; + timerName = "${serviceName}.timer"; + in { + ${timerName} = { + description = "Timer for backup job: ${job.name}"; + wantedBy = ["timers.target"]; + timerConfig = { + OnCalendar = + if job.schedule == "daily" + then "daily" + else if job.schedule == "weekly" + then "weekly" + else if job.schedule == "hourly" + then "hourly" + else job.schedule; # Assume it's a cron expression + Persistent = true; + RandomizedDelaySec = "15min"; + }; + }; + }; +in { + options.homelab.backups = { + enable = mkEnableOption "Backup management"; + + restic = { + repository = mkOption { + type = types.str; + description = "Restic repository URL"; + }; + passwordFile = mkOption { + type = types.str; + default = "/etc/backup/restic-password"; + description = "Path to file containing restic password"; + }; + }; + + borg = { + repository = mkOption { + type = types.str; + description = "Borg repository path"; + }; + sshKey = mkOption { + type = types.str; + default = "/etc/backup/borg-ssh-key"; + description = "Path to SSH key for borg repository"; + }; + }; + }; + + config = mkIf (cfg.enable && globalCfg.enable && (length globalCfg.backups.jobs) > 0) { + # Create backup user + users.users.backup = { + isSystemUser = true; + group = "backup"; + home = "/var/lib/backup"; + createHome = true; + }; + + 
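    # For reference, a job registered elsewhere, e.g. (hypothetical values):
    #
    #   homelab.global.backups.jobs = [{
    #     name = "grafana-data";
    #     backend = "restic";
    #     paths = ["/var/lib/grafana"];
    #     schedule = "daily";
    #   }];
    #
    # materializes here as backup-grafana-data.service plus backup-grafana-data.timer,
    # with the type's retention defaults applied and globalExcludes appended to the
    # job's own excludePatterns.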
users.groups.backup = {}; + + # Install backup tools + environment.systemPackages = with pkgs; [ + restic + borgbackup + rclone + + (pkgs.writeScriptBin "backup-status" '' + #!/bin/bash + echo "=== Backup Status ===" + echo + ${concatStringsSep "\n" (map (job: '' + echo "Job: ${job.name}" + systemctl is-active backup-${job.name}.timer || echo "Timer inactive" + systemctl status backup-${job.name}.timer --no-pager -l | grep -E "(Active|Trigger)" || true + echo + '') + globalCfg.backups.jobs)} + '') + ]; + + # Create systemd services and timers + systemd.services = lib.foldl' (acc: job: acc // (createBackupService job)) {} globalCfg.backups.jobs; + systemd.timers = lib.foldl' (acc: job: acc // (createBackupTimer job)) {} globalCfg.backups.jobs; + + # Environment file template + environment.etc."backup/environment.example".text = '' + # Restic configuration + RESTIC_REPOSITORY=${cfg.restic.repository} + RESTIC_PASSWORD_FILE=${cfg.restic.passwordFile} + + # AWS S3 credentials (if using S3 backend) + AWS_ACCESS_KEY_ID=your-access-key + AWS_SECRET_ACCESS_KEY=your-secret-key + + # Borg configuration + BORG_REPO=${cfg.borg.repository} + BORG_RSH="ssh -i ${cfg.borg.sshKey}" + + # Notification settings + NOTIFICATION_URL=your-webhook-url + ''; + }; +} diff --git a/modules/nixos/default.nix b/modules/nixos/default.nix index a0250d5..af472eb 100644 --- a/modules/nixos/default.nix +++ b/modules/nixos/default.nix @@ -1,3 +1,8 @@ { ente = import ./ente.nix; + global-config = import ./global-config.nix; + backup-manager = import ./backup-manager.nix; + + # Service modules + services = import ./services; } diff --git a/modules/nixos/ente.nix b/modules/nixos/ente.nix index 283e4ec..7c26c57 100644 --- a/modules/nixos/ente.nix +++ b/modules/nixos/ente.nix @@ -72,6 +72,11 @@ in { type = types.str; description = "The domain under which the photos frontend will be served."; }; + + auth = mkOption { + type = types.str; + description = "The domain under which the auth frontend will be served."; + }; }; }; @@ -187,6 +192,11 @@ in { name = "ente"; user = "ente"; }; + key = { + encryption._secret = pkgs.writeText "encryption" "T0sn+zUVFOApdX4jJL4op6BtqqAfyQLH95fu8ASWfno="; + hash._secret = pkgs.writeText "hash" "g/dBZBs1zi9SXQ0EKr4RCt1TGr7ZCKkgrpjyjrQEKovWPu5/ce8dYM6YvMIPL23MMZToVuuG+Z6SGxxTbxg5NQ=="; + }; + jwt.secret._secret = pkgs.writeText "jwt" "i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8="; }; systemd.services.ente = { @@ -243,6 +253,7 @@ in { BindReadOnlyPaths = [ "${cfgApi.package}/share/museum/migrations:${dataDir}/migrations" "${cfgApi.package}/share/museum/mail-templates:${dataDir}/mail-templates" + "${cfgApi.package}/share/museum/web-templates:${dataDir}/web-templates" ]; User = cfgApi.user; @@ -311,7 +322,12 @@ in { in { enable = true; virtualHosts.${domainFor "accounts"} = { - forceSSL = mkDefault false; + listen = [ + { + addr = "0.0.0.0"; + port = 3001; + } + ]; locations."/" = { root = webPackage "accounts"; tryFiles = "$uri $uri.html /index.html"; @@ -321,7 +337,12 @@ in { }; }; virtualHosts.${domainFor "cast"} = { - forceSSL = mkDefault false; + listen = [ + { + addr = "0.0.0.0"; + port = 3004; + } + ]; locations."/" = { root = webPackage "cast"; tryFiles = "$uri $uri.html /index.html"; @@ -334,7 +355,12 @@ in { serverAliases = [ (domainFor "albums") # the albums app is shared with the photos frontend ]; - forceSSL = mkDefault false; + listen = [ + { + addr = "0.0.0.0"; + port = 3000; + } + ]; locations."/" = { root = webPackage "photos"; tryFiles = "$uri $uri.html /index.html"; @@ -343,6 +369,21 
@@ in { ''; }; }; + virtualHosts.${domainFor "auth"} = { + listen = [ + { + addr = "0.0.0.0"; + port = 3003; + } + ]; + locations."/" = { + root = webPackage "auth"; + tryFiles = "$uri $uri.html /index.html"; + extraConfig = '' + add_header Access-Control-Allow-Origin 'https://${cfgWeb.domains.api}'; + ''; + }; + }; }; }) ]; diff --git a/modules/nixos/global-config.nix b/modules/nixos/global-config.nix new file mode 100644 index 0000000..3443eca --- /dev/null +++ b/modules/nixos/global-config.nix @@ -0,0 +1,462 @@ +# modules/global-config.nix +{ + config, + lib, + outputs, + ... +}: +with lib; let + cfg = config.homelab.global; + + # Service type definition + serviceType = types.submodule { + options = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable this service"; + }; + + description = mkOption { + type = types.str; + description = "Human-readable description of the service"; + }; + + category = mkOption { + type = types.enum ["monitoring" "networking" "storage" "security" "media" "development" "backup" "other"]; + default = "other"; + description = "Service category for organization"; + }; + + dependencies = mkOption { + type = types.listOf types.str; + default = []; + description = "List of other homelab services this depends on"; + }; + + ports = mkOption { + type = types.listOf types.port; + default = []; + description = "Ports this service uses"; + }; + + tags = mkOption { + type = types.listOf types.str; + default = []; + description = "Additional tags for this service"; + }; + + priority = mkOption { + type = types.int; + default = 100; + description = "Service priority (lower numbers start first)"; + }; + }; + }; + + # Type definitions + monitoringEndpointType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Name of the monitoring endpoint"; + }; + port = mkOption { + type = types.port; + description = "Port number for the endpoint"; + }; + path = mkOption { + type = types.str; + default = "/metrics"; + description = "Path for the metrics endpoint"; + }; + jobName = mkOption { + type = types.str; + description = "Prometheus job name"; + }; + scrapeInterval = mkOption { + type = types.str; + default = "30s"; + description = "Prometheus scrape interval"; + }; + labels = mkOption { + type = types.attrsOf types.str; + default = {}; + description = "Additional labels for this endpoint"; + }; + }; + }; + + backupJobType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Name of the backup job"; + }; + backend = mkOption { + type = types.enum ["restic" "borg" "rclone"]; + description = "Backup backend to use"; + }; + paths = mkOption { + type = types.listOf types.str; + description = "List of paths to backup"; + }; + schedule = mkOption { + type = types.str; + default = "daily"; + description = "Backup schedule (cron format or preset)"; + }; + retention = mkOption { + type = types.attrsOf types.str; + default = { + daily = "7"; + weekly = "4"; + monthly = "6"; + yearly = "2"; + }; + description = "Retention policy"; + }; + excludePatterns = mkOption { + type = types.listOf types.str; + default = []; + description = "Patterns to exclude from backup"; + }; + preHook = mkOption { + type = types.nullOr types.str; + default = null; + description = "Script to run before backup"; + }; + postHook = mkOption { + type = types.nullOr types.str; + default = null; + description = "Script to run after backup"; + }; + }; + }; + + reverseProxyEntryType = types.submodule { + options = 
{ + subdomain = mkOption { + type = types.str; + description = "Subdomain for the service"; + }; + port = mkOption { + type = types.port; + description = "Internal port to proxy to"; + }; + path = mkOption { + type = types.str; + default = "/"; + description = "Path prefix for the service"; + }; + enableAuth = mkOption { + type = types.bool; + default = false; + description = "Enable authentication for this service"; + }; + enableSSL = mkOption { + type = types.bool; + default = true; + description = "Enable SSL for this service"; + }; + customHeaders = mkOption { + type = types.attrsOf types.str; + default = {}; + description = "Custom headers to add"; + }; + websockets = mkOption { + type = types.bool; + default = false; + description = "Enable websocket support"; + }; + }; + }; + + # Helper functions for services + enabledServices = filterAttrs (name: service: service.enable) cfg.services; + servicesByCategory = category: filterAttrs (name: service: service.enable && service.category == category) cfg.services; +in { + imports = [ + ./motd + ]; + + options.homelab.global = { + enable = mkEnableOption "Global homelab configuration"; + + hostname = mkOption { + type = types.str; + description = "Hostname for this system"; + }; + + domain = mkOption { + type = types.str; + default = "procopius.dk"; + description = "Base domain for the homelab"; + }; + + environment = mkOption { + type = types.enum ["production" "staging" "development"]; + default = "production"; + description = "Environment type"; + }; + + location = mkOption { + type = types.str; + default = "homelab"; + description = "Physical location identifier"; + }; + + tags = mkOption { + type = types.listOf types.str; + default = []; + description = "Tags for this system"; + }; + + services = mkOption { + type = types.attrsOf serviceType; + default = {}; + description = "Homelab services configuration"; + example = literalExpression '' + { + prometheus = { + enable = true; + description = "Metrics collection and monitoring"; + category = "monitoring"; + ports = [ 9090 ]; + tags = [ "metrics" "alerting" ]; + }; + + traefik = { + enable = true; + description = "Reverse proxy and load balancer"; + category = "networking"; + ports = [ 80 443 8080 ]; + tags = [ "proxy" "loadbalancer" ]; + priority = 10; + }; + } + ''; + }; + + monitoring = { + endpoints = mkOption { + type = types.listOf monitoringEndpointType; + default = []; + description = "Monitoring endpoints exposed by this system"; + }; + + nodeExporter = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable node exporter"; + }; + port = mkOption { + type = types.port; + default = 9100; + description = "Node exporter port"; + }; + }; + }; + + backups = { + jobs = mkOption { + type = types.listOf backupJobType; + default = []; + description = "Backup jobs for this system"; + }; + + globalExcludes = mkOption { + type = types.listOf types.str; + default = [ + "*.tmp" + "*.cache" + "*/.git" + "*/node_modules" + "*/target" + ]; + description = "Global exclude patterns for all backup jobs"; + }; + }; + + reverseProxy = { + entries = mkOption { + type = types.listOf reverseProxyEntryType; + default = []; + description = "Reverse proxy entries for this system"; + }; + }; + + # Helper function to add monitoring endpoint + addMonitoringEndpoint = mkOption { + type = types.functionTo (types.functionTo types.anything); + default = name: endpoint: { + homelab.global.monitoring.endpoints = [ + (endpoint // {inherit name;}) + ]; + }; + description = "Helper 
function to add monitoring endpoints"; + }; + + # Helper function to add backup job + addBackupJob = mkOption { + type = types.functionTo (types.functionTo types.anything); + default = name: job: { + homelab.global.backups.jobs = [ + (job // {inherit name;}) + ]; + }; + description = "Helper function to add backup jobs"; + }; + + # Helper function to add reverse proxy entry + addReverseProxyEntry = mkOption { + type = types.functionTo (types.functionTo types.anything); + default = subdomain: entry: { + homelab.global.reverseProxy.entries = [ + (entry // {inherit subdomain;}) + ]; + }; + description = "Helper function to add reverse proxy entries"; + }; + + # Helper functions + enabledServicesList = mkOption { + type = types.listOf types.str; + default = attrNames enabledServices; + description = "List of enabled service names"; + readOnly = true; + }; + + servicesByPriority = mkOption { + type = types.listOf types.str; + default = + map (x: x.name) (sort (a: b: a.priority < b.priority) + (mapAttrsToList (name: service: service // {inherit name;}) enabledServices)); + description = "Services sorted by priority"; + readOnly = true; + }; + }; + + config = mkIf cfg.enable { + # Set hostname + networking.hostName = cfg.hostname; + + # Configure node exporter if enabled + services.prometheus.exporters.node = mkIf cfg.monitoring.nodeExporter.enable { + enable = true; + port = cfg.monitoring.nodeExporter.port; + enabledCollectors = [ + "systemd" + "textfile" + "filesystem" + "loadavg" + "meminfo" + "netdev" + "stat" + ]; + }; + + # Automatically add node exporter to monitoring endpoints + homelab.global.monitoring.endpoints = mkIf cfg.monitoring.nodeExporter.enable [ + { + name = "node-exporter"; + port = cfg.monitoring.nodeExporter.port; + path = "/metrics"; + jobName = "node"; + labels = { + instance = cfg.hostname; + environment = cfg.environment; + location = cfg.location; + }; + } + ]; + + # Export configuration for external consumption + environment.etc."homelab/config.json".text = builtins.toJSON { + inherit (cfg) hostname domain environment location tags; + + services = + mapAttrs (name: service: { + inherit (service) enable description category dependencies ports tags priority; + }) + cfg.services; + + enabledServices = enabledServices; + + servicesByCategory = { + monitoring = servicesByCategory "monitoring"; + networking = servicesByCategory "networking"; + storage = servicesByCategory "storage"; + security = servicesByCategory "security"; + media = servicesByCategory "media"; + development = servicesByCategory "development"; + backup = servicesByCategory "backup"; + other = servicesByCategory "other"; + }; + + monitoring = { + endpoints = + map (endpoint: { + name = endpoint.name; + url = "http://${cfg.hostname}:${toString endpoint.port}${endpoint.path}"; + port = endpoint.port; + path = endpoint.path; + jobName = endpoint.jobName; + scrapeInterval = endpoint.scrapeInterval; + labels = + endpoint.labels + // { + hostname = cfg.hostname; + environment = cfg.environment; + }; + }) + cfg.monitoring.endpoints; + }; + + backups = { + jobs = cfg.backups.jobs; + }; + + reverseProxy = { + entries = + map (entry: { + subdomain = entry.subdomain; + url = "http://${cfg.hostname}:${toString entry.port}"; + port = entry.port; + path = entry.path; + domain = "${entry.subdomain}.${cfg.domain}"; + enableAuth = entry.enableAuth; + enableSSL = entry.enableSSL; + customHeaders = entry.customHeaders; + websockets = entry.websockets; + }) + cfg.reverseProxy.entries; + }; + }; + + # Create a status command 
that shows service information + environment.systemPackages = [ + # (pkgs.writeScriptBin "homelab-services" '' + # #!/bin/bash + # echo "🏠 Homelab Services Status" + # echo "==========================" + # echo + + # ${concatStringsSep "\n" (mapAttrsToList (name: service: '' + # echo "${name}: ${service.description}" + # echo " Category: ${service.category}" + # echo " Status: $(systemctl is-active ${name} 2>/dev/null || echo "not found")" + # ${optionalString (service.ports != []) '' + # echo " Ports: ${concatStringsSep ", " (map toString service.ports)}" + # ''} + # ${optionalString (service.tags != []) '' + # echo " Tags: ${concatStringsSep ", " service.tags}" + # ''} + # echo + # '') + # enabledServices)} + # '') + ]; + }; +} diff --git a/modules/nixos/motd/default.nix b/modules/nixos/motd/default.nix new file mode 100644 index 0000000..3c56198 --- /dev/null +++ b/modules/nixos/motd/default.nix @@ -0,0 +1,304 @@ +# modules/motd/default.nix +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.homelab.motd; + globalCfg = config.homelab.global; + enabledServices = filterAttrs (name: service: service.enable) globalCfg.services; + + homelab-motd = pkgs.writeShellScriptBin "homelab-motd" '' + #! /usr/bin/env bash + source /etc/os-release + + # Colors for output + RED='\033[0;31m' + GREEN='\033[0;32m' + YELLOW='\033[1;33m' + BLUE='\033[0;34m' + PURPLE='\033[0;35m' + CYAN='\033[0;36m' + WHITE='\033[1;37m' + NC='\033[0m' # No Color + BOLD='\033[1m' + + # Helper functions + print_header() { + echo -e "''${BOLD}''${BLUE}╔══════════════════════════════════════════════════════════════╗''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC}''${WHITE} 🏠 HOMELAB STATUS ''${NC}''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•''${NC}" + } + + print_section() { + echo -e "\n''${BOLD}''${CYAN}β–Ά $1''${NC}" + echo -e "''${CYAN}─────────────────────────────────────────────────────────────''${NC}" + } + + get_service_status() { + local service="$1" + if ${pkgs.systemd}/bin/systemctl is-active --quiet "$service" 2>/dev/null; then + echo -e "''${GREEN}●''${NC} Active" + elif ${pkgs.systemd}/bin/systemctl is-enabled --quiet "$service" 2>/dev/null; then + echo -e "''${YELLOW}●''${NC} Inactive" + else + echo -e "''${RED}●''${NC} Disabled" + fi + } + + get_timer_status() { + local timer="$1" + if ${pkgs.systemd}/bin/systemctl is-active --quiet "$timer" 2>/dev/null; then + local next_run=$(${pkgs.systemd}/bin/systemctl show "$timer" --property=NextElapseUSecRealtime --value 2>/dev/null || echo "0") + if [[ "$next_run" != "0" && "$next_run" != "n/a" ]]; then + local next_readable=$(${pkgs.systemd}/bin/systemctl list-timers --no-pager "$timer" 2>/dev/null | tail -n +2 | head -n 1 | awk '{print $1, $2}' || echo "Unknown") + echo -e "''${GREEN}●''${NC} Next: ''${next_readable}" + else + echo -e "''${GREEN}●''${NC} Active" + fi + else + echo -e "''${RED}●''${NC} Inactive" + fi + } + + # Main script + ${optionalString cfg.clearScreen "clear"} + print_header + + # Check if global config exists + CONFIG_FILE="/etc/homelab/config.json" + if [[ ! 
-f "$CONFIG_FILE" ]]; then + echo -e "''${RED}❌ Global homelab configuration not found at $CONFIG_FILE''${NC}" + exit 1 + fi + + # Parse global configuration + HOSTNAME=$(${pkgs.jq}/bin/jq -r '.hostname' "$CONFIG_FILE" 2>/dev/null || hostname) + DOMAIN=$(${pkgs.jq}/bin/jq -r '.domain' "$CONFIG_FILE" 2>/dev/null || echo "unknown") + ENVIRONMENT=$(${pkgs.jq}/bin/jq -r '.environment' "$CONFIG_FILE" 2>/dev/null || echo "unknown") + LOCATION=$(${pkgs.jq}/bin/jq -r '.location' "$CONFIG_FILE" 2>/dev/null || echo "unknown") + TAGS=$(${pkgs.jq}/bin/jq -r '.tags[]?' "$CONFIG_FILE" 2>/dev/null | tr '\n' ' ' || echo "none") + + print_section "SYSTEM INFO" + echo -e " ''${BOLD}Hostname:''${NC} $HOSTNAME" + echo -e " ''${BOLD}Domain:''${NC} $DOMAIN" + echo -e " ''${BOLD}Environment:''${NC} $ENVIRONMENT" + echo -e " ''${BOLD}Location:''${NC} $LOCATION" + echo -e " ''${BOLD}Tags:''${NC} ''${TAGS:-none}" + echo -e " ''${BOLD}Uptime:''${NC} $(${pkgs.procps}/bin/uptime -p)" + echo -e " ''${BOLD}Load:''${NC} $(${pkgs.procps}/bin/uptime | awk -F'load average:' '{print $2}' | xargs)" + + ${optionalString cfg.showServices '' + # Enabled services from homelab config + print_section "HOMELAB SERVICES" + ${concatStringsSep "\n" (mapAttrsToList (name: service: '' + status=$(get_service_status "${service.systemdService}") + printf " %-25s %s\n" "${name}" "$status" + '') + cfg.services)} + ''} + + ${optionalString cfg.showMonitoring '' + # Monitoring endpoints + print_section "MONITORING ENDPOINTS" + ENDPOINTS=$(${pkgs.jq}/bin/jq -c '.monitoring.endpoints[]?' "$CONFIG_FILE" 2>/dev/null || echo "") + if [[ -n "$ENDPOINTS" ]]; then + while IFS= read -r endpoint; do + name=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.name') + port=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.port') + path=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.path') + job=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.jobName') + + # Check if port is accessible + if ${pkgs.netcat}/bin/nc -z localhost "$port" 2>/dev/null; then + status="''${GREEN}●''${NC}" + else + status="''${RED}●''${NC}" + fi + + printf " %-20s %s %s:%s%s (job: %s)\n" "$name" "$status" "$HOSTNAME" "$port" "$path" "$job" + done <<< "$ENDPOINTS" + else + echo -e " ''${YELLOW}No monitoring endpoints configured''${NC}" + fi + ''} + + ${optionalString cfg.showBackups '' + # Backup jobs status + print_section "BACKUP JOBS" + BACKUP_JOBS=$(${pkgs.jq}/bin/jq -c '.backups.jobs[]?' 
"$CONFIG_FILE" 2>/dev/null || echo "") + if [[ -n "$BACKUP_JOBS" ]]; then + while IFS= read -r job; do + name=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.name') + backend=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.backend') + schedule=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.schedule') + + service_name="backup-''${name}" + timer_name="''${service_name}.timer" + + timer_status=$(get_timer_status "$timer_name") + + # Get last backup info + last_run="Unknown" + if ${pkgs.systemd}/bin/systemctl show "$service_name" --property=ExecMainStartTimestamp --value 2>/dev/null | grep -q "^[^n]"; then + last_run=$(${pkgs.systemd}/bin/systemctl show "$service_name" --property=ExecMainStartTimestamp --value 2>/dev/null | head -1) + if [[ "$last_run" != "n/a" && -n "$last_run" ]]; then + last_run=$(${pkgs.coreutils}/bin/date -d "$last_run" "+%Y-%m-%d %H:%M" 2>/dev/null || echo "Unknown") + fi + fi + + printf " %-20s %s (%s, %s) Last: %s\n" "$name" "$timer_status" "$backend" "$schedule" "$last_run" + done <<< "$BACKUP_JOBS" + + # Show backup-status command output if available + if command -v backup-status >/dev/null 2>&1; then + echo -e "\n ''${BOLD}Quick Status:''${NC}" + backup-status 2>/dev/null | tail -n +3 | head -10 | sed 's/^/ /' + fi + else + echo -e " ''${YELLOW}No backup jobs configured''${NC}" + fi + ''} + + ${optionalString cfg.showReverseProxy '' + # Reverse proxy entries + print_section "REVERSE PROXY ENTRIES" + PROXY_ENTRIES=$(${pkgs.jq}/bin/jq -c '.reverseProxy.entries[]?' "$CONFIG_FILE" 2>/dev/null || echo "") + if [[ -n "$PROXY_ENTRIES" ]]; then + while IFS= read -r entry; do + subdomain=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.subdomain') + port=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.port') + domain=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.domain') + auth=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.enableAuth') + ssl=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.enableSSL') + + # Check if service is running on the port + if ${pkgs.netcat}/bin/nc -z localhost "$port" 2>/dev/null; then + status="''${GREEN}●''${NC}" + else + status="''${RED}●''${NC}" + fi + + auth_indicator="" + [[ "$auth" == "true" ]] && auth_indicator=" πŸ”" + + ssl_indicator="" + [[ "$ssl" == "true" ]] && ssl_indicator=" πŸ”’" + + printf " %-25s %s :%s β†’ %s%s%s\n" "''${domain}" "$status" "$port" "$domain" "$auth_indicator" "$ssl_indicator" + done <<< "$PROXY_ENTRIES" + else + echo -e " ''${YELLOW}No reverse proxy entries configured''${NC}" + fi + ''} + + ${optionalString cfg.showResources '' + # Resource usage + print_section "RESOURCE USAGE" + echo -e " ''${BOLD}Memory:''${NC} $(${pkgs.procps}/bin/free -h | awk '/^Mem:/ {printf "%s/%s (%.1f%%)", $3, $2, ($3/$2)*100}')" + echo -e " ''${BOLD}Disk (root):''${NC} $(${pkgs.coreutils}/bin/df -h / | awk 'NR==2 {printf "%s/%s (%s)", $3, $2, $5}')" + echo -e " ''${BOLD}CPU Usage:''${NC} $(${pkgs.procps}/bin/top -bn1 | grep "Cpu(s)" | awk '{printf "%.1f%%", $2+$4}' | sed 's/%us,//')%" + ''} + + ${optionalString cfg.showRecentIssues '' + # Recent logs (errors only) + print_section "RECENT ISSUES" + error_count=$(${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q | wc -l) + if [[ "$error_count" -gt 0 ]]; then + echo -e " ''${RED}⚠ $error_count errors in last 24h''${NC}" + ${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q | tail -3 | sed 's/^/ /' + else + echo -e " ''${GREEN}βœ“ No critical errors in last 24h''${NC}" + fi + ''} + + echo -e "\n''${BOLD}''${BLUE}╔══════════════════════════════════════════════════════════════╗''${NC}" + 
echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${WHITE}Run 'backup-status' for detailed backup info ''${NC}''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•‘''${NC} ''${WHITE}Config: /etc/homelab/config.json ''${NC}''${BOLD}''${BLUE}β•‘''${NC}" + echo -e "''${BOLD}''${BLUE}β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•''${NC}" + echo + ''; +in { + options.homelab.motd = { + enable = mkEnableOption "Dynamic homelab MOTD"; + + clearScreen = mkOption { + type = types.bool; + default = true; + description = "Clear screen before showing MOTD"; + }; + + showServices = mkOption { + type = types.bool; + default = true; + description = "Show enabled homelab services"; + }; + + showMonitoring = mkOption { + type = types.bool; + default = true; + description = "Show monitoring endpoints"; + }; + + showBackups = mkOption { + type = types.bool; + default = true; + description = "Show backup jobs status"; + }; + + showReverseProxy = mkOption { + type = types.bool; + default = true; + description = "Show reverse proxy entries"; + }; + + showResources = mkOption { + type = types.bool; + default = true; + description = "Show system resource usage"; + }; + + showRecentIssues = mkOption { + type = types.bool; + default = true; + description = "Show recent system issues"; + }; + + services = mkOption { + type = types.attrsOf (types.submodule { + options = { + systemdService = mkOption { + type = types.str; + description = "Name of the systemd service to monitor"; + }; + description = mkOption { + type = types.str; + default = ""; + description = "Human-readable description of the service"; + }; + }; + }); + default = {}; + description = "Homelab services to monitor in MOTD"; + }; + }; + + config = mkIf (cfg.enable && globalCfg.enable) { + # Register services with MOTD + homelab.motd.services = + mapAttrs (name: service: { + systemdService = name; + description = service.description; + }) + enabledServices; + + # Create a command to manually run the MOTD + environment.systemPackages = with pkgs; [ + jq + netcat + homelab-motd + ]; + }; +} diff --git a/modules/nixos/services/default.nix b/modules/nixos/services/default.nix new file mode 100644 index 0000000..c5ac354 --- /dev/null +++ b/modules/nixos/services/default.nix @@ -0,0 +1,4 @@ +{ + jellyfin = import ./jellyfin.nix; + grafana = import ./grafana.nix; +} diff --git a/modules/nixos/services/forgejo-runner.nix b/modules/nixos/services/forgejo-runner.nix new file mode 100644 index 0000000..e69de29 diff --git a/modules/nixos/services/forgejo.nix b/modules/nixos/services/forgejo.nix new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/modules/nixos/services/forgejo.nix @@ -0,0 +1 @@ + diff --git a/modules/nixos/services/grafana.nix b/modules/nixos/services/grafana.nix new file mode 100644 index 0000000..f76edf7 --- /dev/null +++ b/modules/nixos/services/grafana.nix @@ -0,0 +1,72 @@ +# modules/services/grafana.nix +{ + config, + lib, + pkgs, + ... 
+}: +with lib; let + cfg = config.homelab.services.grafana; + helpers = import ../lib/helpers.nix {inherit lib;}; +in { + # Namespaced under homelab.services.* so we don't re-declare the upstream services.grafana options + options.homelab.services.grafana = { + enable = mkEnableOption "Grafana monitoring dashboard"; + port = mkOption { + type = types.port; + default = 3000; + description = "Grafana web interface port"; + }; + adminPassword = mkOption { + type = types.str; + description = "Admin password for Grafana"; + }; + }; + + config = mkIf cfg.enable { + services.grafana = { + enable = true; + settings = { + server = { + http_port = cfg.port; + domain = "${config.homelab.global.hostname}.${config.homelab.global.domain}"; + }; + security = { + admin_password = cfg.adminPassword; + }; + }; + }; + + homelab.global = { + backups.jobs = [ + { + name = "grafana-data"; + backend = "restic"; + paths = ["/var/lib/grafana"]; + schedule = "daily"; + excludePatterns = ["*/plugins/*" "*/png/*"]; + } + ]; + + reverseProxy.entries = [ + { + subdomain = "grafana"; + port = cfg.port; + enableAuth = false; # Grafana handles its own auth + } + ]; + + monitoring.endpoints = [ + { + name = "grafana"; + port = cfg.port; + path = "/metrics"; + jobName = "grafana"; + labels = { + service = "grafana"; + type = "monitoring"; + }; + } + ]; + }; + }; +} diff --git a/modules/nixos/services/jellyfin.nix b/modules/nixos/services/jellyfin.nix new file mode 100644 index 0000000..1aac7e5 --- /dev/null +++ b/modules/nixos/services/jellyfin.nix @@ -0,0 +1,125 @@ +# modules/services/jellyfin.nix +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.homelab.services.jellyfin; +in { + # Same pattern as grafana: declared under homelab.services.* to avoid clashing with the upstream module + options.homelab.services.jellyfin = { + enable = mkEnableOption "Jellyfin media server"; + + port = mkOption { + type = types.port; + default = 8096; + description = "Port for Jellyfin web interface"; + }; + + dataDir = mkOption { + type = types.str; + default = "/var/lib/jellyfin"; + description = "Directory to store Jellyfin data"; + }; + + mediaDir = mkOption { + type = types.str; + default = "/media"; + description = "Directory containing media files"; + }; + + enableMetrics = mkOption { + type = types.bool; + default = true; + description = "Enable Prometheus metrics"; + }; + + exposeWeb = mkOption { + type = types.bool; + default = true; + description = "Expose web interface through reverse proxy"; + }; + }; + + config = mkIf cfg.enable { + # Enable the service + services.jellyfin = { + enable = true; + dataDir = cfg.dataDir; + }; + + # Configure global settings + homelab.global = { + # Add backup job for Jellyfin data + backups.jobs = [ + { + name = "jellyfin-config"; + backend = "restic"; + paths = ["${cfg.dataDir}/config" "${cfg.dataDir}/data"]; + schedule = "0 2 * * *"; # Daily at 2 AM + excludePatterns = [ + "*/cache/*" + "*/transcodes/*" + "*/logs/*" + ]; + preHook = '' + # Stop jellyfin for consistent backup + systemctl stop jellyfin + ''; + postHook = '' + # Restart jellyfin after backup + systemctl start jellyfin + ''; + } + { + name = "jellyfin-media"; + backend = "restic"; + paths = [cfg.mediaDir]; + schedule = "0 3 * * 0"; # Weekly on Sunday at 3 AM + excludePatterns = [ + "*.tmp" + "*/.@__thumb/*" # Synology thumbnails + ]; + } + ]; + + # Add reverse proxy entry if enabled + reverseProxy.entries = mkIf cfg.exposeWeb [ + { + subdomain = "jellyfin"; + port = cfg.port; + enableAuth = false; # Jellyfin has its own auth + websockets = true; + customHeaders = { + "X-Forwarded-Proto" = "$scheme"; + "X-Forwarded-Host" = "$host"; + }; + } + ]; + + # Add monitoring endpoint if metrics enabled + monitoring.endpoints = mkIf cfg.enableMetrics [ + { +
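+ # Registered with homelab.global so a Prometheus host can auto-discover it via the homelab-auto scrape job (see modules/nixos/services/prometheus.nix below).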
name = "jellyfin"; + port = cfg.port; + path = "/metrics"; # Assuming you have a metrics plugin + jobName = "jellyfin"; + scrapeInterval = "60s"; + labels = { + service = "jellyfin"; + type = "media-server"; + }; + } + ]; + }; + + # Open firewall + networking.firewall.allowedTCPPorts = [cfg.port]; + + # Create media directory + systemd.tmpfiles.rules = [ + "d ${cfg.mediaDir} 0755 jellyfin jellyfin -" + ]; + }; +} diff --git a/modules/nixos/services/postgres.nix b/modules/nixos/services/postgres.nix new file mode 100644 index 0000000..e69de29 diff --git a/modules/nixos/services/prometheus.nix b/modules/nixos/services/prometheus.nix new file mode 100644 index 0000000..9485b3a --- /dev/null +++ b/modules/nixos/services/prometheus.nix @@ -0,0 +1,208 @@ +# modules/services/prometheus.nix +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.homelab.services.prometheus; + globalCfg = config.homelab.global; +in { + options.homelab.services.prometheus = { + enable = mkEnableOption "Prometheus monitoring server"; + + port = mkOption { + type = types.port; + default = 9090; + description = "Prometheus server port"; + }; + + webExternalUrl = mkOption { + type = types.str; + default = "http://${globalCfg.hostname}:${toString cfg.port}"; + description = "External URL for Prometheus"; + }; + + retention = mkOption { + type = types.str; + default = "30d"; + description = "Data retention period"; + }; + + scrapeConfigs = mkOption { + type = types.listOf types.attrs; + default = []; + description = "Additional scrape configurations"; + }; + + alertmanager = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable Alertmanager integration"; + }; + + url = mkOption { + type = types.str; + default = "http://localhost:9093"; + description = "Alertmanager URL"; + }; + }; + }; + + config = mkIf cfg.enable { + # Register service with global homelab config + homelab.global.services.prometheus = { + enable = true; + description = "Metrics collection and monitoring server"; + category = "monitoring"; + ports = [cfg.port]; + tags = ["metrics" "monitoring" "alerting"]; + priority = 20; + dependencies = ["node-exporter"]; + }; + + # Configure the actual Prometheus service + services.prometheus = { + enable = true; + port = cfg.port; + webExternalUrl = cfg.webExternalUrl; + + retentionTime = cfg.retention; + + scrapeConfigs = + [ + # Auto-discover monitoring endpoints from global config + { + job_name = "homelab-auto"; + static_configs = [ + { + targets = + map ( + endpoint: "${globalCfg.hostname}:${toString endpoint.port}" + ) + globalCfg.monitoring.endpoints; + } + ]; + scrape_interval = "30s"; + metrics_path = "/metrics"; + } + ] + ++ cfg.scrapeConfigs; + + # Alertmanager configuration + alertmanagers = mkIf cfg.alertmanager.enable [ + { + static_configs = [ + { + targets = [cfg.alertmanager.url]; + } + ]; + } + ]; + + rules = [ + # Basic homelab alerting rules + (pkgs.writeText "homelab-alerts.yml" '' + groups: + - name: homelab + rules: + - alert: ServiceDown + expr: up == 0 + for: 5m + labels: + severity: critical + annotations: + summary: "Service {{ $labels.instance }} is down" + description: "{{ $labels.job }} on {{ $labels.instance }} has been down for more than 5 minutes." 
+ + - alert: HighMemoryUsage + expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9 + for: 10m + labels: + severity: warning + annotations: + summary: "High memory usage on {{ $labels.instance }}" + description: "Memory usage is above 90% on {{ $labels.instance }}" + + - alert: HighDiskUsage + expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes > 0.85 + for: 5m + labels: + severity: warning + annotations: + summary: "High disk usage on {{ $labels.instance }}" + description: "Disk usage is above 85% on {{ $labels.instance }} for filesystem {{ $labels.mountpoint }}" + '' + ]; + }; + + # Add monitoring endpoint to global config + homelab.global.monitoring.endpoints = [ + { + name = "prometheus"; + port = cfg.port; + path = "/metrics"; + jobName = "prometheus"; + scrapeInterval = "30s"; + labels = { + service = "prometheus"; + role = "monitoring"; + }; + } + ]; + + # Add reverse proxy entry if configured + homelab.global.reverseProxy.entries = mkIf (globalCfg.domain != null) [ + { + subdomain = "prometheus"; + port = cfg.port; + path = "/"; + enableAuth = true; + enableSSL = true; + customHeaders = { + "X-Frame-Options" = "DENY"; + "X-Content-Type-Options" = "nosniff"; + }; + } + ]; + + # Add backup job for Prometheus data + homelab.global.backups.jobs = [ + { + name = "prometheus-data"; + backend = "restic"; + paths = ["/var/lib/prometheus2"]; + schedule = "daily"; + retention = { + daily = "7"; + weekly = "4"; + monthly = "3"; + yearly = "1"; + }; + excludePatterns = [ + "*.tmp" + "*/wal/*" + ]; + preHook = '' + # Stop prometheus temporarily for consistent backup + systemctl stop prometheus + ''; + postHook = '' + # Restart prometheus after backup + systemctl start prometheus + ''; + } + ]; + + # Open firewall port + networking.firewall.allowedTCPPorts = [cfg.port]; + + # Create prometheus configuration directory + systemd.tmpfiles.rules = [ + "d /var/lib/prometheus2 0755 prometheus prometheus -" + "d /etc/prometheus 0755 root root -" + ]; + }; +} diff --git a/modules/nixos/system/backups/backrest.nix b/modules/nixos/system/backups/backrest.nix new file mode 100644 index 0000000..e230402 --- /dev/null +++ b/modules/nixos/system/backups/backrest.nix @@ -0,0 +1,4 @@ +{ + # TODO + # https://github.com/L-Trump/nixos-configs/blob/ab3fb16e330b8a2904b9967e46af8c061b56266e/modules/nixos/server/backrest.nix#L7 +} diff --git a/modules/nixos/system/backups/backups-option.nix b/modules/nixos/system/backups/backups-option.nix new file mode 100644 index 0000000..137f73f --- /dev/null +++ b/modules/nixos/system/backups/backups-option.nix @@ -0,0 +1,95 @@ +# backups-option.nix +cfg: let + inherit (cfg.lib) mkOption types mkEnableOption attrNames; +in + mkOption { + type = types.attrsOf ( + types.submodule ( + { + name, + config, + ... + } @ args: { + options = { + backend = mkOption { + type = types.enum (attrNames cfg.backends); + description = "The backup backend to use"; + }; + + paths = mkOption { + type = types.listOf types.str; + default = []; + description = "Paths to backup"; + }; + + enable = mkOption { + type = types.bool; + default = true; + description = "Whether to enable this backup job"; + }; + + timerConfig = mkOption { + type = with types; nullOr attrs; + default = null; + example = { + OnCalendar = "00:05"; + Persistent = true; + RandomizedDelaySec = "5h"; + }; + description = '' + When to run the backup. If null, inherits from the backend's default timerConfig.
+ ''; + }; + + backendOptions = mkOption { + type = let + backupConfig = config; + backupName = name; + in + types.submodule ( + {config, ...} @ args'': + cfg.backends.${args.config.backend} (args'' // {inherit backupConfig backupName;}) + ); + default = {}; + description = "Backend-specific options"; + }; + + preBackupScript = mkOption { + type = types.lines; + default = ""; + description = "Script to run before backing up"; + }; + + postBackupScript = mkOption { + type = types.lines; + default = ""; + description = '' + Script to run after backing up. Runs even if the backup fails. + ''; + }; + + notifications = { + failure = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable failure notifications"; + }; + }; + + success = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable success notifications"; + }; + }; + }; + }; + } + ) + ); + default = {}; + description = "Backup job definitions"; + } diff --git a/modules/nixos/system/backups/default.nix b/modules/nixos/system/backups/default.nix new file mode 100644 index 0000000..d29d46e --- /dev/null +++ b/modules/nixos/system/backups/default.nix @@ -0,0 +1,6 @@ +{ + imports = [ + ./root.nix + ./restic.nix + ]; +} diff --git a/modules/nixos/system/backups/restic.nix b/modules/nixos/system/backups/restic.nix new file mode 100644 index 0000000..58bfb1b --- /dev/null +++ b/modules/nixos/system/backups/restic.nix @@ -0,0 +1,234 @@ +# restic.nix - Restic backend implementation +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.system.backups; + resticCfg = cfg.restic; + + # Get only restic backups that are enabled + resticBackups = filterAttrs (_: backup: backup.backend == "restic" && backup.enable) cfg.backups; + + # Create restic service configurations + createResticServices = + mapAttrs ( + name: backup: let + # Merge global defaults with backup-specific options + serviceConfig = + recursiveUpdate resticCfg.defaultBackendOptions backup.backendOptions + // { + inherit (backup) paths; + + # Use backup-specific timer or fall back to global default + timerConfig = + if backup.timerConfig != null + then backup.timerConfig + else resticCfg.timerConfig; + }; + in + serviceConfig + ) + resticBackups; +in { + options.system.backups.restic = { + enable = mkEnableOption "restic backup backend"; + + timerConfig = mkOption { + type = types.attrs; + default = { + OnCalendar = "*-*-* 05:00:00"; + Persistent = true; + }; + description = "Default systemd timer configuration for restic backups"; + }; + + defaultBackendOptions = mkOption { + type = types.attrs; + default = {}; + example = { + repository = "/backup/restic"; + passwordFile = "/etc/nixos/secrets/restic-password"; + initialize = true; + pruneOpts = [ + "--keep-daily 7" + "--keep-weekly 5" + "--keep-monthly 12" + "--keep-yearly 75" + ]; + }; + description = "Default backend options applied to all restic backup jobs"; + }; + + # Advanced options + runMaintenance = mkOption { + type = types.bool; + default = true; + description = "Whether to run repository maintenance after backups"; + }; + + maintenanceTimer = mkOption { + type = types.attrs; + default = { + OnCalendar = "*-*-* 06:00:00"; + Persistent = true; + }; + description = "Timer configuration for maintenance tasks"; + }; + + pruneOpts = mkOption { + type = types.listOf types.str; + default = [ + "--keep-daily 7" + "--keep-weekly 4" + "--keep-monthly 6" + "--keep-yearly 3" + ]; + description = "Default pruning
options for maintenance"; + }; + }; + + config = mkIf resticCfg.enable { + # Register restic backend + system.backups.backends.restic = { + backupConfig, + backupName, + ... + }: { + # Define the proper options schema for restic backendOptions + options = { + repository = mkOption { + type = types.str; + description = "Restic repository path or URL"; + }; + + passwordFile = mkOption { + type = types.str; + description = "Path to file containing the repository password"; + }; + + initialize = mkOption { + type = types.bool; + default = true; + description = "Whether to initialize the repository if it doesn't exist"; + }; + + exclude = mkOption { + type = types.listOf types.str; + default = []; + description = "Patterns to exclude from backup"; + }; + + extraBackupArgs = mkOption { + type = types.listOf types.str; + default = []; + description = "Additional arguments passed to restic backup command"; + }; + + user = mkOption { + type = types.str; + default = "root"; + description = "User to run the backup as"; + }; + + pruneOpts = mkOption { + type = types.listOf types.str; + default = resticCfg.pruneOpts; + description = "Pruning options for this backup"; + }; + }; + + # Default config merged with global defaults + config = { + extraBackupArgs = + [ + "--tag ${backupName}" + "--verbose" + ] + ++ (resticCfg.defaultBackendOptions.extraBackupArgs or []); + }; + }; + + # Create actual restic backup services + services.restic.backups = createResticServices; + + # Add restic package + environment.systemPackages = [pkgs.restic]; + + # Systemd service customizations for restic backups + systemd.services = + (mapAttrs' ( + name: backup: + nameValuePair "restic-backups-${name}" { + # Custom pre/post scripts + preStart = mkBefore backup.preBackupScript; + postStop = mkAfter backup.postBackupScript; + + # Enhanced service configuration + serviceConfig = { + # Restart configuration + Restart = "on-failure"; + RestartSec = "5m"; + RestartMaxDelaySec = "30m"; + RestartSteps = 3; + + # Rate limiting + StartLimitBurst = 4; + StartLimitIntervalSec = "2h"; + }; + + # Failure handling could be extended here for notifications + # onFailure = optional backup.notifications.failure.enable "restic-backup-${name}-failure-notify.service"; + } + ) + resticBackups) + // optionalAttrs resticCfg.runMaintenance { + # Repository maintenance service + restic-maintenance = { + description = "Restic repository maintenance"; + after = map (name: "restic-backups-${name}.service") (attrNames resticBackups); + + environment = { + # Map the backend options onto the env vars restic actually reads; defaultBackendOptions itself is an option set, not environment-shaped + RESTIC_REPOSITORY = resticCfg.defaultBackendOptions.repository or ""; + RESTIC_PASSWORD_FILE = resticCfg.defaultBackendOptions.passwordFile or ""; + RESTIC_CACHE_DIR = "/var/cache/restic-maintenance"; + }; + + serviceConfig = { + Type = "oneshot"; + ExecStart = [ + "${pkgs.restic}/bin/restic forget --prune ${concatStringsSep " " resticCfg.pruneOpts}" + "${pkgs.restic}/bin/restic check --read-data-subset=500M" + ]; + + User = "root"; + CacheDirectory = "restic-maintenance"; + CacheDirectoryMode = "0700"; + }; + }; + }; + + # Maintenance timer + systemd.timers = mkIf resticCfg.runMaintenance { + restic-maintenance = { + description = "Timer for restic repository maintenance"; + wantedBy = ["timers.target"]; + timerConfig = resticCfg.maintenanceTimer; + }; + }; + + # Helpful shell aliases + programs.zsh.shellAliases = + { + restic-snapshots = "restic snapshots --compact --group-by tags"; + restic-repo-size = "restic stats --mode raw-data"; + } + // (mapAttrs' ( + name: _: + nameValuePair "backup-${name}" "systemctl start restic-backups-${name}" + ) + resticBackups); + }; +} diff --git
a/modules/nixos/system/backups/root.nix b/modules/nixos/system/backups/root.nix new file mode 100644 index 0000000..5656f72 --- /dev/null +++ b/modules/nixos/system/backups/root.nix @@ -0,0 +1,66 @@ +# root.nix - Main backup system module +{ + config, + lib, + pkgs, + ... +}: +with lib; let + cfg = config.system.backups; + + # Filter backups by backend + getBackupsByBackend = backend: + filterAttrs (_: backup: backup.backend == backend && backup.enable) cfg.backups; +in { + options.system.backups = { + # Backend registration system - backends register themselves here + backends = mkOption { + type = with types; attrsOf (functionTo attrs); + internal = true; + default = {}; + description = '' + Attribute set of backends where the value is a function that accepts + backend-specific arguments and returns an attribute set for the backend's options. + ''; + }; + + # Import the backups option from separate file, passing cfg for backend inference + backups = import ./backups-option.nix cfg; + + # Pass lib to the backups-option for access to mkOption, types, etc. + lib = mkOption { + type = types.attrs; + internal = true; + default = lib; + }; + }; + + config = { + # Re-export backups at root level for convenience + # backups = cfg.backups; + + # Common backup packages + environment.systemPackages = with pkgs; [ + # Add common backup utilities here + ]; + + # Common systemd service modifications for all backup services + systemd.services = let + allBackupServices = flatten ( + mapAttrsToList ( + backendName: backups: + mapAttrsToList (name: backup: "${backendName}-backups-${name}") backups + ) (genAttrs (attrNames cfg.backends) (backend: getBackupsByBackend backend)) + ); + in + genAttrs allBackupServices (serviceName: { + serviceConfig = { + # Common hardening for all backup services + ProtectSystem = "strict"; + ProtectHome = "read-only"; + PrivateTmp = true; + NoNewPrivileges = true; + }; + }); + }; +} diff --git a/nixos/README.md b/nixos/README.md index 3c98a20..d892b5f 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -3,7 +3,7 @@ nixos-rebuild switch --flake .#proxmox --target-host root@192.168.1.205 --verbos nixos-rebuild switch --flake .#sandbox --target-host root@sandbox.lab --verbose nixos-rebuild switch --flake .#monitoring --target-host root@monitor.lab --verbose nixos-rebuild switch --flake .#forgejo --target-host root@forgejo.lab --verbose -nixos-rebuild switch --flake .#dns --target-host root@192.168.1.140 --verbose +nixos-rebuild switch --flake .#dns --target-host root@dns.lab --verbose nixos-rebuild switch --flake .#keycloak --target-host root@keycloak.lab --verbose nixos-rebuild switch --flake .#mail --target-host root@mail.lab --verbose nixos-rebuild switch --flake .#media --target-host root@media.lab --verbose diff --git a/nixos/hosts/forgejo/README.md b/nixos/hosts/forgejo/README.md deleted file mode 100644 index 8420219..0000000 --- a/nixos/hosts/forgejo/README.md +++ /dev/null @@ -1,17 +0,0 @@ -πŸ₯‡ Phase 1: Git + Secrets - - βœ… Set up Forgejo VM (NixOS declarative) - - βœ… Set up sops-nix + age keys (can live in the Git repo) - - βœ… Push flake + ansible + secrets to Forgejo - - βœ… Write a basic README with how to rebuild infra - -πŸ₯ˆ Phase 2: GitOps - - πŸ” Add CI runner VM - - πŸ” Configure runner to deploy (nixos-rebuild or ansible-playbook) on commit - - πŸ” Optional: add webhooks to auto-trigger via Forgejo diff --git a/nixos/hosts/forgejo/forgejo.nix b/nixos/hosts/forgejo/forgejo.nix index 11ed29f..049673a 100644 --- a/nixos/hosts/forgejo/forgejo.nix +++ 
b/nixos/hosts/forgejo/forgejo.nix @@ -18,7 +18,7 @@ in { stateDir = "/srv/forgejo"; secrets = { mailer = { - PASSWD = ; + PASSWD = config.sops.secrets.forgejo-mailer-password.path; }; }; settings = { @@ -76,12 +76,12 @@ in { ALLOW_DEACTIVATE_ALL = false; }; - oauth2 = { - }; - oauth2_client = { - ENABLE_AUTO_REGISTRATION = true; - UPDATE_AVATAR = true; - }; + # oauth2 = { + # }; + # oauth2_client = { + # ENABLE_AUTO_REGISTRATION = true; + # UPDATE_AVATAR = true; + # }; # log = { # ROOT_PATH = "/var/log/forgejo"; # MODE = "file"; diff --git a/nixos/hosts/forgejo/sops.nix b/nixos/hosts/forgejo/sops.nix index 7e96908..4d2b560 100644 --- a/nixos/hosts/forgejo/sops.nix +++ b/nixos/hosts/forgejo/sops.nix @@ -1,7 +1,6 @@ let forgejoSops = ../../secrets/forgejo/secrets.yml; -in -{ +in { sops.secrets = { "forgejo-admin-password" = { sopsFile = forgejoSops; @@ -15,5 +14,9 @@ in sopsFile = forgejoSops; owner = "forgejo"; }; + "forgejo-mailer-password" = { + sopsFile = forgejoSops; + owner = "forgejo"; + }; }; } diff --git a/nixos/hosts/traefik/configuration/infra/routers.nix b/nixos/hosts/traefik/configuration/infra/routers.nix index 67ed719..3312e1a 100644 --- a/nixos/hosts/traefik/configuration/infra/routers.nix +++ b/nixos/hosts/traefik/configuration/infra/routers.nix @@ -15,6 +15,13 @@ middlewares = []; }; + roundcube = { + rule = "Host(`roundcube.procopius.dk`)"; + service = "roundcube"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; + forgejo = { rule = "Host(`git.procopius.dk`)"; service = "forgejo"; @@ -34,10 +41,4 @@ entryPoints = ["websecure"]; tls.certResolver = "letsencrypt"; }; - catchAll = { - rule = "HostRegexp(`.+`)"; - service = "nginx"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; } diff --git a/nixos/hosts/traefik/configuration/infra/services.nix b/nixos/hosts/traefik/configuration/infra/services.nix index 27de8c8..35a49f2 100644 --- a/nixos/hosts/traefik/configuration/infra/services.nix +++ b/nixos/hosts/traefik/configuration/infra/services.nix @@ -2,12 +2,11 @@ traefik.loadBalancer.servers = [{url = "http://localhost:8080";}]; mail-acme.loadBalancer.servers = [{url = "http://mail.lab:80";}]; + roundcube.loadBalancer.servers = [{url = "http://mail.lab:80";}]; forgejo.loadBalancer.servers = [{url = "http://forgejo.lab:3000";}]; proxmox.loadBalancer.servers = [{url = "https://192.168.1.205:8006";}]; proxmox.loadBalancer.serversTransport = "insecureTransport"; nas.loadBalancer.servers = [{url = "https://192.168.1.226:5001";}]; nas.loadBalancer.serversTransport = "insecureTransport"; - nginx.loadBalancer.servers = [{url = "https://192.168.1.226:4433";}]; - nginx.loadBalancer.serversTransport = "insecureTransport"; } diff --git a/nixos/hosts/traefik/configuration/photos/routers.nix b/nixos/hosts/traefik/configuration/photos/routers.nix index 65da5a3..c0b94c9 100644 --- a/nixos/hosts/traefik/configuration/photos/routers.nix +++ b/nixos/hosts/traefik/configuration/photos/routers.nix @@ -32,4 +32,52 @@ entryPoints = ["websecure"]; tls.certResolver = "letsencrypt"; }; + + ente-minio = { + rule = "Host(`ente-minio.procopius.dk`)"; + service = "ente-minio"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; + ente-minio-api = { + rule = "Host(`ente-minio-api.procopius.dk`)"; + service = "ente-minio-api"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; + + ente-museum = { + rule = "Host(`ente-museum.procopius.dk`)"; + service = "ente-museum"; + entryPoints = ["websecure"]; + tls.certResolver = 
"letsencrypt"; + }; + + ente-photos = { + rule = "Host(`ente-photos.procopius.dk`) || Host(`ente-albums.procopius.dk`)"; + service = "ente-photos"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; + + ente-cast = { + rule = "Host(`ente-cast.procopius.dk`) "; + service = "ente-cast"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; + + ente-accounts = { + rule = "Host(`ente-accounts.procopius.dk`) "; + service = "ente-accounts"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; + + ente-auth = { + rule = "Host(`ente-auth.procopius.dk`) "; + service = "ente-auth"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; + }; } diff --git a/nixos/hosts/traefik/configuration/photos/services.nix b/nixos/hosts/traefik/configuration/photos/services.nix index 7bec8af..7857a55 100644 --- a/nixos/hosts/traefik/configuration/photos/services.nix +++ b/nixos/hosts/traefik/configuration/photos/services.nix @@ -4,4 +4,12 @@ account.loadBalancer.servers = [{url = "http://192.168.1.226:3001";}]; minio.loadBalancer.servers = [{url = "http://192.168.1.226:3201";}]; minio-api.loadBalancer.servers = [{url = "http://192.168.1.226:3200";}]; + + ente-minio.loadBalancer.servers = [{url = "http://photos.lab:9001";}]; + ente-minio-api.loadBalancer.servers = [{url = "http://photos.lab:9000";}]; + ente-museum.loadBalancer.servers = [{url = "http://photos.lab:8080";}]; + ente-photos.loadBalancer.servers = [{url = "http://photos.lab:3000";}]; + ente-accounts.loadBalancer.servers = [{url = "http://photos.lab:3001";}]; + ente-cast.loadBalancer.servers = [{url = "http://photos.lab:3004";}]; + ente-auth.loadBalancer.servers = [{url = "http://photos.lab:3003";}]; } diff --git a/nixos/hosts/vpn/README.md b/nixos/hosts/vpn/README.md deleted file mode 100644 index e76e6ef..0000000 --- a/nixos/hosts/vpn/README.md +++ /dev/null @@ -1,127 +0,0 @@ -Great question β€” and you're absolutely right to ask. - -You **don’t need** Innernet or Headscale+patch *if* you're comfortable building a small self-hosted portal that handles: - -* βœ… OIDC login -* βœ… WireGuard peer key management -* βœ… Config generation and download - -So let’s break it down: - ---- - -## 🧩 Why *Innernet* and *Headscale+patch* Exist - -Those tools solve **user/device coordination**, dynamic routing, and access control *automatically*, **with batteries included**. They're great if you want a more managed, β€œplug-and-play” experience. 
Specifically: - -| Tool | Purpose | -| ------------------------ | ----------------------------------------------------------------- | -| **Innernet** | Full WireGuard-based mesh + OIDC login + per-device management | -| **Headscale** | Tailscale backend, enables user login + ACL + device coordination | -| **Headscale+OIDC patch** | Add OIDC login to Headscale (by default, only pre-shared keys) | - -These tools do things like: - -* Assign IPs/DNS names to users -* Regenerate keys -* Handle coordination between peers -* Gate access with identity - ---- - -## πŸ› οΈ Why You *Don’t* Need Them (Your Custom Portal Idea) - -If you're fine with: - -* Managing WireGuard server and IPs manually -* Writing a small web service to issue `.conf` files after login -* Using firewall rules for access control - -Then you absolutely **can** build a custom system around **WireGuard + OIDC** like this: - ---- - -### 🧱 Architecture: WireGuard + OIDC + Config Generator - -``` - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ Your OIDC β”‚ - β”‚ provider │◄────── Google, Authentik, Keycloak - β””β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - (OIDC Login) - β”‚ - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ Web Portal │◄────── nginx + Go, Node.js, Flask, etc. - β”‚ (self-hosted) β”‚ - β””β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”˜ - Gen Keys β”‚ β”‚ Serve config - & register β”‚ └─────> `client.conf` / QR code - β–Ό - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ WireGuard server β”‚ (NixOS) - β”‚ - Handles all peers β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - ---- - -### πŸ” What Your Portal Needs to Do - -* Authenticate via OIDC (OAuth2 flow) -* Store a list of "authorized users" -* When a user logs in: - - * Check if they already have a WireGuard peer entry - * If not: generate a keypair, assign IP (e.g. `10.100.0.5`) - * Append to server config (`/etc/wireguard/wg0.conf`) or use `wg set` - * Return config file or QR code for the mobile client - ---- - -## βœ… Benefits of This DIY Approach - -* πŸ’‘ Full control: you own every step -* πŸ” OIDC is the gatekeeper (centrally managed access) -* ⚑ No extra daemon/software overhead (just WireGuard + your app) -* 🧩 Easily extensible (e.g., device limits, IP lease expiration) - ---- - -## βš™οΈ Tools You Can Use - -| Purpose | Tools | -| ---------------------- | -------------------------------------------------------------------------- | -| OIDC Auth | [Dex](https://github.com/dexidp/dex), Keycloak, Authentik, or Firebase | -| Web Portal Backend | Node.js (Express), Go, Flask, FastAPI | -| Config Template Engine | Go templates, EJS, Jinja2 | -| WireGuard Integration | `wg`, `wg-quick`, or directly edit `wg0.conf` | -| NixOS Support | Use `networking.wireguard.interfaces.wg0.peers` to declaratively add peers | - ---- - -## πŸ§ͺ Bonus Features You Could Build - -* πŸ” Per-user firewall rules -* πŸ” One-time config URLs (`vpn.mydomain.com/invite/xyz`) -* πŸ” Optional Authelia/Keycloak MFA -* πŸ“‰ Dashboard with currently active peers (use `wg show`) - ---- - -## ✨ Summary - -| Option | You Need Innernet / Headscale? | Login via OIDC? | DNS Routing? 
| Complexity | -| ----------------- | ------------------------------ | --------------- | ------------ | ---------- | -| Your own portal | ❌ No | βœ… Yes | ❌ Manual | 🟑 Medium | -| Innernet | βœ… Yes | βœ… Yes | βœ… Built-in | 🟠 Medium | -| Headscale + patch | βœ… Yes | βœ… Yes | βœ… Built-in | πŸ”΄ High | - -You’re on a great path by choosing the custom VPN portal idea. -Let me know if you'd like: - -* A **sample architecture repo** -* A **NixOS module to support peer configs** -* Help building the **login + config generator backend** - -I can generate a Nix flake and a working OIDC portal template to kickstart the project. diff --git a/nixos/hosts/warpgate/warpgate.nix b/nixos/hosts/warpgate/warpgate.nix deleted file mode 100644 index bc48f1e..0000000 --- a/nixos/hosts/warpgate/warpgate.nix +++ /dev/null @@ -1,14 +0,0 @@ -{ - virtualisation.oci-containers.containers = { - warpgate = { - image = "ghcr.io/warp-tech/warpgate"; - ports = [ - "2222:2222" - "8888:8888" - ]; - volumes = [ - "/srv/warpgate/data:/data" - ]; - }; - }; -} diff --git a/nixos/secrets/forgejo/secrets.yml b/nixos/secrets/forgejo/secrets.yml index 94f993a..2b11e9e 100644 --- a/nixos/secrets/forgejo/secrets.yml +++ b/nixos/secrets/forgejo/secrets.yml @@ -1,6 +1,7 @@ forgejo-admin-password: ENC[AES256_GCM,data:S05b/J9AK2SuIKDSWmtRf72C7V5FwMgZv/o5yxzNXRZEH2eIm18sC6+FEg==,iv:Ig/c4K9Io0S07Ywl4JQtbfxhjXJ7Rvea7+N4KhLUqjc=,tag:rx44tRuAbERBZR45QN6b9A==,type:str] forgejo-db-password: ENC[AES256_GCM,data:5YwRl6HNa1LzJgr73ArllG9s+vWCS7m/s6QQh5YUz8I0anG7GQ==,iv:5ARq3unUy2xbDcAFkucvEhjz/QYC2rYgutEo4T2bw2E=,tag:k7eHKqeA7k6XzksLVcnXRw==,type:str] forgejo-secret-key: ENC[AES256_GCM,data:iserDzOnJkM4HLP4c6rekSFANtRmEXwuCPyfMqo=,iv:3CNqN/DyS4PIl/iOO4JCpWJn3ARlb5KQSCNv5Orx2mo=,tag:q34jEpGrK2EKf0bcBznpQQ==,type:str] +forgejo-mailer-password: ENC[AES256_GCM,data:6mX8wB7RkiCj/43G4vttusOPogUifKua3Ozgch8ewz8=,iv:BxFIto7L0A8YhhmiRYwUFDy8PeXaghE2j9SQbZ1GaZQ=,tag:gB6/9lUrz0HeQUl536Vp4A==,type:str] sops: age: - recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y @@ -12,7 +13,7 @@ sops: LzBHRWZXODVDZTE2WnVZOGNQckk4KzAKdm3xnA03JnQnc07yhVVtYkVYS6654Zm1 4AcLRSCcWvWrvp26XYVE2UGqU7acfxrTsk07o0nHAQpa5LjgJ4oFKw== -----END AGE ENCRYPTED FILE----- - lastmodified: "2025-06-06T18:38:08Z" - mac: ENC[AES256_GCM,data:BvpIz6tfVSR3m1l7g4ilUyoTKKqirt+k6tPizxCsAgjztt0IyDCio+cLTln4P1tGSy/frjvbxy1mR3tIDkWn6aDFoYz/gnsbTKHSo/K5Q77jJ3uJffoB3/Wruigojl3EBIQHALicq9xhF8rsH/RKjpWqh+TrQwO+ibbA6ff76cw=,iv:Z0ZwJ9aPpI9MtbsZnvFkW7zsFFOMj5/Gv+tF/mal+yI=,tag:knf01NC/XwgjPUHH+8RpSg==,type:str] + lastmodified: "2025-07-25T10:22:17Z" + mac: ENC[AES256_GCM,data:JiqFsbC6rxk3Pmc0vqHwElfT3kXDLJwiBZS50xo/iyOgwyWbwf5sCNdn9CMFciDsDHfd8jRp8hYfdr7VaPFwc/Iec5cwHY23+lzat1hwOkmwEDdxW7pY4IVXZEWdBaeVrFInnvdLgJAOi+KecZ2BIx0iyMEQZUKs6exxSXB2/fE=,iv:LWv0XKSBPz35+pIur98+js3ETnFDOf6aEY67L2RGpHU=,tag:VzTG6zhHVHpbVDAc2266qQ==,type:str] unencrypted_suffix: _unencrypted version: 3.10.2 diff --git a/pkgs/ente-web.nix b/pkgs/ente-web.nix index 8aa0fa6..0be16a2 100644 --- a/pkgs/ente-web.nix +++ b/pkgs/ente-web.nix @@ -9,7 +9,7 @@ nix-update-script, extraBuildEnv ? {}, # This package contains serveral sub-applications. This specifies which of them you want to build. - enteApp ? "photos", + enteApp ? "auth", # Accessing some apps (such as account) directly will result in a hardcoded redirect to ente.io. # To prevent users from accidentally logging in to ente.io instead of the selfhosted instance, you # can set this parameter to override these occurrences with your own url. 
Must include the schema. @@ -18,7 +18,7 @@ }: stdenv.mkDerivation (finalAttrs: { pname = "ente-web-${enteApp}"; - version = "1.0.4"; + version = "1.1.57"; src = fetchFromGitHub { owner = "ente-io"; @@ -26,13 +26,13 @@ stdenv.mkDerivation (finalAttrs: { sparseCheckout = ["web"]; tag = "photos-v${finalAttrs.version}"; fetchSubmodules = true; - hash = "sha256-M1kAZgqjbWNn6LqymtWRmAk/v0vWEGbyS50lVrsr85o="; + hash = "sha256-SCkxGm/w0kES7wDuLBsUTgwrFYNLvLD51NyioAVTLrg="; # lib.fakeHash; }; sourceRoot = "${finalAttrs.src.name}/web"; offlineCache = fetchYarnDeps { yarnLock = "${finalAttrs.src}/web/yarn.lock"; - hash = "sha256-EYhYwy6+7bgWckU/7SfL1PREWw9JUgKxWadSVtoZwXs="; + hash = "sha256-FnLMXOpIVNOhaM7VjNEDlwpew9T/5Ch5eFed9tLpDsI="; } nativeBuildInputs = [ diff --git a/profiles/proxmox-vm.nix b/profiles/proxmox-vm.nix new file mode 100644 index 0000000..b3fb3aa --- /dev/null +++ b/profiles/proxmox-vm.nix @@ -0,0 +1,43 @@ +# profiles/proxmox-vm.nix - Proxmox VM specific profile +{ + config, + lib, + modulesPath, + ... +}: { + imports = [ + (modulesPath + "/profiles/qemu-guest.nix") + ]; + + # Proxmox VM specific configuration + services.qemuGuest.enable = true; + + # Boot configuration for Proxmox VMs + boot = { + loader.grub = { + enable = true; + devices = ["nodev"]; + }; + growPartition = true; + tmp.cleanOnBoot = true; + + # Proxmox specific kernel modules + initrd.availableKernelModules = ["ata_piix" "uhci_hcd" "virtio_pci" "sr_mod" "virtio_blk"]; + }; + + # Standard Proxmox VM filesystem + fileSystems."/" = lib.mkDefault { + device = "/dev/disk/by-label/nixos"; + autoResize = true; + fsType = "ext4"; + }; + + # Update global config with Proxmox-specific info + homelab = { + location = lib.mkDefault "proxmox-cluster"; + tags = lib.mkDefault ["proxmox-vm" "homelab"]; + }; + + # VM-specific optimizations + services.fstrim.enable = true; +} diff --git a/scripts/config.nix b/scripts/config.nix new file mode 100644 index 0000000..36603b2 --- /dev/null +++ b/scripts/config.nix @@ -0,0 +1,18 @@ +{ + nodes, + lib, + ... +}: let + extractGlobal = name: node: + if node ? config.homelab.global + then { + ${name} = { + hostname = node.config.homelab.global.hostname; + monitoring = map (e: "${e.name}:${toString e.port}") node.config.homelab.global.monitoring.endpoints; + backups = map (b: "${b.name}(${b.backend})") node.config.homelab.global.backups.jobs; + proxy = map (p: "${p.subdomain}.${node.config.homelab.global.domain}") node.config.homelab.global.reverseProxy.entries; + }; + } + else {}; +in + lib.foldl (acc: name: acc // (extractGlobal name nodes.${name})) {} (builtins.attrNames nodes) diff --git a/scripts/deploy-homelab.sh b/scripts/deploy-homelab.sh new file mode 100755 index 0000000..f5bdb17 --- /dev/null +++ b/scripts/deploy-homelab.sh @@ -0,0 +1,115 @@ +#!/bin/bash +# Helper script: scripts/deploy-homelab.sh (the shebang must stay on the first line for direct execution) +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}=== Homelab Deployment Script ===${NC}" + +# Function to print colored output +log() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check if colmena is available +if ! command -v colmena &> /dev/null; then + error "colmena is not installed. Please install it first."
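+ # Every subcommand below shells out to colmena, so bail out early.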
+ exit 1 +fi + +# Parse arguments +COMMAND=${1:-"deploy"} +TARGET=${2:-""} + +case $COMMAND in + "deploy") + if [ -n "$TARGET" ]; then + log "Deploying to specific target: $TARGET" + colmena apply --on "$TARGET" + else + log "Deploying to all targets" + colmena apply + fi + ;; + "build") + if [ -n "$TARGET" ]; then + log "Building specific target: $TARGET" + colmena build --on "$TARGET" + else + log "Building all targets" + colmena build + fi + ;; + "status") + log "Checking deployment status" + colmena apply --dry-run + ;; + "config") + log "Showing global configuration summary" + # Extract global configs from all nodes + colmena eval ./scripts/config.nix | jq . + ;; + "backup-status") + log "Checking backup status across all nodes" + if [ -n "$TARGET" ]; then + colmena exec --on "$TARGET" -- backup-status + else + colmena exec -- backup-status + fi + ;; + "monitoring") + log "Collecting monitoring endpoints" + # --impure is needed so <nixpkgs> resolves from NIX_PATH inside the --apply expression + nix eval --json --impure .#colmena --apply 'colmena: + let + lib = (import <nixpkgs> {}).lib; + nodes = removeAttrs colmena ["meta"]; + collectEndpoints = lib.flatten ( + lib.mapAttrsToList (name: node: + if node ? config.homelab.global.monitoring.endpoints then + map (e: { + node = name; + hostname = node.config.homelab.global.hostname; + endpoint = "${e.name}:${toString e.port}${e.path}"; + job = e.jobName; + }) node.config.homelab.global.monitoring.endpoints + else [] + ) nodes + ); + in collectEndpoints + ' | jq . + ;; + "help") + echo "Usage: $0 [COMMAND] [TARGET]" + echo "" + echo "Commands:" + echo "  deploy [TARGET]     Deploy to all nodes or specific target" + echo "  build [TARGET]      Build configuration for all nodes or specific target" + echo "  status              Show deployment status (dry-run)" + echo "  config              Show global configuration summary" + echo "  backup-status       Check backup status on all nodes" + echo "  monitoring          List all monitoring endpoints" + echo "  help                Show this help message" + echo "" + echo "Examples:" + echo "  $0 deploy media-server    # Deploy only to media-server" + echo "  $0 build                  # Build all configurations" + echo "  $0 config                 # Show global config summary" + ;; + *) + error "Unknown command: $COMMAND" + echo "Run '$0 help' for usage information" + exit 1 + ;; +esac diff --git a/scripts/generate-docs.sh b/scripts/generate-docs.sh new file mode 100755 index 0000000..8ab1f4a --- /dev/null +++ b/scripts/generate-docs.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# scripts/generate-docs.sh + +echo "# Homelab Global Configuration Documentation" +echo +echo "This document describes the global configuration system for the NixOS homelab." +echo +echo "## Available Services" +echo + +# List all service modules +find modules/nixos/services -name "*.nix" | while read -r file; do + service=$(basename "$file" .nix) + echo "### $service" + echo + # Extract the description from the module, with a fallback when no mkEnableOption is found + desc=$(grep -m1 "mkEnableOption" "$file" | sed 's/.*mkEnableOption "\([^"]*\)".*/\1/') + echo "${desc:-Service module for $service}" + echo +done + +echo "## Configuration Examples" +echo +echo "### Basic Media Server Setup" +echo '```nix' +echo 'media-server = { ... }: {' +echo '  homelab.global = {' +echo '    enable = true;' +echo '    hostname = "media-server";' +echo '    domain = "homelab.local";' +echo '  };' +echo '  homelab.services.jellyfin.enable = true;' +echo '};' +echo '```' +echo + +echo "### Monitoring Configuration" +echo '```nix' +echo 'monitoring = { nodes, ...
}: {' +echo '  services.prometheus.scrapeConfigs = collectMonitoringEndpoints nodes;' +echo '};' +echo '```' diff --git a/scripts/validate-config.nix b/scripts/validate-config.nix new file mode 100644 index 0000000..c30369d --- /dev/null +++ b/scripts/validate-config.nix @@ -0,0 +1,79 @@ +# scripts/validate-config.nix +{ + lib, + pkgs, +}: let + inherit (lib) types mkOption; + + # Validation functions + validateBackupJob = job: let + errors = + [] + ++ ( + if job.paths == [] + then ["Backup job '${job.name}' has no paths defined"] + else [] + ) + ++ ( + if !(builtins.elem job.backend ["restic" "borg" "rclone"]) + then ["Invalid backup backend: ${job.backend}"] + else [] + ) + ++ ( + if job.schedule == "" + then ["Backup job '${job.name}' has no schedule defined"] + else [] + ); + in + errors; + + validateMonitoringEndpoint = endpoint: let + errors = + [] + ++ ( + if endpoint.port < 1 || endpoint.port > 65535 + then ["Invalid port ${toString endpoint.port} for endpoint '${endpoint.name}'"] + else [] + ) + ++ ( + if endpoint.jobName == "" + then ["Monitoring endpoint '${endpoint.name}' has no job name"] + else [] + ); + in + errors; + + validateReverseProxyEntry = entry: let + errors = + [] + ++ ( + if entry.subdomain == "" + then ["Reverse proxy entry has no subdomain defined"] + else [] + ) + ++ ( + if entry.port < 1 || entry.port > 65535 + then ["Invalid port ${toString entry.port} for subdomain '${entry.subdomain}'"] + else [] + ); + in + errors; + + validateGlobalConfig = config: let + backupErrors = lib.flatten (map validateBackupJob config.backups.jobs); + monitoringErrors = lib.flatten (map validateMonitoringEndpoint config.monitoring.endpoints); + proxyErrors = lib.flatten (map validateReverseProxyEntry config.reverseProxy.entries); + allErrors = backupErrors ++ monitoringErrors ++ proxyErrors; + in + if allErrors == [] + then { + valid = true; + errors = []; + } + else { + valid = false; + errors = allErrors; + }; +in { + inherit validateGlobalConfig validateBackupJob validateMonitoringEndpoint validateReverseProxyEntry; +} diff --git a/secrets/.gitignore b/secrets/.gitignore new file mode 100644 index 0000000..c996e50 --- /dev/null +++ b/secrets/.gitignore @@ -0,0 +1 @@ +*.key diff --git a/secrets/default.nix b/secrets/default.nix new file mode 100644 index 0000000..0b1b9c4 --- /dev/null +++ b/secrets/default.nix @@ -0,0 +1,42 @@ +{ + config, + lib, + pkgs, + ...
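+  # Shared sops-nix wiring; the age key itself is delivered out-of-band via colmena's deployment.keys below.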
+}: { + # SOPS configuration + sops = { + age.keyFile = "/run/keys/age.key"; + defaultSopsFile = ./secrets.yaml; + + # Define secrets that all systems need + secrets = { + # SSH keys + # "ssh/plasmagoat_private_key" = { + # owner = "plasmagoat"; + # mode = "0600"; + # path = "/home/plasmagoat/.ssh/id_rsa"; + # }; + + # # Age key for the system + # "age/system_key" = { + # mode = "0600"; + # path = "/run/keys/age.key"; + # }; + + # # Backup credentials + # "backup/restic_password" = { + # path = "/etc/backup/restic-password"; + # mode = "0600"; + # }; + }; + }; + + # Deployment keys for colmena + deployment.keys = { + "age.key" = { + destDir = "/run/keys"; + keyFile = "/home/plasmagoat/.config/age/age.key"; # Your local age key + }; + }; +} diff --git a/secrets/secrets.yaml b/secrets/secrets.yaml index 6a8d487..f2eaf9b 100644 --- a/secrets/secrets.yaml +++ b/secrets/secrets.yaml @@ -25,6 +25,18 @@ service_accounts: password: ENC[AES256_GCM,data:PpUHEhNfnR1eg7DmnO7tyNciNE4Tsx/Y4uL92gqiods=,iv:DNKQfymvgEu/iEW8t79m0ZmKTU0Ffasu+gp2KOIAK3o=,tag:lGKw5dbXqImDJNVX6p8kLg==,type:str] mail: password: ENC[AES256_GCM,data:6lfziq1zXlFxCAFWv5co3MkBgwaWixjHHX9riQXCbe0=,iv:/t4CnW3bKUDxfpE/qGf1LPs0ciivRMkfgJ1nMseruy4=,tag:TWApzLsm2HV+JMaZLG/Kig==,type:str] + ente: + password: ENC[AES256_GCM,data:bQxiCr9OgFU7oSGjkEO43iH9L2nikvvFQZsjGurtOFM=,iv:LIwzaZARQgiGdOLfpebJkKO0I71I3kX8mq8W1WC2lT4=,tag:VxK6oON6th9b1YhvC7cjjA==,type:str] +ente: + minio: + root_password: ENC[AES256_GCM,data:TIurrIEjWKdMYzIZY3dp00ert90=,iv:5kT06lUUlRC9J4DVwo7RDdxAM8zCJwwjWOF9YAZbbmk=,tag:qk7Cszn39kPijkr71ckxvg==,type:str] + root_user: ENC[AES256_GCM,data:wPj8SBzeohdG,iv:bSgCKGc+X+oofpYN0yV1aQNhAvWzcw9CTaK3FzUBKj4=,tag:ArUwveqBWXDRc5eSPZYa9g==,type:str] +cloudflare: + dns-api-token: ENC[AES256_GCM,data:/NroEdwOwqY30nwLLzO9RvEYPttDIw85A0M81fOPJSzEodtF95VCPA==,iv:BN5xZhSyvoZiXZk096KYpj59qns6hHg3PvhWC6c2sXo=,tag:3DaP3/p/JTM+beBRacGzSA==,type:str] + zone-api-token: ENC[AES256_GCM,data:FoMHKi5q+d97+pxUsyyNZAxGGgBRsZsNMePL5OeD3pcBIqtZP9MP5g==,iv:yRno4aJRlVyFTZVmat9tmFGFI1ghLw2vW+Py0+viFdE=,tag:pooTMSOsBZdbN/AE5J04MA==,type:str] +restic: + default-password: ENC[AES256_GCM,data:9gHH8V00XFveogOhVl0nLvq3olI=,iv:+wdSlZXnkTw1VKXesx3GMy5yz+kPf2FlYSPNXMB0Y0o=,tag:jHKQTfvm+G+L+Fb+3qP+rA==,type:str] + default-repository: ENC[AES256_GCM,data:znNTSZknMvL5ceINgt0iHA==,iv:taobWUuT4nKfzegk329dzFGIOdL03d6kw8JlgO1E78Y=,tag:kgM4551xZcaxzZw58AqBBQ==,type:str] sops: age: - recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y @@ -36,7 +48,7 @@ sops: QzNYRk5ERmR4aGtLQ3dwQ1lPeDZyaEkKJMLXqv6tBBql7VVnWDIwAh24SfQ2O6Ca CEOQTGEonbqr5doWqTsXUXrdQAS0amL45UdT6ITFtfNAjaHwCMfhZg== -----END AGE ENCRYPTED FILE----- - lastmodified: "2025-07-16T15:33:06Z" - mac: ENC[AES256_GCM,data:nZm7N8qfANzHadtW+3eTJljpmYejJdKGFO44iw40CnwlGgb454us9LZbQIAYkNiS7UkivoWa5BqvgLcpObHNAn3tVi+ha0jySIrAmp43y5ilmg76fvL4znel4Nk7eRiGoF3t3xiCR39/3l7PPffx2RJ6PerEyGBpiUZ6mBcWoTE=,iv:UmhSynpMdTnY0R6jwDJts13b0rKsaRFlCizdM2oargE=,tag:Q2xh/QXFOQYqqkxKs7nujA==,type:str] + lastmodified: "2025-07-27T17:17:02Z" + mac: ENC[AES256_GCM,data:i0S0G7D+yPiCWaiCmI++N0EKpED0uGpsEs+3Mc1LbLaHj5kFUMAbOPPl/QGDGhq2eL99+w1PKOfmdHYe2AdtsIhkIQ0F0FUkgItSjdOKlh0hKI+Hk2OqpfA6PRLlZT5dh8r0q0WcI1JPE46egNogjN2za4i6KrkjnTRSchhrxNg=,iv:k0BZ8b+5kmMqaKi9dx6fibIGVVJLZRa3oApwa/fWVdE=,tag:DbV4ZS/ciZVSi+aE0wOZfg==,type:str] unencrypted_suffix: _unencrypted version: 3.10.2 diff --git a/users/plasmagoat.nix b/users/plasmagoat.nix new file mode 100644 index 0000000..9e5a96b --- /dev/null +++ b/users/plasmagoat.nix @@ -0,0 +1,31 @@ 
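+# NOTE: the same three public keys are authorized for the user and for root; the root entries are what remote nixos-rebuild/colmena deployments rely on.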
+# users/plasmagoat.nix - Your user configuration +{ + config, + lib, + pkgs, + ... +}: { + users.users.plasmagoat = { + isNormalUser = true; + description = "plasmagoat"; + extraGroups = ["wheel" "docker" "backup"]; + shell = pkgs.bash; # or pkgs.zsh, pkgs.fish + + # SSH keys managed through secrets + openssh.authorizedKeys.keys = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeg/n/vst9KME8byhxX2FhA+FZNQ60W38kkNt45eNzK5zFqBYuwo1nDXVanJSh9unRvB13b+ygpZhrb4sHvkETGWiEioc49MiWr8czEhu6Wpo0vv5MAJkiYvGZUYPdUW52jUzWcYdw8PukG2rowrxL5G0CmsqLwHMPU2FyeCe5aByFI/JZb8R80LoEacgjUiipJcoLWUVgG2koMomHClqGu+16kB8nL5Ja3Kc9lgLfDK7L0A5R8JXhCjrlEsmXbxZmwDKuxvjDAZdE9Sl1VZmMDfWkyrRlenrt01eR3t3Fec6ziRm5ZJk9e2Iu1DPoz+PoHH9aZGVwmlvvnr/gMF3OILxcqb0qx+AYlCCnb6D6pJ9zufhZkKcPRS1Q187F6fz+v2oD1xLZWFHJ92+7ItM0WmbDOHOC29s5EA6wNm3iXZCq86OI3n6T34njDtPqh6Z7Pk2sdK4GBwnFj4KwEWXvdKZKSX1qb2EVlEBE9QI4Gf3eg4SiBu2cAFt3nOSzs8c= asol\dbs@ALPHA-DBS-P14sG2" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+U3DWOrklcA8n8wdbLBGyli5LsJI3dpL2Zod8mx8eOdC4H127ZT1hzuk2uSmkic4c73BykPyQv8rcqwaRGW94xdMRanKmHYxnbHXo5FBiGrCkNlNNZuahthAGO49c6sUhJMq0eLhYOoFWjtf15sr5Zu7Ug2YTUL3HXB1o9PZ3c9sqYHo2rC/Il1x2j3jNAMKST/qUZYySvdfNJEeQhMbQcdoKJsShcE3oGRL6DFBoV/mjJAJ+wuDhGLDnqi79nQjYfbYja1xKcrKX+D3MfkFxFl6ZIzomR1t75AnZ+09oaWcv1J7ehZ3h9PpDBFNXvzyLwDBMNS+UYcH6SyFjkUbF David@NZXT" + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air" + ]; + }; + + # Root SSH access (for deployment) + users.users.root.openssh.authorizedKeys.keys = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeg/n/vst9KME8byhxX2FhA+FZNQ60W38kkNt45eNzK5zFqBYuwo1nDXVanJSh9unRvB13b+ygpZhrb4sHvkETGWiEioc49MiWr8czEhu6Wpo0vv5MAJkiYvGZUYPdUW52jUzWcYdw8PukG2rowrxL5G0CmsqLwHMPU2FyeCe5aByFI/JZb8R80LoEacgjUiipJcoLWUVgG2koMomHClqGu+16kB8nL5Ja3Kc9lgLfDK7L0A5R8JXhCjrlEsmXbxZmwDKuxvjDAZdE9Sl1VZmMDfWkyrRlenrt01eR3t3Fec6ziRm5ZJk9e2Iu1DPoz+PoHH9aZGVwmlvvnr/gMF3OILxcqb0qx+AYlCCnb6D6pJ9zufhZkKcPRS1Q187F6fz+v2oD1xLZWFHJ92+7ItM0WmbDOHOC29s5EA6wNm3iXZCq86OI3n6T34njDtPqh6Z7Pk2sdK4GBwnFj4KwEWXvdKZKSX1qb2EVlEBE9QI4Gf3eg4SiBu2cAFt3nOSzs8c= asol\\dbs@ALPHA-DBS-P14sG2" + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+U3DWOrklcA8n8wdbLBGyli5LsJI3dpL2Zod8mx8eOdC4H127ZT1hzuk2uSmkic4c73BykPyQv8rcqwaRGW94xdMRanKmHYxnbHXo5FBiGrCkNlNNZuahthAGO49c6sUhJMq0eLhYOoFWjtf15sr5Zu7Ug2YTUL3HXB1o9PZ3c9sqYHo2rC/Il1x2j3jNAMKST/qUZYySvdfNJEeQhMbQcdoKJsShcE3oGRL6DFBoV/mjJAJ+wuDhGLDnqi79nQjYfbYja1xKcrKX+D3MfkFxFl6ZIzomR1t75AnZ+09oaWcv1J7ehZ3h9PpDBFNXvzyLwDBMNS+UYcH6SyFjkUbF David@NZXT" + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air" + ]; + + # Home directory management (optional) + # You could add home-manager here if you want +} From 0b47fc0a53c3311dfc69952ba9cbacf0e02cd952 Mon Sep 17 00:00:00 2001 From: Forgejo Bot Date: Mon, 28 Jul 2025 06:01:03 +0000 Subject: [PATCH 2/2] feat: automated changes --- flake.lock | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/flake.lock b/flake.lock index 61e80ac..ed23ee6 100644 --- a/flake.lock +++ b/flake.lock @@ -156,11 +156,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1753429684, - "narHash": "sha256-9h7+4/53cSfQ/uA3pSvCaBepmZaz/dLlLVJnbQ+SJjk=", + "lastModified": 1750134718, + "narHash": "sha256-v263g4GbxXv87hMXMCpjkIxd/viIF7p3JpJrwgKdNiI=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "7fd36ee82c0275fb545775cc5e4d30542899511d", + "rev": "9e83b64f727c88a7711a2c463a7b16eedb69a84c", "type": "github" }, "original": { @@ -188,11 +188,11 @@ }, "nixpkgs-unstable": { 
"locked": { - "lastModified": 1752480373, - "narHash": "sha256-JHQbm+OcGp32wAsXTE/FLYGNpb+4GLi5oTvCxwSoBOA=", + "lastModified": 1753429684, + "narHash": "sha256-9h7+4/53cSfQ/uA3pSvCaBepmZaz/dLlLVJnbQ+SJjk=", "owner": "nixos", "repo": "nixpkgs", - "rev": "62e0f05ede1da0d54515d4ea8ce9c733f12d9f08", + "rev": "7fd36ee82c0275fb545775cc5e4d30542899511d", "type": "github" }, "original": { @@ -204,11 +204,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1752624097, - "narHash": "sha256-mQCof2VccFzF7cmXy43n3GCwSN2+m8TVhZpGLx9sxVc=", + "lastModified": 1753679156, + "narHash": "sha256-CiYhgWDUG6TF1gHo7hf309KnMNzlU5Y8m6pU/4PPFMI=", "owner": "nixos", "repo": "nixpkgs", - "rev": "d7c8095791ce3aafe97d9c16c1dc2f4e3d69a3ba", + "rev": "1e95fd75ac8ec3a9ce1f9cb45e8a8e849ad32aba", "type": "github" }, "original": { @@ -237,11 +237,11 @@ "nixpkgs-25_05": "nixpkgs-25_05" }, "locked": { - "lastModified": 1752060039, - "narHash": "sha256-MqcbN/PgfXOv8S4q6GcmlORd6kJZ3UlFNhzCvLOEe4I=", + "lastModified": 1753285640, + "narHash": "sha256-ofa021NeHDXAxg5J8mSnn8rHa393PAlD85ZCetP4Qa0=", "owner": "simple-nixos-mailserver", "repo": "nixos-mailserver", - "rev": "80d21ed7a1ab8007597f7cd9adc26ebc98b9611f", + "rev": "ce87c8a9771d1a20c3fa3b60113b9b0821627dcb", "type": "gitlab" }, "original": {