Compare commits

..

1 commit

Author SHA1 Message Date
Forgejo Bot
f1af911f72 feat: automated changes 2025-07-27 06:00:57 +00:00
94 changed files with 436 additions and 7289 deletions

View file

@ -41,6 +41,11 @@ jobs:
ssh-keyscan -H "$NIXOS_BUILER_HOST" >> ~/.ssh/known_hosts ssh-keyscan -H "$NIXOS_BUILER_HOST" >> ~/.ssh/known_hosts
chmod 600 ~/.ssh/known_hosts chmod 600 ~/.ssh/known_hosts
- name: Test SSH connection to NixOS Builder
run: |
echo "Testing SSH connection to $NIXOS_BUILER_HOST..."
ssh -o StrictHostKeyChecking=yes "$NIXOS_BUILER_USER"@"$NIXOS_BUILER_HOST" "echo 'SSH success. Hostname:' && hostname"
- name: Apply Colmena - name: Apply Colmena
id: apply id: apply
run: colmena apply run: colmena apply

View file

@ -1,59 +0,0 @@
# colmena.nix - Separate file to keep flake.nix clean
{
  inputs,
  outputs,
}: let
  inherit (inputs.nixpkgs) lib;

  # Assemble one Colmena node: a platform profile plus the host's own
  # directory, with any extra modules appended after them.
  mkHost = {
    hostname,
    profile ? "proxmox-vm",
    modules ? [],
    specialArgs ? {},
  }: {
    imports =
      [
        (./. + "/profiles/${profile}.nix") # base profile: hardware/platform specifics
        (./. + "/hosts/${hostname}") # host-specific configuration
      ]
      ++ modules;

    # Expose caller-supplied args plus the flake's inputs/outputs to modules.
    _module.args = specialArgs // {inherit inputs outputs;};
  };
in {
  meta = {
    # Hive-wide package set with the project's overlays applied.
    nixpkgs = import inputs.nixpkgs {
      system = "x86_64-linux";
      overlays = [
        outputs.overlays.additions
        outputs.overlays.modifications
        outputs.overlays.unstable-packages
        inputs.colmena.overlays.default
      ];
    };
    specialArgs = {inherit inputs outputs;};
  };

  # Baseline configuration shared by every node.
  defaults = import ./hosts/default.nix;

  # Hosts managed by this hive.
  sandbox = mkHost {
    hostname = "sandbox";
    profile = "proxmox-vm";
  };
  photos = mkHost {
    hostname = "photos";
    profile = "proxmox-vm";
  };
}

6
flake.lock generated
View file

@ -204,11 +204,11 @@
}, },
"nixpkgs_2": { "nixpkgs_2": {
"locked": { "locked": {
"lastModified": 1753679156, "lastModified": 1753595164,
"narHash": "sha256-CiYhgWDUG6TF1gHo7hf309KnMNzlU5Y8m6pU/4PPFMI=", "narHash": "sha256-JtSQkf32bc0e9Z8Ieh0dqlhTMi4H5EybsPfCcp4Izuk=",
"owner": "nixos", "owner": "nixos",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "1e95fd75ac8ec3a9ce1f9cb45e8a8e849ad32aba", "rev": "44f53751812dd1ab6206e4e87979d0babcb58480",
"type": "github" "type": "github"
}, },
"original": { "original": {

View file

@ -27,18 +27,18 @@
# systems, # systems,
sops-nix, sops-nix,
# home-manager, # home-manager,
colmena,
simple-nixos-mailserver, simple-nixos-mailserver,
... ...
} @ inputs: let } @ inputs: let
inherit (self) outputs; inherit (self) outputs;
lib = nixpkgs.lib;
# Supported systems for your flake packages, shell, etc. # Supported systems for your flake packages, shell, etc.
systems = [ systems = [
"x86_64-linux" "x86_64-linux"
]; ];
# This is a function that generates an attribute by calling a function you # This is a function that generates an attribute by calling a function you
# pass to it, with each system as an argument # pass to it, with each system as an argument
forAllSystems = lib.genAttrs systems; forAllSystems = nixpkgs.lib.genAttrs systems;
in { in {
# Custom packages # Custom packages
# Accessible through 'nix build', 'nix shell', etc # Accessible through 'nix build', 'nix shell', etc
@ -54,35 +54,33 @@
# Reusable nixos modules # Reusable nixos modules
nixosModules = import ./modules/nixos; nixosModules = import ./modules/nixos;
colmenaHive = inputs.colmena.lib.makeHive self.outputs.colmena; colmenaHive = colmena.lib.makeHive self.outputs.colmena;
colmena = import ./colmena.nix {inherit inputs outputs;}; colmena = {
meta = {
nixpkgs = import nixpkgs {
system = "x86_64-linux";
overlays = [
outputs.overlays.additions
outputs.overlays.modifications
outputs.overlays.unstable-packages
# Development shells colmena.overlays.default
devShells = forAllSystems (
system: let
inherit (inputs.colmena.packages."${pkgs.system}") colmena;
pkgs = nixpkgs.legacyPackages.${system};
in {
default = pkgs.mkShell {
packages = with pkgs; [
# colmena
sops
age
nix-output-monitor
jq
ssh-to-age # For converting SSH keys to age keys
]; ];
config.allowUnfree = true;
shellHook = '' };
echo "🏠 Homelab Development Environment"
echo "Available commands:" specialArgs = {
echo " colmena apply - Deploy all hosts" inherit inputs outputs;
echo " colmena apply --on HOST - Deploy specific host" };
echo " sops secrets/secrets.yaml - Edit secrets" };
echo ""
''; defaults = import ./machines/_default/configuration.nix;
sandbox = import ./machines/sandbox/configuration.nix;
auth = import ./machines/auth/configuration.nix;
mail = import ./machines/mail/configuration.nix;
monitor = import ./machines/monitor/configuration.nix;
photos = import ./machines/photos/configuration.nix;
}; };
}
);
}; };
} }

37
hive.nix Normal file
View file

@ -0,0 +1,37 @@
inputs @ {
  self,
  nixpkgs,
  sops-nix,
  simple-nixos-mailserver,
  # home-manager,
  outputs,
  ...
}: let
  # Build one hive node: the machine's definition plus optional extra
  # modules, tagged for selective `colmena apply --on @tag` deploys.
  mkNode = {
    tags,
    extraModules ? [],
  }: {name, ...}: {
    imports = [./machines/${name}/definition.nix] ++ extraModules;
    deployment.tags = tags;
  };
in {
  sandbox = mkNode {tags = ["sandbox"];};
  monitor = mkNode {tags = ["grafana" "prometheus"];};
  auth = mkNode {tags = ["zitadel" "sso" "ldap"];};
  mail = mkNode {
    tags = ["mail"];
    extraModules = [simple-nixos-mailserver.nixosModule];
  };
  photos = mkNode {tags = ["ente"];};
}

View file

@ -1,106 +0,0 @@
# Baseline shared by every host in the hive: Colmena deployment defaults,
# nix daemon settings, SSH hardening, locale and a common package set.
{
  config,
  lib,
  pkgs,
  inputs,
  outputs,
  ...
}: {
  imports = [
    # Essential modules for all systems
    inputs.sops-nix.nixosModules.sops
    ../modules/homelab
    # User configurations
    ../users/plasmagoat.nix
    # Secrets management
    ../secrets
  ];

  # Colmena deployment defaults
  deployment = {
    # Default target: <hostname>.<domain>; hosts can override (mkDefault).
    targetHost = lib.mkDefault "${config.homelab.hostname}.${config.homelab.domain}";
    tags = [config.nixpkgs.system config.networking.hostName];
    replaceUnknownProfiles = lib.mkDefault true;
    buildOnTarget = lib.mkDefault false;
  };

  # Basic system configuration that applies to ALL systems
  nix = {
    settings = {
      experimental-features = ["nix-command" "flakes"];
      auto-optimise-store = true;
      allowed-users = ["@wheel"];
      trusted-users = ["root" "@wheel"];
    };
    # Daily GC, keeping roughly two weeks of generations.
    gc = {
      automatic = true;
      options = "--delete-older-than 15d";
      dates = "daily";
    };
    optimise.automatic = true;
    extraOptions = ''
      keep-outputs = true
      keep-derivations = true
    '';
  };

  # Basic security
  # NOTE(review): passwordless sudo for wheel — presumably needed for
  # non-interactive colmena activation; confirm this is intentional.
  security.sudo.wheelNeedsPassword = false;

  # SSH configuration: key-only auth, root login only with a key.
  services.openssh = {
    enable = true;
    openFirewall = true;
    settings = {
      PasswordAuthentication = false;
      PermitRootLogin = "prohibit-password";
      KbdInteractiveAuthentication = false;
    };
  };
  services.sshguard.enable = true;
  programs.ssh.startAgent = true;

  # Basic packages for all systems
  environment.systemPackages = with pkgs; [
    dig
    nmap
    traceroute
    vim
    git
    curl
    python3
    htop
    tree
  ];

  # Timezone and locale
  time.timeZone = lib.mkDefault "Europe/Copenhagen";
  console.keyMap = lib.mkDefault "dk-latin1";
  i18n.defaultLocale = lib.mkDefault "en_US.UTF-8";

  # System backup job (applies to all systems)
  # homelab.global.backups.jobs = [
  #   {
  #     name = "system-config";
  #     backend = "restic";
  #     paths = [
  #       "/etc/nixos"
  #       "/etc/sops"
  #       "/var/lib/nixos"
  #     ];
  #     schedule = "daily";
  #     excludePatterns = [
  #       "*/cache/*"
  #       "*/tmp/*"
  #     ];
  #   }
  # ];

  # Default state version
  system.stateVersion = lib.mkDefault "25.05";
}

View file

@ -1,28 +0,0 @@
# Photos host: Ente deployment, tagged for `colmena apply --on @ente`.
{
  outputs,
  name,
  ...
}: {
  # Removed a dead, empty `let … in` that bound nothing.
  imports = [
    outputs.nixosModules.ente
    ./ente.nix
    # ./minio.nix
  ];

  homelab = {
    enable = true;
    hostname = name;
    tags = [name];
    monitoring.enable = true;
    motd.enable = true;
    services = {
      # Object storage backing Ente's S3 uploads.
      minio.enable = true;
    };
  };

  deployment.tags = ["ente"];
  system.stateVersion = "25.05";
}

View file

@ -1,73 +0,0 @@
# Ente (self-hosted photos): the "museum" API server plus the static web
# apps, using sops-managed credentials for SMTP and the S3 (MinIO) backend.
{
  config,
  pkgs,
  ...
}: {
  # Secrets are owned by the ente service user so the daemon can read them.
  sops.secrets."ente/minio/root_password".owner = "ente";
  sops.secrets."ente/minio/root_user".owner = "ente";
  sops.secrets."service_accounts/ente/password".owner = "ente";

  environment.systemPackages = with pkgs; [
    ente-cli
  ];

  services.ente.api = {
    enable = true;
    enableLocalDB = true;
    domain = "ente-museum.procopius.dk";
    settings = {
      # apps = {
      #   accounts = "https://accounts.procopius.dk";
      #   cast = "https://cast.procopius.dk";
      #   public-albums = "https://albums.procopius.dk";
      # };
      smtp = {
        host = "mail.procopius.dk";
        # NOTE(review): port is a string — presumably the ente module
        # expects string-typed settings here; confirm against its options.
        port = "465";
        username = "ente@procopius.dk";
        password._secret = config.sops.secrets."service_accounts/ente/password".path;
        # The email address from which to send the email. Set this to an email
        # address whose credentials you're providing.
        email = "ente@procopius.dk";
        # Optional override for the sender name in the emails. If specified, it will
        # be used for all emails sent by the instance (default is email specific).
        sender-name = "ente";
      };
      # Ente user IDs granted admin on this instance.
      internal.admins = [
        1580559962386438
      ];
      # S3 storage pointed at the local MinIO behind its public endpoint.
      # NOTE(review): "b2-eu-cen" looks like a bucket-profile key ente
      # expects by name — confirm before renaming.
      s3 = {
        use_path_style_urls = true;
        b2-eu-cen = {
          endpoint = "https://ente-minio-api.procopius.dk";
          region = "us-east-1";
          bucket = "ente";
          key._secret = config.sops.secrets."ente/minio/root_user".path;
          secret._secret = config.sops.secrets."ente/minio/root_password".path;
        };
      };
    };
  };

  # Static web frontends, one subdomain per app.
  services.ente.web = {
    enable = true;
    domains = {
      api = "ente-museum.procopius.dk";
      accounts = "ente-accounts.procopius.dk";
      albums = "ente-albums.procopius.dk";
      cast = "ente-cast.procopius.dk";
      photos = "ente-photos.procopius.dk";
      auth = "ente-auth.procopius.dk";
    };
  };

  # NOTE(review): presumably the ports the web apps and API bind to —
  # confirm against services.ente before trimming.
  networking.firewall.allowedTCPPorts = [
    3000
    3001
    3002
    3003
    3004
    8080
  ];
}

View file

@ -1,35 +0,0 @@
# MinIO object store used as Ente's S3 backend. Root credentials come from
# sops; the "ente" bucket directory is ensured after service start.
{
  config,
  pkgs,
  lib,
  ...
}: {
  sops.secrets."ente/minio/root_user" = {};
  sops.secrets."ente/minio/root_password" = {};

  # Rendered env file carrying the root credentials; secrets are
  # substituted at activation time, never stored in the nix store.
  sops.templates."minio-root-credentials".content = ''
    MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"}
    MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"}
  '';

  services.minio = {
    enable = true;
    rootCredentialsFile = config.sops.templates."minio-root-credentials".path;
  };

  systemd.services.minio = {
    # Public URL MinIO advertises (matches the ente S3 endpoint).
    environment.MINIO_SERVER_URL = "https://ente-minio-api.procopius.dk";
    # Health-gate then ensure the bucket directory exists on disk.
    postStart = ''
      # Wait until minio is up
      ${lib.getExe pkgs.curl} --retry 5 --retry-connrefused --fail --no-progress-meter -o /dev/null "http://localhost:9000/minio/health/live"
      # Make sure bucket exists
      mkdir -p ${lib.escapeShellArg config.services.minio.dataDir}/ente
    '';
  };

  # 9000 (S3 API) and 9001 — presumably the MinIO console; confirm the
  # configured listen addresses.
  networking.firewall.allowedTCPPorts = [
    9000
    9001
  ];
}

View file

@ -1,57 +0,0 @@
# Sandbox host: restic backup testbed plus monitoring experiments.
{
  config,
  name,
  ...
}: {
  sops.secrets."restic/default-password" = {};

  homelab = {
    enable = true;
    hostname = name;
    tags = [name];
    monitoring.enable = true;
    motd.enable = true;

    backups = {
      enable = true;
      backends = {
        # Local repository; password managed via sops.
        restic = {
          enable = true;
          repository = "/srv/restic-repo";
          passwordFile = config.sops.secrets."restic/default-password".path;
        };
      };
      jobs = [
        {
          name = "sandbox-home";
          backend = "restic";
          backendOptions = {
            paths = ["/home/plasmagoat"];
            repository = "/srv/restic-repo";
            pruneOpts = [
              "--keep-daily 7"
              "--keep-weekly 4"
              "--keep-monthly 6"
              "--keep-yearly 3"
            ];
          };
        }
      ];
    };

    # NOTE(review): these `services.*` attrs sit INSIDE the `homelab`
    # attrset, so they define homelab.services.prometheus / .gatus rather
    # than the NixOS services.* options. Verify the homelab module
    # declares these, or move them to the top level if the NixOS
    # prometheus/gatus services were intended.
    services.prometheus = {
      enable = true;
    };
    services.gatus = {
      enable = true;
      ui = {
        title = "Homelab Status Dashboard";
        header = "My Homelab Services";
      };
    };
  };

  system.stateVersion = "25.05";
}

View file

@ -5,7 +5,7 @@
nix run github:nix-community/nixos-generators -- -f proxmox -c configuration.nix nix run github:nix-community/nixos-generators -- -f proxmox -c configuration.nix
``` ```
## Upload to proxmox ## Update to proxmox
``` ```
scp /nix/store/jvwxp7agny9979fglf76s0ca9m2h6950-proxmox-nixos-cloud-init/vzdump-qemu-nixos-cloud-init.vma.zst root@192.168.1.206:/var/lib/vz/dump scp /nix/store/jvwxp7agny9979fglf76s0ca9m2h6950-proxmox-nixos-cloud-init/vzdump-qemu-nixos-cloud-init.vma.zst root@192.168.1.206:/var/lib/vz/dump
``` ```
@ -16,6 +16,3 @@ qmrestore /var/lib/vz/dump/vzdump-qemu-nixos-cloud-init.vma.zst 9000 --unique tr
qm template 9000 qm template 9000
``` ```
## Future
Maybe look into nixos-everywhere like done here https://github.com/solomon-b/nixos-config

View file

@ -1,6 +1,3 @@
pm_api_url = "https://192.168.1.205:8006/api2/json"
pm_api_token_id = "root@pam!opentofu"
pm_api_token_secret = "7660e962-9240-44ea-b1dc-e5176caba450"
pm_node = "proxmox-01" pm_node = "proxmox-01"
# nixos_template_id = 9100 # nixos_template_id = 9100

View file

@ -18,17 +18,11 @@
replaceUnknownProfiles = lib.mkDefault true; replaceUnknownProfiles = lib.mkDefault true;
buildOnTarget = lib.mkDefault false; buildOnTarget = lib.mkDefault false;
targetHost = lib.mkDefault "${name}.lab"; targetHost = lib.mkDefault "${name}.lab";
tags = [config.nixpkgs.system name "homelab"]; tags = lib.mkDefault [config.nixpkgs.system name "homelab"];
keys = {
"age.key" = {
destDir = "/run/keys";
keyFile = "/home/plasmagoat/.config/age/age.key";
};
};
}; };
sops = { sops = {
age.keyFile = "/run/keys/age.key"; age.keyFile = "/etc/sops/age.key";
defaultSopsFile = ../../secrets/secrets.yaml; defaultSopsFile = ../../secrets/secrets.yaml;
}; };

View file

@ -9,7 +9,8 @@ in {
9091 9091
]; ];
services.authelia.instances.procopius = { services = {
authelia.instances.procopius = {
enable = true; enable = true;
settings = { settings = {
theme = "auto"; theme = "auto";
@ -169,6 +170,7 @@ in {
AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = secrets.smtp-password_authelia.path; AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = secrets.smtp-password_authelia.path;
}; };
}; };
};
# Give Authelia access to the Redis socket # Give Authelia access to the Redis socket
users.users.${authelia}.extraGroups = ["redis-procopius"]; users.users.${authelia}.extraGroups = ["redis-procopius"];

View file

@ -3,7 +3,6 @@
sops.secrets."service_accounts/forgejo/password" = {}; sops.secrets."service_accounts/forgejo/password" = {};
sops.secrets."service_accounts/jellyfin/password" = {}; sops.secrets."service_accounts/jellyfin/password" = {};
sops.secrets."service_accounts/mail/password" = {}; sops.secrets."service_accounts/mail/password" = {};
sops.secrets."service_accounts/ente/password" = {};
sops.templates."service-accounts.json" = { sops.templates."service-accounts.json" = {
content = '' content = ''
{ {
@ -45,16 +44,6 @@
"mail" "mail"
] ]
} }
{
"id": "ente",
"email": "ente@procopius.dk",
"password": "${config.sops.placeholder."service_accounts/ente/password"}",
"displayName": "ente",
"groups": [
"lldap_password_manager",
"mail"
]
}
''; '';
path = "/bootstrap/user-configs/service-accounts.json"; path = "/bootstrap/user-configs/service-accounts.json";
owner = "lldap"; owner = "lldap";

View file

@ -4,7 +4,6 @@
./authelia.nix ./authelia.nix
./postgres.nix ./postgres.nix
./redis.nix ./redis.nix
../modules/pgbackrest.nix
]; ];
deployment.tags = ["authelia" "sso" "ldap" "lldap"]; deployment.tags = ["authelia" "sso" "ldap" "lldap"];

View file

@ -18,7 +18,6 @@
authentication = lib.mkForce '' authentication = lib.mkForce ''
# TYPE DATABASE USER ADDRESS METHOD # TYPE DATABASE USER ADDRESS METHOD
local all all trust local all all trust
host all all 127.0.0.1/32 trust
''; '';
}; };
} }

View file

@ -2,7 +2,6 @@
imports = [ imports = [
./mailserver.nix ./mailserver.nix
./networking.nix ./networking.nix
./roundcube.nix
inputs.simple-nixos-mailserver.nixosModule inputs.simple-nixos-mailserver.nixosModule
]; ];

View file

@ -1,14 +1,10 @@
{config, ...}: { {config, ...}: {
sops.secrets."service_accounts/mail/password" = {}; sops.secrets."service_accounts/mail/password" = {};
sops.secrets."cloudflare/dns-api-token" = {};
sops.secrets."cloudflare/zone-api-token" = {};
mailserver = { mailserver = {
enable = true; enable = true;
stateVersion = 3; stateVersion = 3;
fqdn = "mail.procopius.dk"; fqdn = "mail.procopius.dk";
domains = ["procopius.dk"]; domains = ["procopius.dk"];
dmarcReporting.enable = true;
localDnsResolver = false; localDnsResolver = false;
ldap = { ldap = {
enable = true; enable = true;
@ -32,17 +28,10 @@
searchBase = "ou=people,dc=procopius,dc=dk"; searchBase = "ou=people,dc=procopius,dc=dk";
}; };
certificateScheme = "acme"; # Use Let's Encrypt certificates. Note that this needs to set up a stripped
acmeCertificateName = "mail.procopius.dk"; # down nginx and opens port 80.
certificateScheme = "acme-nginx";
}; };
security.acme.acceptTerms = true; security.acme.acceptTerms = true;
security.acme.defaults.email = "david.mikael@proton.me"; security.acme.defaults.email = "david.mikael@proton.me";
security.acme.defaults = {
dnsProvider = "cloudflare";
dnsResolver = "1.1.1.1:53";
credentialFiles = {
"CF_DNS_API_TOKEN_FILE" = config.sops.secrets."cloudflare/dns-api-token".path;
"CF_ZONE_API_TOKEN_FILE" = config.sops.secrets."cloudflare/zone-api-token".path;
};
};
} }

View file

@ -1,22 +0,0 @@
# Roundcube webmail for the mail host. The nginx vhost's SSL/ACME are
# forced off — presumably TLS terminates at an upstream proxy; confirm.
{
  lib,
  config,
  ...
}: {
  services.roundcube = {
    enable = true;
    hostName = "roundcube.procopius.dk";
    # PHP snippet appended to Roundcube's config: send outgoing mail
    # through the mailserver's TLS submission endpoint using the
    # logged-in user's own credentials (%u / %p placeholders).
    extraConfig = ''
      # starttls needed for authentication, so the fqdn required to match
      # the certificate
      $config['smtp_host'] = "tls://${config.mailserver.fqdn}";
      $config['smtp_user'] = "%u";
      $config['smtp_pass'] = "%p";
    '';
  };

  services.nginx.virtualHosts."roundcube.procopius.dk" = {
    forceSSL = lib.mkForce false;
    enableACME = lib.mkForce false;
  };
}

View file

@ -1,11 +0,0 @@
# Homelab nixos global config
A global module config for my homelab, where we gather:
* Monitoring endpoints (/metrics + port + host)
* Promtail log files
* Reverse proxy configuration
* Postgres backups (pgbackrest)
* Restic backups
* ...?
* LDAP config
* OIDC configs

View file

@ -1,43 +0,0 @@
# Postgres dump job for the authelia/lldap databases, with an NFS mount
# for dump storage; off-site push via restic is sketched but commented out.
{
  lib,
  config,
  name,
  # meta,
  ...
}: {
  # NFS share on the NAS for database dumps; automounted on first access.
  fileSystems."/mnt/pgdumps" = {
    device = "192.168.1.226:/volume1/database_backups/${name}";
    fsType = "nfs4";
    options = ["x-systemd.automount" "noatime" "_netdev"];
  };

  services.postgresqlBackup = {
    enable = true;
    # We trigger this through restic
    # (empty startAt = no systemd timer of its own).
    startAt = [];
    # startAt = "*-*-* 01:15:00";
    compression = "zstd";
    databases = [
      "authelia-procopius"
      "lldap"
    ];
  };

  # services.restic.backups.b2 = {
  #   environmentFile = config.sops.templates.restic_floofs_env.path;
  #   repositoryFile = config.sops.secrets.b2_floofs_server_repository.path;
  #   passwordFile = config.sops.secrets.b2_floofs_server_password.path;
  #   paths = ["/var/backup/postgresql"];
  #   initialize = true;
  #   pruneOpts = [
  #     "--keep-daily 7"
  #     "--keep-weekly 3"
  #     "--keep-monthly 3"
  #   ];
  #   timerConfig = {
  #     OnCalendar = "04:45";
  #     Persistent = true;
  #   };
  # };
  # systemd.services.restic-backups-b2.wants = ["postgresqlBackup.service"];
}

View file

@ -2,7 +2,6 @@
imports = [ imports = [
outputs.nixosModules.ente outputs.nixosModules.ente
./ente.nix ./ente.nix
./minio.nix
]; ];
deployment.tags = ["ente"]; deployment.tags = ["ente"];

View file

@ -1,73 +1,25 @@
{ {
config,
pkgs,
...
}: {
sops.secrets."ente/minio/root_password".owner = "ente";
sops.secrets."ente/minio/root_user".owner = "ente";
sops.secrets."service_accounts/ente/password".owner = "ente";
environment.systemPackages = with pkgs; [
ente-cli
];
services.ente.api = { services.ente.api = {
enable = true; enable = true;
enableLocalDB = true; enableLocalDB = true;
domain = "ente-museum.procopius.dk"; domain = "ente-v2.procopius.dk";
settings = { settings = {
# apps = { # apps = {
# accounts = "https://accounts.procopius.dk"; # accounts = "https://accounts.procopius.dk";
# cast = "https://cast.procopius.dk"; # cast = "https://cast.procopius.dk";
# public-albums = "https://albums.procopius.dk"; # public-albums = "https://albums.procopius.dk";
# }; # };
smtp = {
host = "mail.procopius.dk";
port = "465";
username = "ente@procopius.dk";
password._secret = config.sops.secrets."service_accounts/ente/password".path;
# The email address from which to send the email. Set this to an email
# address whose credentials you're providing.
email = "ente@procopius.dk";
# Optional override for the sender name in the emails. If specified, it will
# be used for all emails sent by the instance (default is email specific).
sender-name = "ente";
};
internal.admins = [
1580559962386438
];
s3 = {
use_path_style_urls = true;
b2-eu-cen = {
endpoint = "https://ente-minio-api.procopius.dk";
region = "us-east-1";
bucket = "ente";
key._secret = config.sops.secrets."ente/minio/root_user".path;
secret._secret = config.sops.secrets."ente/minio/root_password".path;
};
};
}; };
}; };
services.ente.web = { services.ente.web = {
enable = true; enable = true;
domains = { domains = {
api = "ente-museum.procopius.dk"; api = "ente-v2.procopius.dk";
accounts = "ente-accounts.procopius.dk"; accounts = "accounts.procopius.dk";
albums = "ente-albums.procopius.dk"; albums = "albums.procopius.dk";
cast = "ente-cast.procopius.dk"; cast = "cast.procopius.dk";
photos = "ente-photos.procopius.dk"; photos = "photos.procopius.dk";
auth = "ente-auth.procopius.dk";
}; };
}; };
networking.firewall.allowedTCPPorts = [
3000
3001
3002
3003
3004
8080
];
} }

View file

@ -1,35 +1,6 @@
{ {
config,
pkgs,
lib,
...
}: {
sops.secrets."ente/minio/root_user" = {};
sops.secrets."ente/minio/root_password" = {};
sops.templates."minio-root-credentials".content = ''
MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"}
MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"}
'';
services.minio = { services.minio = {
enable = true; enable = true;
rootCredentialsFile = config.sops.templates."minio-root-credentials".path; rootCredentialsFile = "/etc/nixos/minio-root-credentials";
}; };
systemd.services.minio = {
environment.MINIO_SERVER_URL = "https://ente-minio-api.procopius.dk";
postStart = ''
# Wait until minio is up
${lib.getExe pkgs.curl} --retry 5 --retry-connrefused --fail --no-progress-meter -o /dev/null "http://localhost:9000/minio/health/live"
# Make sure bucket exists
mkdir -p ${lib.escapeShellArg config.services.minio.dataDir}/ente
'';
};
networking.firewall.allowedTCPPorts = [
9000
9001
];
} }

View file

@ -1,18 +1,5 @@
{outputs, ...}: { {
deployment.tags = ["sandbox"]; deployment.tags = ["sandbox"];
imports = [
outputs.nixosModules.global-config
];
homelab.global = {
enable = true;
hostname = "sandbox";
domain = "sandbox.local";
environment = "production";
location = "proxmox";
tags = ["sandbox"];
};
system.stateVersion = "25.05"; system.stateVersion = "25.05";
} }

View file

@ -1,116 +0,0 @@
# Homelab backup framework: declares homelab.backups.{enable,jobs,...},
# validates that every job targets a declared backend, and exports the
# effective backup config to /etc/homelab/backup-config.json.
{
  config,
  lib,
  ...
}:
with lib; let
  cfg = config.homelab.backups;
  homelabCfg = config.homelab;

  # Backend names (restic, borg, ...) contributed by backend modules.
  # Parenthesized so the `or {}` default unambiguously guards the
  # attribute selection before `attrNames` is applied.
  backendNames = attrNames (cfg.backends or {});

  backupJobType = types.submodule {
    options = {
      name = mkOption {
        type = types.str;
        description = "Name of the backup job";
      };
      backend = mkOption {
        type = types.enum backendNames;
        description = "Backend to use for this backup job";
      };
      backendOptions = mkOption {
        type = types.attrs;
        default = {};
        description = "Backend-specific options to override or extend the backend configuration";
      };
      labels = mkOption {
        type = types.attrsOf types.str;
        default = {};
        description = "Additional labels for this backup job";
      };
    };
  };
in {
  imports = [
    ./backup/restic.nix
    # ./backup/borgbackup.nix
  ];

  options.homelab.backups = {
    enable = mkEnableOption "Homelab backup system";
    jobs = mkOption {
      type = types.listOf backupJobType;
      default = [];
      description = "Backup jobs to execute on this system";
    };
    defaultLabels = mkOption {
      type = types.attrsOf types.str;
      default = {
        hostname = homelabCfg.hostname;
        environment = homelabCfg.environment;
        location = homelabCfg.location;
      };
      description = "Default labels applied to all backup jobs";
    };
    monitoring = mkOption {
      type = types.bool;
      default = true;
      description = "Enable backup monitoring and metrics";
    };
  };

  config = mkIf cfg.enable {
    # Validate that all job backends exist. Guard the lookup with
    # `or null` so a job naming an undeclared backend yields this
    # assertion message instead of an uncatchable missing-attribute
    # evaluation error.
    assertions = [
      {
        assertion = all (job: (cfg.backends.${job.backend} or null) != null) cfg.jobs;
        message = "All backup jobs must reference backends that are defined and not null in homelab.backups.backends";
      }
    ];

    # Add backup jobs to monitoring endpoints if monitoring is enabled
    # homelab.monitoring.endpoints =
    #   mkIf (cfg.monitoring && config.homelab.monitoring.enable)
    #   (map (job: {
    #     name = "backup-${job.name}";
    #     port = 9100; # Assuming node exporter collects backup metrics
    #     path = "/metrics";
    #     jobName = "backup";
    #     labels =
    #       cfg.defaultLabels
    #       // job.labels
    #       // {
    #         backup_job = job.name;
    #         backup_backend = job.backend;
    #       };
    #   })
    #   cfg.jobs);

    # Export backup configuration for external consumption.
    environment.etc."homelab/backup-config.json".text = builtins.toJSON {
      backends =
        # Lambda arg renamed from `config` to `backendCfg` so it no
        # longer shadows the module's top-level `config` argument.
        mapAttrs (name: backendCfg: {
          inherit name;
          enabled = backendCfg.enable or false;
        })
        cfg.backends;
      jobs =
        map (job: {
          inherit (job) name backend labels;
          allLabels = cfg.defaultLabels // job.labels;
          paths = job.backendOptions.paths or [];
          schedule = job.backendOptions.timerConfig.OnCalendar or job.backendOptions.startAt or "unknown";
          node = homelabCfg.hostname;
          environment = homelabCfg.environment;
          location = homelabCfg.location;
        })
        cfg.jobs;
    };
  };
}

View file

@ -1,105 +0,0 @@
# Restic backend for the homelab backup framework: declares the backend's
# option set and expands each restic-targeting job in homelab.backups.jobs
# into a services.restic.backups.<name> entry.
{
  config,
  lib,
  ...
}:
with lib; let
  cfg = config.homelab.backups;

  # Get restic backend config if it exists
  resticBackend = cfg.backends.restic or null;
  resticEnabled = resticBackend.enable or false;

  # Filter jobs that use the restic backend
  resticJobs = filter (job: job.backend == "restic") cfg.jobs;
in {
  options.homelab.backups.backends.restic = mkOption {
    type = types.nullOr (types.submodule {
      options = {
        enable = mkEnableOption "Restic backup backend";
        # Default restic options - these map directly to services.restic.backups.<name>
        repository = mkOption {
          type = types.str;
          description = "Default repository for restic backups";
        };
        initialize = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = ''
            Create the repository if it doesn't exist.
          '';
        };
        passwordFile = mkOption {
          type = types.nullOr types.path;
          default = null;
          description = "Default password file for restic repository";
        };
        environmentFile = mkOption {
          type = types.nullOr types.path;
          default = null;
          description = "Default environment file for restic credentials";
        };
        paths = mkOption {
          type = types.listOf types.str;
          default = [];
          description = "Default paths to backup";
        };
        exclude = mkOption {
          type = types.listOf types.str;
          default = [];
          description = "Default exclude patterns";
        };
        timerConfig = mkOption {
          type = types.attrs;
          default = {
            OnCalendar = "daily";
            RandomizedDelaySec = "1h";
          };
          description = "Default timer configuration";
        };
        pruneOpts = mkOption {
          type = types.listOf types.str;
          default = [
            "--keep-daily 7"
            "--keep-weekly 4"
            "--keep-monthly 6"
          ];
          description = "Default pruning options";
        };
        # Allow any other restic options
        extraOptions = mkOption {
          type = types.attrs;
          default = {};
          description = "Additional default restic options";
        };
      };
    });
    default = null;
    description = "Restic backend configuration";
  };

  config = mkIf (cfg.enable && resticEnabled && length resticJobs > 0) {
    # Configure restic service for each job using the restic backend.
    # Effective precedence (lowest to highest): backend defaults <
    # backend extraOptions < per-job backendOptions.
    services.restic.backups = listToAttrs (map (
      job: let
        # Get base config without the 'enable' field
        baseConfig = removeAttrs resticBackend ["enable"];
        # Merge extraOptions into base config
        baseWithExtras = recursiveUpdate (removeAttrs baseConfig ["extraOptions"]) (baseConfig.extraOptions or {});
        # Apply job-specific overrides
        finalConfig = recursiveUpdate baseWithExtras job.backendOptions;
      in
        nameValuePair job.name finalConfig
    )
    resticJobs);
  };
}

View file

@ -1,133 +0,0 @@
# Root of the homelab fleet module: per-host identity options
# (hostname/domain/environment/...), the aggregation sub-modules, and a
# machine-readable export of the effective config at /etc/homelab/config.json.
{
  config,
  lib,
  ...
}:
with lib; let
  cfg = config.homelab;
  # Cross-node aggregation helpers (fold per-node homelab.* values into
  # fleet-wide views under homelab.global.*).
  nodeAgg = import ./lib/node-aggregation.nix {inherit lib;};
in {
  imports = [
    ./monitoring-config.nix
    ./proxy-config.nix
    ./backup-config.nix
    ./motd
    ./services
    # Global aggregation modules
    (nodeAgg.mkGlobalModule "monitoring" nodeAgg.aggregators.monitoring)
    # (nodeAgg.mkGlobalModule "logs" nodeAgg.aggregators.logs)
    (nodeAgg.mkGlobalModule "reverseProxy" nodeAgg.aggregators.reverseProxy)
    (nodeAgg.mkGlobalModule "backups" nodeAgg.aggregators.backups)
  ];

  options.homelab = {
    enable = mkEnableOption "Homelab fleet configuration";
    hostname = mkOption {
      type = types.str;
      description = "Hostname for this system";
    };
    domain = mkOption {
      type = types.str;
      default = "lab";
      description = "Base domain for the homelab";
    };
    externalDomain = mkOption {
      type = types.str;
      default = "procopius.dk";
      # Fixed typo in the user-facing description ("doamin" -> "domain").
      description = "External domain for the homelab";
    };
    environment = mkOption {
      type = types.enum ["production" "staging" "development"];
      default = "production";
      description = "Environment type";
    };
    location = mkOption {
      type = types.str;
      default = "homelab";
      description = "Physical location identifier";
    };
    tags = mkOption {
      type = types.listOf types.str;
      default = [];
      description = "Tags for this system";
    };
  };

  config = mkIf cfg.enable {
    # Set hostname
    networking.hostName = cfg.hostname;

    # Export configuration for external consumption
    environment.etc."homelab/config.json".text = builtins.toJSON {
      inherit (cfg) hostname domain environment location tags;
      monitoring = {
        # Metrics endpoints (Prometheus, etc.)
        metrics =
          map (endpoint: {
            inherit (endpoint) name host port path jobName scrapeInterval labels;
            url = "http://${endpoint.host}:${toString endpoint.port}${endpoint.path}";
          })
          cfg.global.monitoring.allMetrics or [];
        # Health check endpoints
        healthChecks =
          map (check: let
            # Determine the host based on useExternalDomain
            actualHost =
              if check.useExternalDomain
              then "${check.subdomain}.${cfg.externalDomain}"
              else check.host;
            # Build the URL
            portPart =
              if check.port != null
              then ":${toString check.port}"
              else "";
            url = "${check.protocol}://${actualHost}${portPart}${check.path}";
          in {
            inherit (check) name protocol method interval timeout conditions alerts group labels enabled;
            host = actualHost;
            port = check.port;
            path = check.path;
            url = url;
            useExternalDomain = check.useExternalDomain;
            subdomain = check.subdomain;
            sourceNode = cfg.hostname;
          })
          cfg.global.monitoring.allHealthChecks or [];
      };
      reverseProxy = {
        entries =
          map (entry: {
            inherit (entry) subdomain host port path enableAuth enableSSL;
            internalHost = "${cfg.hostname}:${toString entry.port}${entry.path}";
            externalHost = "${entry.subdomain}.${cfg.externalDomain}";
          })
          cfg.global.reverseProxy.all;
      };
      backups = {
        jobs =
          map (job: {
            inherit (job) name backend labels;
            backupId = job._backupId;
            sourceNode = job._sourceNode;
          })
          cfg.global.backups.all;
        backends = cfg.global.backups.allBackends;
        summary = {
          totalJobs = length cfg.global.backups.all;
          jobsByBackend = mapAttrs (backend: jobs: length jobs) cfg.global.backups.byBackend;
          jobsByNode = mapAttrs (node: jobs: length jobs) cfg.global.backups.byNode;
        };
      };
    };
  };
}

View file

@ -1,226 +0,0 @@
# Aggregation helpers for multi-node deployments: collect per-node option
# values (monitoring endpoints, backup jobs, reverse-proxy entries, ...)
# into flat, grouped views that "global" consumer services can read.
{lib}: let
  inherit (lib) flatten mapAttrs mapAttrsToList filter groupBy length unique attrByPath splitString;
  # Generic function to aggregate any attribute across nodes.
  # Returns the flattened item list plus common groupings and filter helpers.
  aggregateFromNodes = {
    nodes,
    attributePath, # e.g. "homelab.monitoring.endpoints" or "homelab.backups.jobs"
    enhancer ? null, # optional function to enhance each item with node context
  }: let
    # Extract the attribute from each node using the dotted path;
    # a missing path yields [] so nodes without the option are skipped.
    getNestedAttr = path: config: let
      pathList = splitString "." path;
    in
      attrByPath pathList [] config;
    # Get all items from all nodes. Every item is tagged with its origin node
    # (_nodeName/_nodeConfig/_nodeAddress) before any custom enhancer runs,
    # so enhancers can rely on those fields.
    allItems = flatten (mapAttrsToList
      (nodeName: nodeConfig: let
        items = getNestedAttr attributePath nodeConfig.config;
        baseEnhancer = item:
          item
          // {
            _nodeName = nodeName;
            _nodeConfig = nodeConfig;
            _nodeAddress = nodeConfig.config.networking.hostName or nodeName;
          };
        finalEnhancer =
          if enhancer != null
          then (item: enhancer (baseEnhancer item))
          else baseEnhancer;
      in
        map finalEnhancer items)
      nodes);
  in {
    # Raw aggregated data
    all = allItems;
    # Common grouping patterns
    byNode = groupBy (item: item._nodeName) allItems;
    # NOTE(review): this groups on `item.type`, while the specialized
    # enhancers below set `_type` — their items land under "unknown" here.
    # Confirm whether byType is meant for un-enhanced attribute paths only.
    byType = groupBy (item: item.type or "unknown") allItems;
    byService = groupBy (item: item.service or "unknown") allItems;
    # Utility functions for filtering
    filterBy = predicate: filter predicate allItems;
    ofType = type: filter (item: (item.type or "") == type) allItems;
    count = length allItems;
    countBy = fn: mapAttrs (key: items: length items) (groupBy fn allItems);
  };
  # Specialized aggregators for common use cases
  aggregators = {
    # Merge metrics endpoints and health checks from all nodes, adding
    # precomputed addresses/URLs plus per-kind groupings and summary counts.
    monitoring = nodes: let
      # Aggregate metrics endpoints
      metricsAgg = aggregateFromNodes {
        inherit nodes;
        attributePath = "homelab.monitoring.metrics";
        enhancer = endpoint:
          endpoint
          // {
            _fullAddress = "${endpoint.host or endpoint._nodeAddress}:${toString endpoint.port}";
            _metricsUrl = "http://${endpoint.host or endpoint._nodeAddress}:${toString endpoint.port}${endpoint.path or "/metrics"}";
            _type = "metrics";
          };
      };
      # Aggregate health checks
      healthChecksAgg = aggregateFromNodes {
        inherit nodes;
        attributePath = "homelab.monitoring.healthChecks";
        enhancer = check: let
          # Compute the actual host and URL. External checks use
          # <subdomain>.<externalDomain>; internal ones use the node host.
          actualHost =
            if check.useExternalDomain or false
            then "${check.subdomain}.${check._nodeConfig.config.homelab.externalDomain or "example.com"}"
            else check.host or check._nodeAddress;
          portPart =
            if check.port != null
            then ":${toString check.port}"
            else "";
          url = "${check.protocol or "http"}://${actualHost}${portPart}${check.path or "/"}";
        in
          check
          // {
            _actualHost = actualHost;
            _url = url;
            _type = "health-check";
            # Merge default labels with node context
            labels =
              (check.labels or {})
              // {
                node = check._nodeName;
                environment = check._nodeConfig.config.homelab.environment or "unknown";
              };
          };
      };
    in
      metricsAgg
      // healthChecksAgg
      // {
        # Metrics-specific aggregations
        allMetrics = metricsAgg.all;
        metricsByNode = metricsAgg.byNode;
        metricsByJobName = groupBy (m: m.jobName or "unknown") metricsAgg.all;
        # Health checks-specific aggregations
        allHealthChecks = healthChecksAgg.all;
        healthChecksByNode = healthChecksAgg.byNode;
        healthChecksByGroup = groupBy (hc: hc.group or "default") healthChecksAgg.all;
        healthChecksByProtocol = groupBy (hc: hc.protocol or "http") healthChecksAgg.all;
        # Filtered health checks
        externalHealthChecks = filter (hc: hc.useExternalDomain or false) healthChecksAgg.all;
        internalHealthChecks = filter (hc: !(hc.useExternalDomain or false)) healthChecksAgg.all;
        enabledHealthChecks = filter (hc: hc.enabled or true) healthChecksAgg.all;
        # Summary statistics
        summary = {
          totalMetrics = length metricsAgg.all;
          totalHealthChecks = length healthChecksAgg.all;
          healthChecksByGroup =
            mapAttrs (group: checks: length checks)
            (groupBy (hc: hc.group or "default") healthChecksAgg.all);
          healthChecksByProtocol =
            mapAttrs (protocol: checks: length checks)
            (groupBy (hc: hc.protocol or "http") healthChecksAgg.all);
          externalChecksCount = length (filter (hc: hc.useExternalDomain or false) healthChecksAgg.all);
          internalChecksCount = length (filter (hc: !(hc.useExternalDomain or false)) healthChecksAgg.all);
        };
      };
    # Promtail log configurations
    # logs = nodes:
    #   aggregateFromNodes {
    #     inherit nodes;
    #     attributePath = "homelab.logging.sources";
    #     enhancer = logSource:
    #       logSource
    #       // {
    #         # Add log-specific computed fields
    #         _logPath = logSource.path or "/var/log/${logSource.service}.log";
    #         _labels =
    #           (logSource.labels or {})
    #           // {
    #             node = logSource._nodeName;
    #             service = logSource.service or "unknown";
    #           };
    #       };
    #   };
    # Reverse proxy configurations
    reverseProxy = nodes:
      aggregateFromNodes {
        inherit nodes;
        attributePath = "homelab.reverseProxy.entries";
        enhancer = entry:
          entry
          // {
            # Add proxy-specific computed fields
            _upstream = "http://${entry.host or entry._nodeAddress}:${toString entry.port}";
            _fqdn = "${entry.subdomain or entry.service}.${entry.domain or "local"}";
          };
      };
    # Backup jobs with enhanced aggregation
    backups = nodes: let
      baseAgg = aggregateFromNodes {
        inherit nodes;
        attributePath = "homelab.backups.jobs";
        enhancer = backup:
          backup
          // {
            _sourceNode = backup._nodeName;
            _backupId = "${backup._nodeName}-${backup.name}";
            _jobFqdn = "${backup.name}.${backup._nodeName}";
          };
      };
      # Get all unique backends across all nodes
      # (a backend counts as enabled when its attribute value is non-null).
      allBackends = let
        allBackendConfigs =
          mapAttrsToList
          (nodeName: nodeConfig:
            attrByPath ["homelab" "backups" "backends"] {} nodeConfig.config)
          nodes;
        enabledBackends = flatten (map (backends:
          filter (name: backends.${name} != null) (lib.attrNames backends))
        allBackendConfigs);
      in
        unique enabledBackends;
    in
      baseAgg
      // {
        # Backup-specific aggregations
        byBackend = groupBy (job: job.backend) baseAgg.all;
        allBackends = allBackends;
        # Enhanced summary
        summary = {
          totalJobs = length baseAgg.all;
          jobsByBackend =
            mapAttrs (backend: jobs: length jobs)
            (groupBy (job: job.backend) baseAgg.all);
          jobsByNode = baseAgg.countBy (job: job._nodeName);
          availableBackends = allBackends;
          backendsInUse = unique (map (job: job.backend) baseAgg.all);
        };
      };
  };
in {
  inherit aggregateFromNodes aggregators;
  # Convenience function to create a module that exposes an aggregation as a
  # read-only option under `homelab.global.<attributeName>`, computed from
  # the deployment's `nodes` argument.
  mkGlobalModule = attributeName: aggregatorFn: {
    lib,
    nodes,
    ...
  }: {
    options.homelab.global.${attributeName} = lib.mkOption {
      type = lib.types.attrs;
      readOnly = true;
      description = "Globally aggregated ${attributeName} from all nodes";
    };
    config.homelab.global.${attributeName} = aggregatorFn nodes;
  };
}

View file

@ -1,295 +0,0 @@
# Standard service interface for homelab services
# This provides a consistent contract that all services should follow
{lib}: let
  inherit (lib) mkOption mkEnableOption types;
  # Build the option set for a service: the standard contract (enable, port,
  # openFirewall, proxy.*, monitoring.*, description, extraOptions) merged
  # with any service-specific `serviceOptions`.
  mkServiceInterface = {
    serviceName,
    defaultPort ? null,
    defaultSubdomain ? serviceName,
    defaultDescription ? "Homelab ${serviceName} service",
    monitoringPath ? "/metrics",
    healthCheckPath ? "/health",
    healthCheckConditions ? ["[STATUS] == 200"],
    # Custom options that the service wants to expose
    serviceOptions ? {},
  }:
    {
      # Standard interface options that all services must have
      enable = mkEnableOption defaultDescription;
      port = mkOption {
        type = types.port;
        # Evaluating the default without a defaultPort is a hard error:
        # every service must declare the port it listens on.
        default =
          if defaultPort != null
          then defaultPort
          else throw "Service ${serviceName} must specify a default port";
        description = "Port for ${serviceName} service";
      };
      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = "Whether to automatically open firewall ports";
      };
      proxy = {
        enable = mkOption {
          type = types.bool;
          default = true;
          description = "Enable reverse proxy for this service";
        };
        subdomain = mkOption {
          type = types.str;
          default = defaultSubdomain;
          description = "Subdomain for reverse proxy (${defaultSubdomain}.yourdomain.com)";
        };
        enableAuth = mkOption {
          type = types.bool;
          default = false;
          description = "Enable authentication for reverse proxy";
        };
        enableSSL = mkOption {
          type = types.bool;
          default = true;
          description = "Enable SSL for reverse proxy";
        };
      };
      monitoring = {
        enable = mkOption {
          type = types.bool;
          default = true;
          description = "Enable monitoring (metrics and health checks)";
        };
        metricsPath = mkOption {
          type = types.str;
          default = monitoringPath;
          description = "Path for metrics endpoint";
        };
        jobName = mkOption {
          type = types.str;
          default = serviceName;
          description = "Prometheus job name";
        };
        scrapeInterval = mkOption {
          type = types.str;
          default = "30s";
          description = "Prometheus scrape interval";
        };
        healthCheck = {
          enable = mkOption {
            type = types.bool;
            default = true;
            description = "Enable health check monitoring";
          };
          path = mkOption {
            type = types.str;
            default = healthCheckPath;
            description = "Path for health check endpoint";
          };
          interval = mkOption {
            type = types.str;
            default = "30s";
            description = "Health check interval";
          };
          timeout = mkOption {
            type = types.str;
            default = "10s";
            description = "Health check timeout";
          };
          conditions = mkOption {
            type = types.listOf types.str;
            default = healthCheckConditions;
            description = "Health check conditions";
          };
          group = mkOption {
            type = types.str;
            default = "services";
            description = "Health check group name";
          };
        };
        extraLabels = mkOption {
          type = types.attrsOf types.str;
          default = {};
          description = "Additional labels for monitoring";
        };
      };
      description = mkOption {
        type = types.str;
        default = defaultDescription;
        description = "Service description";
      };
      extraOptions = mkOption {
        type = types.attrs;
        default = {};
        description = "Additional service-specific configuration options";
      };
      # Merge in service-specific options
    }
    // serviceOptions;
  # Implement the standard service behavior: the service's own configuration
  # merged with firewall opening, metrics/health-check registration and
  # reverse-proxy publication. The whole result is gated on `cfg.enable`.
  mkServiceConfig = {
    config,
    cfg,
    homelabCfg,
    serviceName,
    # Attribute set with the actual service configuration
    # (merged directly via lib.mkMerge below).
    serviceConfig,
    # Optional: custom monitoring labels
    extraMonitoringLabels ? {},
    # Optional: custom health check configuration
    customHealthChecks ? [],
    # Optional: custom reverse proxy configuration
    customProxyConfig ? {},
  }: let
    # Standard monitoring labels; later operands of // win, so
    # cfg.monitoring.extraLabels overrides extraMonitoringLabels overrides
    # the built-in defaults.
    standardLabels =
      {
        service = serviceName;
        component = "main";
        instance = "${homelabCfg.hostname}.${homelabCfg.domain}";
      }
      // extraMonitoringLabels
      // cfg.monitoring.extraLabels;
    # Standard reverse proxy entry; customProxyConfig overrides fields.
    standardProxyEntry =
      {
        subdomain = cfg.proxy.subdomain;
        host = homelabCfg.hostname;
        port = cfg.port;
        enableAuth = cfg.proxy.enableAuth;
        enableSSL = cfg.proxy.enableSSL;
      }
      // customProxyConfig;
    # Standard metrics configuration ([] when monitoring is disabled)
    standardMetrics = lib.optional cfg.monitoring.enable {
      name = "${serviceName}-metrics";
      port = cfg.port;
      path = cfg.monitoring.metricsPath;
      jobName = cfg.monitoring.jobName;
      scrapeInterval = cfg.monitoring.scrapeInterval;
      labels = standardLabels;
    };
    # Standard health check configuration ([] unless both toggles are on)
    standardHealthCheck = lib.optional (cfg.monitoring.enable && cfg.monitoring.healthCheck.enable) {
      name = "${serviceName}-health";
      port = cfg.port;
      path = cfg.monitoring.healthCheck.path;
      interval = cfg.monitoring.healthCheck.interval;
      timeout = cfg.monitoring.healthCheck.timeout;
      conditions = cfg.monitoring.healthCheck.conditions;
      group = cfg.monitoring.healthCheck.group;
      labels = standardLabels;
    };
    # Merge service config with standard behaviors
    baseConfig = lib.mkMerge [
      # Service-specific configuration
      serviceConfig
      # Standard firewall configuration
      (lib.mkIf cfg.openFirewall {
        networking.firewall.allowedTCPPorts = [cfg.port];
      })
      # Standard monitoring configuration
      (lib.mkIf cfg.monitoring.enable {
        homelab.monitoring.metrics = standardMetrics;
        homelab.monitoring.healthChecks = standardHealthCheck ++ customHealthChecks;
      })
      # Standard reverse proxy configuration
      (lib.mkIf cfg.proxy.enable {
        homelab.reverseProxy.entries = [standardProxyEntry];
      })
    ];
  in
    lib.mkIf cfg.enable baseConfig;
  # Validation helper to ensure required options are set.
  # NOTE(review): `throw` under mkIf is only triggered lazily when the value
  # is forced; NixOS `assertions` would give friendlier, guaranteed errors.
  validateServiceConfig = cfg: serviceName: [
    # Validate that if proxy is enabled, subdomain is set
    (lib.mkIf (cfg.proxy.enable && cfg.proxy.subdomain == "")
      (throw "Service ${serviceName}: proxy.subdomain is required when proxy.enable is true"))
    # Validate that if monitoring is enabled, required paths are set
    (lib.mkIf (cfg.monitoring.enable && cfg.monitoring.metricsPath == "")
      (throw "Service ${serviceName}: monitoring.metricsPath cannot be empty when monitoring is enabled"))
  ];
in {
  inherit mkServiceInterface mkServiceConfig validateServiceConfig;
  # Common service option patterns
  commonOptions = {
    # Log level option
    logLevel = mkOption {
      type = types.enum ["debug" "info" "warn" "error"];
      default = "info";
      description = "Log level";
    };
    # Environment file option (for secrets)
    environmentFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = "Environment file for secrets";
    };
    # Data directory option: takes the service name and defaults to the
    # conventional /var/lib/<service> location. (Previously missing even
    # though consumers call `commonOptions.dataDir "<service>"`.)
    dataDir = serviceName:
      mkOption {
        type = types.str;
        default = "/var/lib/${serviceName}";
        description = "Data directory for ${serviceName}";
      };
    # External URL option
    externalUrl = serviceName: homelabCfg:
      mkOption {
        type = types.str;
        default = "https://${serviceName}.${homelabCfg.externalDomain}";
        description = "External URL for ${serviceName}";
      };
  };
  # Helper for creating service modules with the interface
  mkServiceModule = {
    serviceName,
    defaultPort,
    defaultSubdomain ? serviceName,
    serviceOptions ? {},
    ...
  } @ args: {
    config,
    lib,
    ...
  }: let
    cfg = config.homelab.services.${serviceName};
    homelabCfg = config.homelab;
    serviceInterface = mkServiceInterface {
      inherit serviceName defaultPort defaultSubdomain serviceOptions;
    };
  in {
    options.homelab.services.${serviceName} = serviceInterface;
    config = mkServiceConfig {
      inherit config cfg homelabCfg serviceName;
      # Service implementor must provide this function
      serviceConfig = args.serviceConfig or (throw "mkServiceModule requires serviceConfig function");
    };
  };
}

View file

@ -1,214 +0,0 @@
# Per-node monitoring declarations: metrics endpoints and health checks that
# this host exposes (to be aggregated globally), plus a local node exporter.
{
  config,
  lib,
  ...
}:
with lib; let
  cfg = config.homelab.monitoring;
  homelabCfg = config.homelab;
  # A Prometheus scrape target exposed by this node.
  metricsEndpointType = types.submodule {
    options = {
      name = mkOption {
        type = types.str;
        description = "Name of the metrics endpoint";
      };
      host = mkOption {
        type = types.str;
        description = "Domain name of the host (default: hostname.domain)";
        default = "${homelabCfg.hostname}.${homelabCfg.domain}";
      };
      port = mkOption {
        type = types.port;
        description = "Port number for the endpoint";
      };
      path = mkOption {
        type = types.str;
        default = "/metrics";
        description = "Path for the metrics endpoint";
      };
      jobName = mkOption {
        type = types.str;
        description = "Prometheus job name";
      };
      scrapeInterval = mkOption {
        type = types.str;
        default = "30s";
        description = "Prometheus scrape interval";
      };
      labels = mkOption {
        type = types.attrsOf types.str;
        default = {};
        description = "Additional labels for this endpoint";
      };
    };
  };
  # An uptime/health check declared by this node (Gatus-style conditions).
  healthCheckEndpointType = types.submodule {
    options = {
      name = mkOption {
        type = types.str;
        description = "Name of the health check endpoint";
      };
      host = mkOption {
        type = types.str;
        description = "Domain name of the host";
        default = "${homelabCfg.hostname}.${homelabCfg.domain}";
      };
      port = mkOption {
        type = types.nullOr types.port;
        default = null;
        description = "Port number for the endpoint (null for standard HTTP/HTTPS)";
      };
      path = mkOption {
        type = types.str;
        default = "/";
        description = "Path for the health check endpoint";
      };
      protocol = mkOption {
        type = types.enum ["http" "https" "tcp" "icmp"];
        default = "http";
        description = "Protocol to use for health checks";
      };
      method = mkOption {
        type = types.str;
        default = "GET";
        description = "HTTP method for health checks (only applies to http/https)";
      };
      interval = mkOption {
        type = types.str;
        default = "30s";
        description = "Health check interval";
      };
      timeout = mkOption {
        type = types.str;
        default = "10s";
        description = "Health check timeout";
      };
      conditions = mkOption {
        type = types.listOf types.str;
        default = ["[STATUS] == 200"];
        description = "Health check conditions (Gatus format)";
        example = ["[STATUS] == 200" "[BODY].status == UP" "[RESPONSE_TIME] < 500"];
      };
      # Per-check alert routing (failure/success thresholds per alert type).
      alerts = mkOption {
        type = types.listOf (types.submodule {
          options = {
            type = mkOption {
              type = types.str;
              description = "Alert type";
              example = "discord";
            };
            enabled = mkOption {
              type = types.bool;
              default = true;
              description = "Whether this alert is enabled";
            };
            failure-threshold = mkOption {
              type = types.int;
              default = 3;
              description = "Number of failures before alerting";
            };
            success-threshold = mkOption {
              type = types.int;
              default = 2;
              description = "Number of successes before resolving alert";
            };
          };
        });
        default = [];
        description = "Alert configurations";
      };
      group = mkOption {
        type = types.str;
        default = "default";
        description = "Group name for organizing health checks";
      };
      labels = mkOption {
        type = types.attrsOf types.str;
        default = {};
        description = "Additional labels for this health check";
      };
      enabled = mkOption {
        type = types.bool;
        default = true;
        description = "Whether this health check is enabled";
      };
      # External domain support
      useExternalDomain = mkOption {
        type = types.bool;
        default = false;
        description = "Use external domain instead of internal";
      };
      subdomain = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "Subdomain for external domain (required if useExternalDomain is true)";
      };
    };
  };
in {
  options.homelab.monitoring = {
    enable = mkEnableOption "Homelab monitoring";
    metrics = mkOption {
      type = types.listOf metricsEndpointType;
      default = [];
      description = "Metric endpoints exposed by this system";
    };
    healthChecks = mkOption {
      type = types.listOf healthCheckEndpointType;
      default = [];
      description = "Health check endpoints for uptime monitoring";
    };
    nodeExporter = {
      enable = mkOption {
        type = types.bool;
        default = true;
        description = "Enable node exporter";
      };
      port = mkOption {
        type = types.port;
        default = 9100;
        description = "Node exporter port";
      };
    };
  };
  config = mkIf cfg.enable {
    # Configure node exporter if enabled
    services.prometheus.exporters.node = mkIf cfg.nodeExporter.enable {
      enable = true;
      port = cfg.nodeExporter.port;
      enabledCollectors = [
        "systemd"
        "textfile"
        "filesystem"
        "loadavg"
        "meminfo"
        "netdev"
        "stat"
      ];
    };
    # Automatically add node exporter to monitoring endpoints
    # (module self-registers into its own metrics list; list options merge).
    homelab.monitoring.metrics = mkIf cfg.nodeExporter.enable [
      {
        name = "node-exporter";
        port = cfg.nodeExporter.port;
        path = "/metrics";
        jobName = "node";
        labels = {
          instance = "${homelabCfg.hostname}.${homelabCfg.domain}";
          environment = homelabCfg.environment;
          location = homelabCfg.location;
        };
      }
    ];
    networking.firewall.allowedTCPPorts = optionals cfg.nodeExporter.enable [
      cfg.nodeExporter.port
    ];
  };
}

View file

@ -1,397 +0,0 @@
# modules/motd/default.nix
# Login MOTD for homelab hosts: a summary banner (system stats, service
# status, backup/error warnings) plus three helper status commands.
# All shell scripts below are Nix indented-string literals; ''${...} escapes
# produce literal ${...} for bash, while ${pkgs...} is interpolated by Nix.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.motd;
  # Main MOTD banner shown on interactive login (see interactiveShellInit).
  homelab-motd = pkgs.writeShellScriptBin "homelab-motd" ''
    #! /usr/bin/env bash
    # Colors for output
    RED="\e[31m"
    GREEN="\e[32m"
    YELLOW="\e[33m"
    BLUE='\e[0;34m'
    CYAN='\e[0;36m'
    WHITE='\e[1;37m'
    NC='\e[0m' # No Color
    BOLD='\e[1m'
    # Helper functions
    print_header() {
      echo -e "''${BOLD}''${BLUE}''${NC}"
      echo -e "''${BOLD}''${BLUE}''${NC}''${WHITE} 🏠 $(hostname -s) HOMELAB ''${NC}''${BOLD}''${BLUE}''${NC}"
      echo -e "''${BOLD}''${BLUE}''${NC}"
    }
    print_section() {
      echo -e "\n''${BOLD}''${CYAN} $1''${NC}"
      echo -e "''${CYAN}''${NC}"
    }
    get_service_status() {
      local service="$1"
      if ${pkgs.systemd}/bin/systemctl is-active --quiet "$service" 2>/dev/null; then
        echo -e "''${GREEN}''${NC}"
      elif ${pkgs.systemd}/bin/systemctl is-enabled --quiet "$service" 2>/dev/null; then
        echo -e "''${YELLOW}''${NC}"
      else
        echo -e "''${RED}×''${NC}"
      fi
    }
    check_backup_issues() {
      local issues=0
      # Check for failed backup services in the last 24 hours
      if ${pkgs.systemd}/bin/journalctl --since "24 hours ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager -q 2>/dev/null | grep -q .; then
        issues=$((issues + 1))
      fi
      # Check for failed backup timers
      local failed_timers=$(${pkgs.systemd}/bin/systemctl list-timers --failed --no-pager --no-legend 2>/dev/null | grep -E "(backup|restic|borgbackup)" | wc -l)
      issues=$((issues + failed_timers))
      echo $issues
    }
    # Main script
    ${optionalString cfg.clearScreen "clear"}
    print_header
    # System info
    print_section "SYSTEM"
    echo -e " ''${BOLD}Uptime:''${NC} $(${pkgs.procps}/bin/uptime -p | sed 's/up //')"
    echo -e " ''${BOLD}Load:''${NC} $(${pkgs.procps}/bin/uptime | awk -F'load average:' '{print $2}' | xargs)"
    echo -e " ''${BOLD}Memory:''${NC} $(${pkgs.procps}/bin/free -h | awk '/^Mem:/ {printf "%s/%s", $3, $2}')"
    echo -e " ''${BOLD}Disk:''${NC} $(${pkgs.coreutils}/bin/df -h / | awk 'NR==2 {printf "%s/%s (%s)", $3, $2, $5}')"
    ${optionalString cfg.showServices ''
      # Local homelab services (auto-detected + manual)
      print_section "HOMELAB SERVICES"
      # Auto-detect services from homelab configuration
      ${optionalString (config.homelab.services.gatus.enable or false) ''
        status=$(get_service_status "gatus")
        printf " %-20s %b %s\n" "gatus" "$status" "Uptime monitoring"
      ''}
      ${optionalString (config.homelab.services.prometheus.enable or false) ''
        status=$(get_service_status "prometheus")
        printf " %-20s %b %s\n" "prometheus" "$status" "Metrics collection"
      ''}
      ${optionalString (config.homelab.services.grafana.enable or false) ''
        status=$(get_service_status "grafana")
        printf " %-20s %b %s\n" "grafana" "$status" "Monitoring dashboard"
      ''}
      ${optionalString (config.homelab.services.alertmanager.enable or false) ''
        status=$(get_service_status "alertmanager")
        printf " %-20s %b %s\n" "alertmanager" "$status" "Alert routing"
      ''}
      ${optionalString (config.services.nginx.enable or false) ''
        status=$(get_service_status "nginx")
        printf " %-20s %b %s\n" "nginx" "$status" "Web server/proxy"
      ''}
      ${optionalString (config.services.postgresql.enable or false) ''
        status=$(get_service_status "postgresql")
        printf " %-20s %b %s\n" "postgresql" "$status" "Database server"
      ''}
      ${optionalString (config.services.redis.server.enable or false) ''
        status=$(get_service_status "redis")
        printf " %-20s %b %s\n" "redis" "$status" "Key-value store"
      ''}
      # Manual services from configuration
      ${concatStringsSep "\n" (mapAttrsToList (name: service: ''
          status=$(get_service_status "${service.systemdService}")
          printf " %-20s %b %s\n" "${name}" "$status" "${service.description}"
        '')
        cfg.services)}
      # Show legend
      echo -e "\n ''${GREEN}''${NC} Active ''${YELLOW}''${NC} Inactive ''${RED}×''${NC} Disabled"
    ''}
    # Quick backup check
    backup_issues=$(check_backup_issues)
    if [[ $backup_issues -gt 0 ]]; then
      echo -e "\n''${BOLD}''${RED} WARNING: $backup_issues backup issues detected!''${NC}"
      echo -e " Run ''${BOLD}homelab-backup-status''${NC} for details"
    fi
    # Recent critical issues
    error_count=$(${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q 2>/dev/null | wc -l || echo 0)
    if [[ "$error_count" -gt 0 ]]; then
      echo -e "\n''${BOLD}''${YELLOW} $error_count system errors in last 24h''${NC}"
      echo -e " Run ''${BOLD}journalctl --priority=err --since='24 hours ago' ''${NC} for details"
    fi
    # Helpful commands
    echo -e "\n''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${WHITE}Useful commands: ''${NC}''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${CYAN}homelab-monitor-status''${NC} - Monitoring overview ''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${CYAN}homelab-backup-status''${NC} - Backup jobs status ''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${CYAN}homelab-proxy-status''${NC} - Reverse proxy entries ''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${CYAN}systemctl status <srv>''${NC} - Check specific service ''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC}"
    echo
  '';
  # Helper script for monitoring status: reads the node's aggregated
  # /etc/homelab/config.json and prints metrics endpoints and health checks.
  # NOTE(review): the ''${GREEN}/''${NC} sequences inside the single-quoted jq
  # programs below are not expanded by bash (single quotes suppress parameter
  # expansion), so they will appear literally in the output — confirm intent.
  homelab-monitor-status = pkgs.writeShellScriptBin "homelab-monitor-status" ''
    #! /usr/bin/env bash
    # Colors
    RED="\e[31m"
    GREEN="\e[32m"
    YELLOW="\e[33m"
    BLUE='\e[0;34m'
    CYAN='\e[0;36m'
    WHITE='\e[1;37m'
    NC='\e[0m'
    BOLD='\e[1m'
    CONFIG_FILE="/etc/homelab/config.json"
    if [[ ! -f "$CONFIG_FILE" ]]; then
      echo -e "''${RED} Global homelab configuration not found''${NC}"
      exit 1
    fi
    echo -e "''${BOLD}''${BLUE}📊 Homelab Monitoring Status''${NC}"
    echo -e "''${BLUE}=============================''${NC}"
    # Show metrics endpoints
    echo -e "\n''${BOLD}''${CYAN}Metrics Endpoints:''${NC}"
    metrics_count=$(${pkgs.jq}/bin/jq '.monitoring.metrics | length' "$CONFIG_FILE" 2>/dev/null || echo 0)
    if [[ $metrics_count -gt 0 ]]; then
      ${pkgs.jq}/bin/jq -r '.monitoring.metrics[]? | " ''${GREEN}''${NC} \(.name): ''${BOLD}\(.host):\(.port)''${NC}\(.path) ''${YELLOW}(job: \(.jobName))''${NC}"' "$CONFIG_FILE" 2>/dev/null
      echo -e "\n ''${BOLD}Total: ''${metrics_count} endpoints''${NC}"
    else
      echo -e " ''${YELLOW}No metrics endpoints configured''${NC}"
    fi
    # Show health checks by group
    echo -e "\n''${BOLD}''${CYAN}Health Checks:''${NC}"
    health_count=$(${pkgs.jq}/bin/jq '.monitoring.healthChecks | length' "$CONFIG_FILE" 2>/dev/null || echo 0)
    if [[ $health_count -gt 0 ]]; then
      # Group health checks
      ${pkgs.jq}/bin/jq -r '
        .monitoring.healthChecks |
        group_by(.group // "default") |
        .[] |
        "''${BOLD} \(.[0].group // "default" | ascii_upcase) Group:''${NC}" as $header |
        ($header, (
          .[] |
          " ''${if .enabled // true then "''${GREEN}" else "''${YELLOW}" end}''${NC} \(.name): ''${BOLD}\(.protocol)://\(.host)\(if .port then ":\(.port)" else "" end)''${NC}\(.path)"
        ))
      ' "$CONFIG_FILE" 2>/dev/null
      echo -e "\n ''${BOLD}Total: ''${health_count} health checks''${NC}"
    else
      echo -e " ''${YELLOW}No health checks configured''${NC}"
    fi
    echo -e "\n''${CYAN}Run ''${BOLD}homelab-proxy-status''${NC}''${CYAN} and ''${BOLD}homelab-backup-status''${NC}''${CYAN} for more details.''${NC}"
  '';
  # Helper script for backup status: systemd timers, recent journal activity,
  # and the job summary from /etc/homelab/config.json when present.
  homelab-backup-status = pkgs.writeShellScriptBin "homelab-backup-status" ''
    #! /usr/bin/env bash
    # Colors
    RED="\e[31m"
    GREEN="\e[32m"
    YELLOW="\e[33m"
    BLUE='\e[0;34m'
    CYAN='\e[0;36m'
    WHITE='\e[1;37m'
    NC='\e[0m'
    BOLD='\e[1m'
    echo -e "''${BOLD}''${BLUE}💾 Backup Status''${NC}"
    echo -e "''${BLUE}===============''${NC}"
    # Check backup timers
    echo -e "\n''${BOLD}''${CYAN}Backup Timers:''${NC}"
    backup_timers=$(${pkgs.systemd}/bin/systemctl list-timers --no-pager --no-legend 2>/dev/null | grep -E "(backup|restic|borgbackup)")
    if [[ -n "$backup_timers" ]]; then
      while IFS= read -r line; do
        if [[ -n "$line" ]]; then
          next=$(echo "$line" | awk '{print $1, $2}')
          left=$(echo "$line" | awk '{print $3}')
          timer=$(echo "$line" | awk '{print $5}')
          service=$(echo "$line" | awk '{print $6}')
          # Color code based on time left
          if [[ "$left" == "n/a" ]]; then
            color="''${RED}"
            status=""
          elif echo "$left" | grep -qE "(sec|min|[0-9]h)"; then
            color="''${YELLOW}"
            status=""
          else
            color="''${GREEN}"
            status=""
          fi
          printf " %b%s%b %-25s Next: %s (%s)\n" "$color" "$status" "$NC" "$(basename "$timer" .timer)" "$next" "$left"
        fi
      done <<< "$backup_timers"
    else
      echo -e " ''${YELLOW}No backup timers found''${NC}"
    fi
    # Check recent backup activity (last 3 days, summarized)
    echo -e "\n''${BOLD}''${CYAN}Recent Activity (3 days):''${NC}"
    # Count successful vs failed backups
    success_count=$(${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --no-pager -q 2>/dev/null | grep -iE "(completed|success|finished)" | wc -l)
    error_count=$(${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager -q 2>/dev/null | wc -l)
    if [[ $success_count -gt 0 ]]; then
      echo -e " ''${GREEN} $success_count successful backups''${NC}"
    fi
    if [[ $error_count -gt 0 ]]; then
      echo -e " ''${RED} $error_count failed backups''${NC}"
      echo -e "\n''${BOLD}''${RED}Recent Failures:''${NC}"
      ${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager --lines=3 2>/dev/null | while read -r line; do
        # Extract just the important parts
        timestamp=$(echo "$line" | awk '{print $1, $2, $3}')
        service=$(echo "$line" | grep -oE "(restic-backups-[^[]+|borgbackup-job-[^[]+|[^[]*backup[^[]*)" | head -1)
        message=$(echo "$line" | sed -E 's/.*\]: //' | cut -c1-60)
        echo -e " ''${YELLOW}$timestamp''${NC} ''${BOLD}$service''${NC}: $message..."
      done
    elif [[ $success_count -eq 0 ]]; then
      echo -e " ''${YELLOW} No backup activity in last 3 days''${NC}"
    else
      echo -e " ''${GREEN} All backups completed successfully''${NC}"
    fi
    # Show backup summary from global config if available
    CONFIG_FILE="/etc/homelab/config.json"
    if [[ -f "$CONFIG_FILE" ]]; then
      total_jobs=$(${pkgs.jq}/bin/jq -r '.backups.summary.totalJobs // 0' "$CONFIG_FILE" 2>/dev/null)
      backends=$(${pkgs.jq}/bin/jq -r '.backups.summary.backendsInUse[]?' "$CONFIG_FILE" 2>/dev/null | tr '\n' ' ')
      if [[ $total_jobs -gt 0 ]]; then
        echo -e "\n''${BOLD}''${CYAN}Configuration:''${NC}"
        echo -e " ''${BOLD}Total jobs:''${NC} $total_jobs"
        if [[ -n "$backends" ]]; then
          echo -e " ''${BOLD}Backends:''${NC} $backends"
        fi
      fi
    fi
  '';
  # Helper script for proxy status: lists reverse-proxy entries from the
  # aggregated /etc/homelab/config.json.
  homelab-proxy-status = pkgs.writeShellScriptBin "homelab-proxy-status" ''
    #! /usr/bin/env bash
    # Colors
    RED="\e[31m"
    GREEN="\e[32m"
    YELLOW="\e[33m"
    BLUE='\e[0;34m'
    CYAN='\e[0;36m'
    WHITE='\e[1;37m'
    NC='\e[0m'
    BOLD='\e[1m'
    CONFIG_FILE="/etc/homelab/config.json"
    if [[ ! -f "$CONFIG_FILE" ]]; then
      echo -e "''${RED} Global homelab configuration not found''${NC}"
      exit 1
    fi
    echo -e "''${BOLD}''${BLUE}🔗 Reverse Proxy Status''${NC}"
    echo -e "''${BLUE}======================''${NC}"
    proxy_count=$(${pkgs.jq}/bin/jq '.reverseProxy.entries | length' "$CONFIG_FILE" 2>/dev/null || echo 0)
    if [[ $proxy_count -gt 0 ]]; then
      ${pkgs.jq}/bin/jq -r '.reverseProxy.entries[]? |
        " ''${GREEN}''${NC} ''${BOLD}\(.subdomain)''${NC}: \(.externalHost) \(.internalHost)\(if .enableAuth then " ''${YELLOW}🔐''${NC}" else "" end)\(if .enableSSL then " ''${GREEN}🔒''${NC}" else "" end)"' "$CONFIG_FILE" 2>/dev/null
      echo -e "\n''${BOLD}Legend:''${NC} ''${YELLOW}🔐''${NC} Auth enabled, ''${GREEN}🔒''${NC} SSL enabled"
      echo -e "''${BOLD}Total: ''${proxy_count} proxy entries''${NC}"
    else
      echo -e " ''${YELLOW}No proxy entries configured''${NC}"
    fi
  '';
in {
  options.homelab.motd = {
    enable = mkEnableOption "Simple homelab MOTD";
    clearScreen = mkOption {
      type = types.bool;
      default = true;
      description = "Clear screen before showing MOTD";
    };
    showServices = mkOption {
      type = types.bool;
      default = true;
      description = "Show local homelab services status";
    };
    # Extra services to display, beyond the auto-detected ones above.
    services = mkOption {
      type = types.attrsOf (types.submodule {
        options = {
          systemdService = mkOption {
            type = types.str;
            description = "Name of the systemd service to monitor";
          };
          description = mkOption {
            type = types.str;
            default = "";
            description = "Human-readable description of the service";
          };
        };
      });
      default = {};
      description = "Local homelab services to show in MOTD";
      example = literalExpression ''
        {
          "nginx" = {
            systemdService = "nginx";
            description = "Web server";
          };
          "grafana" = {
            systemdService = "grafana";
            description = "Monitoring dashboard";
          };
        }
      '';
    };
  };
  config = mkIf cfg.enable {
    # Create helper commands
    environment.systemPackages = with pkgs; [
      jq
      homelab-motd
      homelab-monitor-status
      homelab-backup-status
      homelab-proxy-status
    ];
    # Set up MOTD to run on login
    programs.bash.interactiveShellInit = ''
      # Run homelab MOTD on interactive login (only once per session)
      if [[ $- == *i* ]] && [[ -z "$MOTD_SHOWN" ]] && [[ -n "$SSH_CONNECTION" || "$TERM" == "linux" ]]; then
        export MOTD_SHOWN=1
        ${homelab-motd}/bin/homelab-motd
      fi
    '';
    # Disable default MOTD
    users.motd = mkDefault "";
    security.pam.services.login.showMotd = mkDefault false;
  };
}

View file

@ -1,53 +0,0 @@
# Per-node reverse-proxy declarations. This module only declares data
# (the `entries` list); the entries are consumed elsewhere by the global
# aggregator that reads `homelab.reverseProxy.entries` from every node.
{
  config,
  lib,
  ...
}:
with lib; let
  homelabCfg = config.homelab;
  # One reverse-proxy publication: which subdomain routes to which host:port.
  reverseProxyEntryType = types.submodule {
    options = {
      subdomain = mkOption {
        type = types.str;
        description = "Subdomain for the service";
      };
      host = mkOption {
        type = types.str;
        description = "Host to proxy to";
        default = "${homelabCfg.hostname}.${homelabCfg.domain}";
      };
      port = mkOption {
        type = types.port;
        description = "Port to proxy to";
      };
      path = mkOption {
        type = types.str;
        default = "/";
        description = "Path prefix for the service";
      };
      enableAuth = mkOption {
        type = types.bool;
        default = false;
        description = "Enable authentication for this service";
      };
      enableSSL = mkOption {
        type = types.bool;
        default = true;
        description = "Enable SSL for this service";
      };
    };
  };
in {
  options.homelab.reverseProxy = {
    entries = mkOption {
      type = types.listOf reverseProxyEntryType;
      default = [];
      description = "Reverse proxy entries for this system";
    };
  };
  # Intentionally empty: no local services are configured by this module.
  config = {
  };
}

View file

@ -1,7 +0,0 @@
{
  # Service module index: imports every homelab service module so hosts can
  # enable them via their `homelab.services.*` options.
  imports = [
    ./minio.nix
    ./monitoring/gatus.nix
    ./monitoring/prometheus.nix
  ];
}

View file

@ -1,161 +0,0 @@
# Example showing how to create a service using the standard interface:
# declares homelab.services.grafana options via mkServiceInterface and wires
# the NixOS grafana service plus monitoring/proxy via mkServiceConfig.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  serviceInterface = import ../lib/service-interface.nix {inherit lib;};
  cfg = config.homelab.services.grafana;
  homelabCfg = config.homelab;
  # Service-specific options beyond the standard interface
  grafanaServiceOptions = {
    domain = mkOption {
      type = types.str;
      default = "grafana.${homelabCfg.externalDomain}";
      description = "Domain for Grafana";
    };
    rootUrl = mkOption {
      type = types.str;
      default = "https://grafana.${homelabCfg.externalDomain}";
      description = "Root URL for Grafana";
    };
    # Declared inline so this example evaluates on its own (the previous
    # reference to a shared commonOptions.dataDir helper did not resolve).
    dataDir = mkOption {
      type = types.str;
      default = "/var/lib/grafana";
      description = "Data directory for grafana";
    };
    admin = {
      user = mkOption {
        type = types.str;
        default = "admin";
        description = "Admin username";
      };
      # NOTE: example default only — override in real deployments, ideally
      # via a secrets mechanism rather than the world-readable Nix store.
      password = mkOption {
        type = types.str;
        default = "admin";
        description = "Admin password";
      };
    };
    datasources = {
      prometheus = {
        enable = mkOption {
          type = types.bool;
          default = true;
          description = "Enable Prometheus datasource";
        };
        url = mkOption {
          type = types.str;
          default = "http://localhost:9090";
          description = "Prometheus URL";
        };
      };
    };
    plugins = mkOption {
      type = types.listOf types.package;
      default = [];
      description = "Grafana plugins to install";
    };
  };
in {
  options.homelab.services.grafana = serviceInterface.mkServiceInterface {
    serviceName = "grafana";
    defaultPort = 3000;
    defaultSubdomain = "grafana";
    monitoringPath = "/metrics";
    healthCheckPath = "/api/health";
    healthCheckConditions = [
      "[STATUS] == 200"
      "[BODY].database == ok"
      "[RESPONSE_TIME] < 2000"
    ];
    serviceOptions = grafanaServiceOptions;
  };
  config = serviceInterface.mkServiceConfig {
    inherit config cfg homelabCfg;
    serviceName = "grafana";
    extraMonitoringLabels = {
      component = "dashboard";
    };
    # Extra Gatus-style check against the login page, beyond the standard
    # /api/health check the interface adds automatically.
    customHealthChecks = [
      {
        name = "grafana-login";
        port = cfg.port;
        path = "/login";
        interval = "60s";
        conditions = [
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 3000"
        ];
        group = "monitoring";
        labels = {
          service = "grafana";
          component = "login";
        };
      }
    ];
    # Actual NixOS service configuration, merged with the standard
    # firewall/monitoring/proxy behavior by mkServiceConfig.
    serviceConfig = {
      services.grafana = {
        enable = true;
        dataDir = cfg.dataDir;
        declarativePlugins = cfg.plugins;
        settings = {
          server = {
            http_port = cfg.port;
            http_addr = "0.0.0.0";
            domain = cfg.domain;
            root_url = cfg.rootUrl;
          };
          security = {
            admin_user = cfg.admin.user;
            admin_password = cfg.admin.password;
          };
        };
        provision = {
          enable = true;
          datasources.settings.datasources = mkIf cfg.datasources.prometheus.enable [
            {
              name = "Prometheus";
              type = "prometheus";
              url = cfg.datasources.prometheus.url;
              isDefault = true;
            }
          ];
        };
      };
    };
  };
}
# Usage example in your configuration:
/*
{
homelab.services.grafana = {
enable = true;
# Standard interface options:
port = 3000; # Optional: defaults to 3000
openFirewall = true; # Optional: defaults to true
proxy.subdomain = "grafana"; # Optional: defaults to "grafana"
proxy.enableAuth = false; # Optional: defaults to false
monitoring.enable = true; # Optional: defaults to true
# Service-specific options:
admin.password = "secure-password";
datasources.prometheus.url = "http://prometheus.lab:9090";
plugins = with pkgs.grafanaPlugins; [ grafana-piechart-panel ];
};
}
*/

View file

@ -1,125 +0,0 @@
# modules/services/jellyfin.nix
#
# NOTE(review): this module declares options under `services.jellyfin`, the
# same namespace nixpkgs' own jellyfin module uses (which already declares
# `enable` and `dataDir`) — evaluation should fail with duplicate option
# declarations unless the upstream module is disabled. Consider moving these
# under `homelab.services.jellyfin` like the sibling modules; TODO confirm.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.services.jellyfin;
in {
  options.services.jellyfin = {
    enable = mkEnableOption "Jellyfin media server";
    port = mkOption {
      type = types.port;
      default = 8096;
      description = "Port for Jellyfin web interface";
    };
    dataDir = mkOption {
      type = types.str;
      default = "/var/lib/jellyfin";
      description = "Directory to store Jellyfin data";
    };
    mediaDir = mkOption {
      type = types.str;
      default = "/media";
      description = "Directory containing media files";
    };
    enableMetrics = mkOption {
      type = types.bool;
      default = true;
      description = "Enable Prometheus metrics";
    };
    exposeWeb = mkOption {
      type = types.bool;
      default = true;
      description = "Expose web interface through reverse proxy";
    };
  };
  config = mkIf cfg.enable {
    # Enable the service
    # NOTE(review): `cfg` IS `config.services.jellyfin`, so defining
    # `enable = true` under `mkIf cfg.enable` and `dataDir = cfg.dataDir`
    # defines these options in terms of themselves — this looks like
    # infinite recursion at eval time; TODO confirm and break the cycle.
    services.jellyfin = {
      enable = true;
      dataDir = cfg.dataDir;
    };
    # Configure global settings
    # NOTE(review): other modules in this tree write to
    # `homelab.reverseProxy.entries` / `homelab.monitoring.*` directly, not
    # under `homelab.global` — confirm which namespace is actually declared.
    homelab.global = {
      # Add backup job for Jellyfin data
      backups.jobs = [
        {
          name = "jellyfin-config";
          backend = "restic";
          paths = ["${cfg.dataDir}/config" "${cfg.dataDir}/data"];
          schedule = "0 2 * * *"; # Daily at 2 AM
          excludePatterns = [
            "*/cache/*"
            "*/transcodes/*"
            "*/logs/*"
          ];
          # NOTE(review): stopping jellyfin for every nightly config backup
          # means a short daily outage — acceptable for a homelab, but worth
          # knowing.
          preHook = ''
            # Stop jellyfin for consistent backup
            systemctl stop jellyfin
          '';
          postHook = ''
            # Restart jellyfin after backup
            systemctl start jellyfin
          '';
        }
        {
          name = "jellyfin-media";
          backend = "restic";
          paths = [cfg.mediaDir];
          schedule = "0 3 * * 0"; # Weekly on Sunday at 3 AM
          excludePatterns = [
            "*.tmp"
            "*/.@__thumb/*" # Synology thumbnails
          ];
        }
      ];
      # Add reverse proxy entry if enabled
      reverseProxy.entries = mkIf cfg.exposeWeb [
        {
          subdomain = "jellyfin";
          port = cfg.port;
          enableAuth = false; # Jellyfin has its own auth
          websockets = true;
          customHeaders = {
            "X-Forwarded-Proto" = "$scheme";
            "X-Forwarded-Host" = "$host";
          };
        }
      ];
      # Add monitoring endpoint if metrics enabled
      monitoring.endpoints = mkIf cfg.enableMetrics [
        {
          name = "jellyfin";
          port = cfg.port;
          path = "/metrics"; # Assuming you have a metrics plugin
          jobName = "jellyfin";
          scrapeInterval = "60s";
          labels = {
            service = "jellyfin";
            type = "media-server";
          };
        }
      ];
    };
    # Open firewall
    networking.firewall.allowedTCPPorts = [cfg.port];
    # Create media directory
    systemd.tmpfiles.rules = [
      "d ${cfg.mediaDir} 0755 jellyfin jellyfin -"
    ];
  };
}

View file

@ -1,66 +0,0 @@
# Minio S3-compatible object storage, exposed through the homelab reverse
# proxy as `minio` (console) and `minio-api` (S3 endpoint).
{
  config,
  lib,
  pkgs,
  ...
}: let
  inherit (lib) mkEnableOption mkIf mkOption optionals types;
  cfg = config.homelab.services.minio;
in {
  options.homelab.services.minio = {
    enable = mkEnableOption "Minio Object Storage";

    port = mkOption {
      type = types.port;
      default = 9000;
      description = "Port of the server.";
    };

    webPort = mkOption {
      type = types.port;
      default = 9001;
      description = "Port of the web UI (console).";
    };

    openFirewall = mkOption {
      type = types.bool;
      default = true;
      description = ''
        Whether to open the ports specified in `port` and `webPort` in the firewall.
      '';
    };
  };

  config = mkIf cfg.enable {
    # Root credentials are rendered from sops secrets into an env-style file
    # that minio reads at startup (keeps secrets out of the Nix store).
    sops.secrets."ente/minio/root_user" = {};
    sops.secrets."ente/minio/root_password" = {};
    sops.templates."minio-root-credentials".content = ''
      MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"}
      MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"}
    '';

    services.minio = {
      enable = true;
      rootCredentialsFile = config.sops.templates."minio-root-credentials".path;
    };

    networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [cfg.port cfg.webPort];

    # Two proxy entries: the S3 API and the web console.
    homelab.reverseProxy.entries = [
      {
        subdomain = "minio-api";
        port = cfg.port;
      }
      {
        subdomain = "minio";
        port = cfg.webPort;
      }
    ];
    # https://min.io/docs/minio/linux/operations/monitoring/collect-minio-metrics-using-prometheus.html
    # metrics and monitoring...
  };
}

View file

@ -1,237 +0,0 @@
# Alertmanager wrapper module: builds a default configuration (SMTP globals,
# spam-inhibit rules, a catch-all webhook receiver) and lets hosts extend it
# via routes/receivers/extraConfig options.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.alertmanager;
  homelabCfg = config.homelab;
  # Default alertmanager configuration
  # NOTE(review): the SMTP password is interpolated into this attrset and
  # therefore lands in the world-readable Nix store — consider an
  # environment/credentials file instead.
  defaultConfig = {
    global = {
      smtp_smarthost = cfg.smtp.host;
      smtp_from = cfg.smtp.from;
      smtp_auth_username = cfg.smtp.username;
      smtp_auth_password = cfg.smtp.password;
    };
    # Inhibit rules to prevent spam
    inhibit_rules = [
      {
        source_match = {
          severity = "critical";
        };
        target_match = {
          severity = "warning";
        };
        equal = ["alertname" "dev" "instance"];
      }
    ];
    route = {
      group_by = ["alertname"];
      group_wait = "10s";
      group_interval = "10s";
      repeat_interval = "1h";
      receiver = "web.hook";
      routes = cfg.routes;
    };
    receivers =
      [
        {
          name = "web.hook";
          webhook_configs = [
            {
              url = "http://127.0.0.1:5001/";
            }
          ];
        }
      ]
      ++ cfg.receivers;
  };
  # Merge with user config
  alertmanagerConfig = recursiveUpdate defaultConfig cfg.extraConfig;
in {
  # NOTE(review): this declares the same `homelab.services.alertmanager`
  # option namespace as the other alertmanager module in this changeset —
  # importing both would fail with duplicate option declarations.
  options.homelab.services.alertmanager = {
    enable = mkEnableOption "Alertmanager for handling alerts";
    port = mkOption {
      type = types.port;
      default = 9093;
      description = "Port for Alertmanager web interface";
    };
    openFirewall = mkOption {
      type = types.bool;
      default = true;
      description = "Whether to open firewall ports";
    };
    dataDir = mkOption {
      type = types.str;
      default = "/var/lib/alertmanager";
      description = "Directory to store Alertmanager data";
    };
    smtp = {
      host = mkOption {
        type = types.str;
        default = "localhost:587";
        description = "SMTP server host:port";
      };
      from = mkOption {
        type = types.str;
        default = "alertmanager@${homelabCfg.externalDomain}";
        description = "From email address";
      };
      username = mkOption {
        type = types.str;
        default = "";
        description = "SMTP username";
      };
      password = mkOption {
        type = types.str;
        default = "";
        description = "SMTP password";
      };
    };
    routes = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Additional routing rules";
      example = literalExpression ''
        [
          {
            match = {
              service = "gatus";
            };
            receiver = "discord-webhook";
          }
          {
            match = {
              severity = "critical";
            };
            receiver = "email-alerts";
          }
        ]
      '';
    };
    receivers = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Alert receivers configuration";
      example = literalExpression ''
        [
          {
            name = "email-alerts";
            email_configs = [{
              to = "admin@example.com";
              subject = "{{ range .Alerts }}{{ .Annotations.summary }}{{ end }}";
              body = "{{ range .Alerts }}{{ .Annotations.description }}{{ end }}";
            }];
          }
          {
            name = "discord-webhook";
            webhook_configs = [{
              url = "https://discord.com/api/webhooks/...";
              title = "{{ range .Alerts }}{{ .Annotations.summary }}{{ end }}";
            }];
          }
        ]
      '';
    };
    extraConfig = mkOption {
      type = types.attrs;
      default = {};
      description = "Additional Alertmanager configuration";
    };
    webExternalUrl = mkOption {
      type = types.str;
      default = "https://alertmanager.${homelabCfg.externalDomain}";
      description = "External URL for Alertmanager web interface";
    };
  };
  config = mkIf cfg.enable {
    # NOTE(review): confirm the nixpkgs `services.prometheus.alertmanager`
    # module in use actually exposes a `dataDir` option — some versions keep
    # state under a systemd StateDirectory instead.
    services.prometheus.alertmanager = {
      enable = true;
      port = cfg.port;
      listenAddress = "0.0.0.0";
      webExternalUrl = cfg.webExternalUrl;
      dataDir = cfg.dataDir;
      # Write configuration to file
      configuration = alertmanagerConfig;
    };
    # Open firewall if requested
    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port];
    # Add to monitoring endpoints
    homelab.monitoring.metrics = [
      {
        name = "alertmanager";
        port = cfg.port;
        path = "/metrics";
        jobName = "alertmanager";
        labels = {
          service = "alertmanager";
          component = "monitoring";
        };
      }
    ];
    # Add health checks
    homelab.monitoring.healthChecks = [
      {
        name = "alertmanager-web-interface";
        port = cfg.port;
        path = "/-/healthy";
        interval = "30s";
        conditions = [
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 1000"
        ];
        group = "monitoring";
        labels = {
          service = "alertmanager";
          component = "web-interface";
        };
      }
      {
        name = "alertmanager-ready";
        port = cfg.port;
        path = "/-/ready";
        interval = "30s";
        conditions = [
          "[STATUS] == 200"
        ];
        group = "monitoring";
        labels = {
          service = "alertmanager";
          component = "readiness";
        };
      }
    ];
    # Add reverse proxy entry
    homelab.reverseProxy.entries = [
      {
        subdomain = "alertmanager";
        host = homelabCfg.hostname;
        port = cfg.port;
      }
    ];
  };
}

View file

@ -1,326 +0,0 @@
{
config,
lib,
pkgs,
...
}:
with lib; let
cfg = config.homelab.services.alertmanager;
homelabCfg = config.homelab;
# Build alertmanager configuration
alertmanagerConfig = {
route = {
receiver = cfg.defaultReceiver;
group_by = cfg.groupBy;
group_wait = cfg.groupWait;
group_interval = cfg.groupInterval;
repeat_interval = cfg.repeatInterval;
routes = cfg.routes;
};
receivers =
[
{name = cfg.defaultReceiver;}
]
++ cfg.receivers;
inhibit_rules = cfg.inhibitRules;
templates = cfg.templates;
};
in {
options.homelab.services.alertmanager = {
enable = mkEnableOption "Alertmanager for handling alerts";
port = mkOption {
type = types.port;
default = 9093;
description = "Port for Alertmanager web interface";
};
openFirewall = mkOption {
type = types.bool;
default = true;
description = "Whether to open firewall ports";
};
dataDir = mkOption {
type = types.str;
default = "/var/lib/alertmanager";
description = "Directory to store Alertmanager data";
};
webExternalUrl = mkOption {
type = types.str;
default = "http://${homelabCfg.hostname}.${homelabCfg.domain}:${toString cfg.port}";
description = "External URL for Alertmanager web interface";
};
environmentFile = mkOption {
type = types.nullOr types.path;
default = null;
description = "Environment file for secrets (e.g., Telegram bot token)";
example = "/run/secrets/alertmanager-env";
};
# Routing configuration
defaultReceiver = mkOption {
type = types.str;
default = "null";
description = "Default receiver for unmatched alerts";
};
groupBy = mkOption {
type = types.listOf types.str;
default = ["alertname"];
description = "Labels to group alerts by";
};
groupWait = mkOption {
type = types.str;
default = "10s";
description = "Time to wait before sending initial notification";
};
groupInterval = mkOption {
type = types.str;
default = "5m";
description = "Time to wait before sending updates for a group";
};
repeatInterval = mkOption {
type = types.str;
default = "4h";
description = "Time to wait before re-sending an alert";
};
routes = mkOption {
type = types.listOf types.attrs;
default = [];
description = "Alert routing rules";
example = literalExpression ''
[
{
receiver = "telegram";
matchers = ["severity =~ \"warning|critical\""];
group_wait = "10s";
continue = true;
}
]
'';
};
receivers = mkOption {
type = types.listOf types.attrs;
default = [];
description = "Alert receivers configuration";
example = literalExpression ''
[
{
name = "telegram";
telegram_configs = [{
api_url = "https://api.telegram.org";
bot_token = "$TELEGRAM_BOT_TOKEN";
chat_id = -1002642560007;
message_thread_id = 4;
parse_mode = "HTML";
send_resolved = true;
message = "{{ template \"telegram.message\" . }}";
}];
}
]
'';
};
inhibitRules = mkOption {
type = types.listOf types.attrs;
default = [
{
source_match = {severity = "critical";};
target_match = {severity = "warning";};
equal = ["alertname" "instance"];
}
];
description = "Rules for inhibiting alerts";
};
templates = mkOption {
type = types.listOf types.path;
default = [];
description = "Template files for alert formatting";
example = literalExpression ''
[
(pkgs.writeText "telegram.tmpl" '''
{{- define "telegram.message" -}}
{{- if gt (len .Alerts.Firing) 0 -}}
🔥 <b>FIRING</b> 🔥
{{- range .Alerts.Firing }}
<b>{{ .Annotations.summary }}</b>
{{ .Annotations.description }}
{{- end }}
{{- end }}
{{- if gt (len .Alerts.Resolved) 0 -}}
<b>RESOLVED</b>
{{- range .Alerts.Resolved }}
<b>{{ .Annotations.summary }}</b>
{{- end }}
{{- end }}
{{- end -}}
''')
]
'';
};
# Convenience options for common receivers
telegram = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable Telegram notifications";
};
botToken = mkOption {
type = types.str;
default = "$TELEGRAM_BOT_TOKEN";
description = "Telegram bot token (use environment variable)";
};
chatId = mkOption {
type = types.int;
description = "Telegram chat ID";
example = -1002642560007;
};
messageThreadId = mkOption {
type = types.nullOr types.int;
default = null;
description = "Telegram message thread ID (for forum groups)";
};
template = mkOption {
type = types.str;
default = "telegram.message";
description = "Template to use for Telegram messages";
};
};
discord = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable Discord notifications";
};
webhookUrl = mkOption {
type = types.str;
default = "$DISCORD_WEBHOOK_URL";
description = "Discord webhook URL (use environment variable)";
};
username = mkOption {
type = types.str;
default = "Alertmanager";
description = "Discord bot username";
};
};
};
config = mkIf cfg.enable {
services.prometheus.alertmanager = {
enable = true;
port = cfg.port;
listenAddress = "0.0.0.0";
openFirewall = cfg.openFirewall;
webExternalUrl = cfg.webExternalUrl;
dataDir = cfg.dataDir;
environmentFile = cfg.environmentFile;
configuration = alertmanagerConfig;
};
# Auto-configure Telegram and Discord receiver if enabled
homelab.services.alertmanager.receivers = [
(optional cfg.telegram.enable {
name = "telegram";
telegram_configs = [
{
api_url = "https://api.telegram.org";
bot_token = cfg.telegram.botToken;
chat_id = cfg.telegram.chatId;
message_thread_id = cfg.telegram.messageThreadId;
parse_mode = "HTML";
send_resolved = true;
message = "{{ template \"${cfg.telegram.template}\" . }}";
}
];
})
(optional cfg.discord.enable {
name = "discord";
discord_configs = [
{
webhook_url = cfg.discord.webhookUrl;
username = cfg.discord.username;
send_resolved = true;
}
];
})
];
# Auto-configure routes for convenience receivers
homelab.services.alertmanager.routes =
(optional cfg.telegram.enable {
receiver = "telegram";
matchers = ["severity =~ \"warning|critical\""];
group_wait = "10s";
continue = true;
})
++ (optional cfg.discord.enable {
receiver = "discord";
matchers = ["severity =~ \"warning|critical\""];
group_wait = "10s";
continue = true;
});
# Add to monitoring endpoints
homelab.monitoring.metrics = [
{
name = "alertmanager";
port = cfg.port;
path = "/metrics";
jobName = "alertmanager";
labels = {
service = "alertmanager";
component = "monitoring";
};
}
];
# Add health checks
homelab.monitoring.healthChecks = [
{
name = "alertmanager-web-interface";
port = cfg.port;
path = "/-/healthy";
interval = "30s";
conditions = [
"[STATUS] == 200"
"[RESPONSE_TIME] < 1000"
];
group = "monitoring";
labels = {
service = "alertmanager";
component = "web-interface";
};
}
];
# Add reverse proxy entry
homelab.reverseProxy.entries = [
{
subdomain = "alertmanager";
host = homelabCfg.hostname;
port = cfg.port;
}
];
};
}

View file

@ -1,148 +0,0 @@
# Example configuration showing how to use the monitoring stack
# with the homelab.global approach for dynamic discovery
{
  config,
  pkgs,
  ...
}: {
  # Import the monitoring services
  imports = [
    ./services/prometheus.nix
    ./services/alertmanager.nix
    ./services/grafana.nix
    ./services/monitoring-stack.nix
  ];
  # Enable the full monitoring stack
  homelab.services.monitoring-stack.enable = true;
  # Configure Prometheus - it will automatically discover scrape targets
  # from homelab.global.monitoring.allMetrics
  homelab.services.prometheus = {
    enable = true;
    port = 9090;
    retention = "7d";
    # Optional: Add custom scrape configs if needed
    extraScrapeConfigs = [
      # Any additional manual scrape configs can go here
      # but most should be discovered via homelab.monitoring.metrics
    ];
    # Optional: Add custom alerting rules
    extraAlertingRules = [
      # Custom alert groups can be added here
    ];
    # Optional: Add external rule files
    ruleFiles = [
      # ./path/to/custom-rules.yml
    ];
  };
  # Configure Alertmanager with Telegram support (like your original)
  homelab.services.alertmanager = {
    enable = true;
    port = 9093;
    # Use sops secrets for environment variables
    environmentFile = config.sops.secrets."alertmanager/env".path;
    # Enable Telegram notifications
    telegram = {
      enable = true;
      botToken = "$TELEGRAM_BOT_TOKEN"; # From environment file
      chatId = -1002642560007;
      messageThreadId = 4;
    };
    # Custom templates (similar to your setup)
    templates = [
      (pkgs.writeText "telegram.tmpl" ''
        {{- define "telegram.message" -}}
        {{- if gt (len .Alerts.Firing) 0 -}}
        🔥 <b>FIRING</b> 🔥
        {{- range .Alerts.Firing }}
        <b>{{ .Annotations.summary }}</b>
        {{ .Annotations.description }}
        {{- end }}
        {{- end }}
        {{- if gt (len .Alerts.Resolved) 0 -}}
        <b>RESOLVED</b>
        {{- range .Alerts.Resolved }}
        <b>{{ .Annotations.summary }}</b>
        {{- end }}
        {{- end }}
        {{- end -}}
      '')
    ];
  };
  # Configure Grafana with data sources (similar to your setup)
  # NOTE(review): `extraGroups`, `datasources.loki`, `datasources.influxdb`
  # and `dashboards.files` must be declared by the imported grafana module —
  # the grafana module variants in this changeset differ on which of these
  # they declare; confirm against ./services/grafana.nix.
  homelab.services.grafana = {
    enable = true;
    port = 3000;
    domain = "grafana.procopius.dk";
    rootUrl = "https://grafana.procopius.dk";
    # Add grafana user to influxdb2 group for accessing secrets
    extraGroups = ["influxdb2"];
    # Enable data sources
    datasources = {
      prometheus.enable = true;
      loki.enable = true;
      influxdb = {
        enable = true;
        database = "proxmox";
        tokenPath = config.sops.secrets."influxdb/token".path;
      };
    };
    # Provision dashboards (similar to your environment.etc approach)
    dashboards.files = [
      {
        name = "traefik";
        source = ./dashboards/traefik.json;
      }
      {
        name = "traefik-access";
        source = ./dashboards/traefik-access.json;
      }
      {
        name = "grafana-traefik";
        source = ./dashboards/grafana-traefik.json;
      }
      {
        name = "node-exporter";
        source = ./dashboards/node-exporter.json;
      }
      {
        name = "promtail";
        source = ./dashboards/promtail.json;
      }
      {
        name = "gitea";
        source = ./dashboards/gitea.json;
      }
      {
        name = "postgres";
        source = ./dashboards/postgres.json;
      }
      {
        name = "gatus";
        source = ./dashboards/gatus.json;
      }
    ];
  };
  # Configure sops secrets (keep your existing setup)
  sops.secrets."alertmanager/env" = {
    sopsFile = ../../secrets/secrets.yaml;
    mode = "0440";
  };
  # All services automatically register with homelab.monitoring.metrics
  # and homelab.monitoring.healthChecks for Gatus monitoring
  # All services automatically get reverse proxy entries
}

View file

@ -1,244 +0,0 @@
{
config,
lib,
...
}:
with lib; let
cfg = config.homelab.services.gatus;
homelabCfg = config.homelab;
# Convert our health check format to Gatus format
formatHealthCheck = check: let
# Build the URL
url = check._url;
# Convert conditions to Gatus format (they should already be compatible)
conditions = check.conditions or ["[STATUS] == 200"];
# Convert alerts to Gatus format
alerts = map (alert: {
inherit (alert) type enabled;
failure-threshold = alert.failure-threshold or 3;
success-threshold = alert.success-threshold or 2;
description = "Health check alert for ${check.name}";
}) (check.alerts or []);
in {
name = check.name;
group = check.group or "default";
url = url;
interval = check.interval or "30s";
# Add method and headers for HTTP/HTTPS checks
method =
if (check.protocol == "http" || check.protocol == "https")
then check.method or "GET"
else null;
conditions = conditions;
# Add timeout
client = {
timeout = check.timeout or "10s";
};
# Add alerts if configured
alerts =
if alerts != []
then alerts
else [];
# Add labels for UI organization
ui = {
hide-hostname = false;
hide-url = false;
description = "Health check for ${check.name} on ${check._nodeName}";
};
};
# Generate Gatus configuration
gatusConfig = {
# Global Gatus settings
alerting = mkIf (cfg.alerting != {}) cfg.alerting;
web = {
address = "0.0.0.0";
port = cfg.port;
};
# TODO: Introduce monitor option to toggle monitoring
metrics = true;
ui = {
title = cfg.ui.title;
header = cfg.ui.header;
link = cfg.ui.link;
buttons = cfg.ui.buttons;
};
storage = mkIf (cfg.storage != {}) cfg.storage;
# Convert all enabled health checks to Gatus endpoints
endpoints = let
# Get all health checks from global config
allHealthChecks = homelabCfg.global.monitoring.enabledHealthChecks or [];
# Group by group name for better organization
# groupedChecks = homelabCfg.global.monitoring.healthChecksByGroup or {};
# Convert to Gatus format
gatusEndpoints = map formatHealthCheck allHealthChecks;
in
gatusEndpoints;
};
in {
options.homelab.services.gatus = {
enable = mkEnableOption "Gatus uptime monitoring service";
port = mkOption {
type = types.port;
default = 8080;
description = "Port for Gatus web interface";
};
openFirewall = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to automatically open the specified ports in the firewall.
'';
};
ui = {
title = mkOption {
type = types.str;
default = "Homelab Status";
description = "Title for the Gatus web interface";
};
header = mkOption {
type = types.str;
default = "Homelab Services Status";
description = "Header text for the Gatus interface";
};
link = mkOption {
type = types.str;
default = "https://gatus.${homelabCfg.externalDomain}";
description = "Link in the Gatus header";
};
buttons = mkOption {
type = types.listOf (types.submodule {
options = {
name = mkOption {type = types.str;};
link = mkOption {type = types.str;};
};
});
default = [
{
name = "Grafana";
link = "https://grafana.${homelabCfg.externalDomain}";
}
{
name = "Prometheus";
link = "https://prometheus.${homelabCfg.externalDomain}";
}
];
description = "Navigation buttons in the Gatus interface";
};
};
alerting = mkOption {
type = types.attrs;
default = {};
description = "Gatus alerting configuration";
example = literalExpression ''
{
discord = {
webhook-url = "https://discord.com/api/webhooks/...";
default-alert = {
enabled = true;
description = "Health check failed";
failure-threshold = 3;
success-threshold = 2;
};
};
}
'';
};
storage = mkOption {
type = types.attrs;
default = {
type = "memory";
};
description = "Gatus storage configuration";
example = literalExpression ''
{
type = "postgres";
path = "postgres://user:password@localhost/gatus?sslmode=disable";
}
'';
};
extraConfig = mkOption {
type = types.attrs;
default = {};
description = "Additional Gatus configuration options";
};
};
config = mkIf cfg.enable {
services.gatus = {
enable = true;
openFirewall = cfg.openFirewall;
settings = gatusConfig;
};
# Add to monitoring endpoints
homelab.monitoring.metrics = [
{
name = "gatus";
port = cfg.port;
path = "/metrics";
jobName = "gatus";
labels = {
service = "gatus";
component = "monitoring";
};
}
];
# Add health check for Gatus itself
homelab.monitoring.healthChecks = [
{
name = "gatus-web-interface";
port = cfg.port;
path = "/health";
interval = "30s";
conditions = [
"[STATUS] == 200"
"[BODY].status == UP"
"[RESPONSE_TIME] < 1000"
];
group = "monitoring";
labels = {
service = "gatus";
component = "web-interface";
};
}
];
# Add reverse proxy entry if needed
homelab.reverseProxy.entries = [
{
subdomain = "status";
host = homelabCfg.hostname;
port = cfg.port;
# path = "/";
# enableAuth = false; # Status page should be publicly accessible
# enableSSL = true;
}
];
};
}

View file

@ -1,416 +0,0 @@
{
config,
lib,
pkgs,
...
}:
with lib; let
cfg = config.homelab.services.grafana;
homelabCfg = config.homelab;
  # Default dashboards for homelab monitoring
  # NOTE(review): this binding is never referenced anywhere else in this
  # module, and both sha256 values are zero placeholders, so any future use
  # would fail the fixed-output hash check — either wire it into the
  # dashboard provisioning with real hashes or delete it.
  defaultDashboards = {
    "node-exporter" = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/1860/revisions/37/download";
      sha256 = "sha256-0000000000000000000000000000000000000000000="; # You'll need to update this
    };
    "prometheus-stats" = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/2/revisions/2/download";
      sha256 = "sha256-0000000000000000000000000000000000000000000="; # You'll need to update this
    };
  };
# Grafana provisioning configuration
provisioningConfig = {
# Data sources
datasources =
[
{
name = "Prometheus";
type = "prometheus";
access = "proxy";
url = cfg.datasources.prometheus.url;
isDefault = true;
editable = false;
jsonData = {
timeInterval = "5s";
queryTimeout = "60s";
httpMethod = "POST";
};
}
]
++ cfg.datasources.extra;
# Dashboard providers
dashboards = [
{
name = "homelab";
type = "file";
disableDeletion = false;
updateIntervalSeconds = 10;
allowUiUpdates = true;
options = {
path = "/var/lib/grafana/dashboards";
};
}
];
# Notification channels
notifiers = cfg.notifications;
};
in {
options.homelab.services.grafana = {
enable = mkEnableOption "Grafana dashboard service";
port = mkOption {
type = types.port;
default = 3000;
description = "Port for Grafana web interface";
};
openFirewall = mkOption {
type = types.bool;
default = true;
description = "Whether to open firewall ports";
};
dataDir = mkOption {
type = types.str;
default = "/var/lib/grafana";
description = "Directory to store Grafana data";
};
domain = mkOption {
type = types.str;
default = "grafana.${homelabCfg.externalDomain}";
description = "Domain for Grafana";
};
rootUrl = mkOption {
type = types.str;
default = "https://grafana.${homelabCfg.externalDomain}";
description = "Root URL for Grafana";
};
admin = {
user = mkOption {
type = types.str;
default = "admin";
description = "Admin username";
};
password = mkOption {
type = types.str;
default = "admin";
description = "Admin password (change this!)";
};
email = mkOption {
type = types.str;
default = "admin@${homelabCfg.externalDomain}";
description = "Admin email";
};
};
datasources = {
prometheus = {
url = mkOption {
type = types.str;
default = "http://localhost:9090";
description = "Prometheus URL";
};
};
extra = mkOption {
type = types.listOf types.attrs;
default = [];
description = "Additional data sources";
example = literalExpression ''
[
{
name = "Loki";
type = "loki";
url = "http://localhost:3100";
}
]
'';
};
};
notifications = mkOption {
type = types.listOf types.attrs;
default = [];
description = "Notification channels configuration";
example = literalExpression ''
[
{
name = "discord-webhook";
type = "discord";
settings = {
url = "https://discord.com/api/webhooks/...";
username = "Grafana";
};
}
]
'';
};
plugins = mkOption {
type = types.listOf types.str;
default = [
"grafana-piechart-panel"
"grafana-worldmap-panel"
"grafana-clock-panel"
"grafana-simple-json-datasource"
];
description = "Grafana plugins to install";
};
smtp = {
enabled = mkOption {
type = types.bool;
default = false;
description = "Enable SMTP for email notifications";
};
host = mkOption {
type = types.str;
default = "localhost:587";
description = "SMTP server host:port";
};
user = mkOption {
type = types.str;
default = "";
description = "SMTP username";
};
password = mkOption {
type = types.str;
default = "";
description = "SMTP password";
};
fromAddress = mkOption {
type = types.str;
default = "grafana@${homelabCfg.externalDomain}";
description = "From email address";
};
fromName = mkOption {
type = types.str;
default = "Homelab Grafana";
description = "From name";
};
};
security = {
allowEmbedding = mkOption {
type = types.bool;
default = false;
description = "Allow embedding Grafana in iframes";
};
cookieSecure = mkOption {
type = types.bool;
default = true;
description = "Set secure flag on cookies";
};
secretKey = mkOption {
type = types.str;
default = "change-this-secret-key";
description = "Secret key for signing (change this!)";
};
};
auth = {
anonymousEnabled = mkOption {
type = types.bool;
default = false;
description = "Enable anonymous access";
};
disableLoginForm = mkOption {
type = types.bool;
default = false;
description = "Disable login form";
};
};
extraConfig = mkOption {
type = types.attrs;
default = {};
description = "Additional Grafana configuration";
};
};
config = mkIf cfg.enable {
services.grafana = {
enable = true;
settings =
recursiveUpdate {
server = {
http_addr = "0.0.0.0";
http_port = cfg.port;
domain = cfg.domain;
root_url = cfg.rootUrl;
serve_from_sub_path = false;
};
database = {
type = "sqlite3";
path = "${cfg.dataDir}/grafana.db";
};
security = {
admin_user = cfg.admin.user;
admin_password = cfg.admin.password;
admin_email = cfg.admin.email;
allow_embedding = cfg.security.allowEmbedding;
cookie_secure = cfg.security.cookieSecure;
secret_key = cfg.security.secretKey;
};
users = {
allow_sign_up = false;
auto_assign_org = true;
auto_assign_org_role = "Viewer";
};
auth.anonymous = {
enabled = cfg.auth.anonymousEnabled;
org_name = "Homelab";
org_role = "Viewer";
};
auth.basic = {
enabled = !cfg.auth.disableLoginForm;
};
smtp = mkIf cfg.smtp.enabled {
enabled = true;
host = cfg.smtp.host;
user = cfg.smtp.user;
password = cfg.smtp.password;
from_address = cfg.smtp.fromAddress;
from_name = cfg.smtp.fromName;
};
analytics = {
reporting_enabled = false;
check_for_updates = false;
};
log = {
mode = "console";
level = "info";
};
paths = {
data = cfg.dataDir;
logs = "${cfg.dataDir}/log";
plugins = "${cfg.dataDir}/plugins";
provisioning = "/etc/grafana/provisioning";
};
}
cfg.extraConfig;
dataDir = cfg.dataDir;
};
# Install plugins
systemd.services.grafana.preStart = mkIf (cfg.plugins != []) (
concatStringsSep "\n" (map (
plugin: "${pkgs.grafana}/bin/grafana-cli --pluginsDir ${cfg.dataDir}/plugins plugins install ${plugin} || true"
)
cfg.plugins)
);
# Provisioning configuration
environment.etc =
{
"grafana/provisioning/datasources/datasources.yaml".text = builtins.toJSON {
apiVersion = 1;
datasources = provisioningConfig.datasources;
};
"grafana/provisioning/dashboards/dashboards.yaml".text = builtins.toJSON {
apiVersion = 1;
providers = provisioningConfig.dashboards;
};
}
// (mkIf (cfg.notifications != []) {
"grafana/provisioning/notifiers/notifiers.yaml".text = builtins.toJSON {
apiVersion = 1;
notifiers = provisioningConfig.notifiers;
};
});
# Create dashboard directory
systemd.tmpfiles.rules = [
"d ${cfg.dataDir}/dashboards 0755 grafana grafana -"
];
# Open firewall if requested
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port];
# Add to monitoring endpoints
homelab.monitoring.metrics = [
{
name = "grafana";
port = cfg.port;
path = "/metrics";
jobName = "grafana";
labels = {
service = "grafana";
component = "monitoring";
};
}
];
# Add health checks
homelab.monitoring.healthChecks = [
{
name = "grafana-web-interface";
port = cfg.port;
path = "/api/health";
interval = "30s";
conditions = [
"[STATUS] == 200"
"[BODY].database == ok"
"[RESPONSE_TIME] < 2000"
];
group = "monitoring";
labels = {
service = "grafana";
component = "web-interface";
};
}
{
name = "grafana-login-page";
port = cfg.port;
path = "/login";
interval = "60s";
conditions = [
"[STATUS] == 200"
"[RESPONSE_TIME] < 3000"
];
group = "monitoring";
labels = {
service = "grafana";
component = "login";
};
}
];
# Add reverse proxy entry
homelab.reverseProxy.entries = [
{
subdomain = "grafana";
host = homelabCfg.hostname;
port = cfg.port;
}
];
};
}

View file

@ -1,369 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.grafana;
  homelabCfg = config.homelab;
  # Build one environment.etc entry that installs a dashboard JSON file
  # under /etc/grafana-dashboards/<name>.json, readable by the grafana user.
  provisionDashboard = name: source: {
    "grafana-dashboards/${name}.json" = {
      inherit source;
      user = "grafana";
      group = "grafana";
      mode = "0644";
    };
  };
  # Merge the per-dashboard etc entries for all configured dashboards
  # into a single attrset.
  dashboardFiles =
    fold (
      dashboard: acc:
        acc // (provisionDashboard dashboard.name dashboard.source)
    ) {}
    cfg.dashboards.files;
in {
  options.homelab.services.grafana = {
    enable = mkEnableOption "Grafana dashboard service";
    port = mkOption {
      type = types.port;
      default = 3000;
      description = "Port for Grafana web interface";
    };
    openFirewall = mkOption {
      type = types.bool;
      default = true;
      description = "Whether to open firewall ports";
    };
    dataDir = mkOption {
      type = types.str;
      default = "/var/lib/grafana";
      description = "Directory to store Grafana data";
    };
    domain = mkOption {
      type = types.str;
      default = "grafana.${homelabCfg.externalDomain}";
      description = "Domain for Grafana";
    };
    rootUrl = mkOption {
      type = types.str;
      default = "https://grafana.${homelabCfg.externalDomain}";
      description = "Root URL for Grafana";
    };
    # Authentication settings
    auth = {
      disableLoginForm = mkOption {
        type = types.bool;
        default = false;
        description = "Disable the login form";
      };
      oauthAutoLogin = mkOption {
        type = types.bool;
        default = false;
        description = "Enable OAuth auto-login";
      };
      genericOauth = {
        enabled = mkOption {
          type = types.bool;
          default = false;
          description = "Enable generic OAuth";
        };
      };
    };
    # Data source configuration
    datasources = {
      prometheus = {
        enable = mkOption {
          type = types.bool;
          default = true;
          description = "Enable Prometheus datasource";
        };
        url = mkOption {
          type = types.str;
          default = "http://127.0.0.1:9090";
          description = "Prometheus URL";
        };
        uid = mkOption {
          type = types.str;
          default = "prometheus";
          description = "Unique identifier for Prometheus datasource";
        };
      };
      loki = {
        enable = mkOption {
          type = types.bool;
          default = false;
          description = "Enable Loki datasource";
        };
        url = mkOption {
          type = types.str;
          default = "http://127.0.0.1:3100";
          description = "Loki URL";
        };
        uid = mkOption {
          type = types.str;
          default = "loki";
          description = "Unique identifier for Loki datasource";
        };
      };
      influxdb = {
        enable = mkOption {
          type = types.bool;
          default = false;
          description = "Enable InfluxDB datasource";
        };
        url = mkOption {
          type = types.str;
          default = "http://127.0.0.1:8086";
          description = "InfluxDB URL";
        };
        database = mkOption {
          type = types.str;
          default = "homelab";
          description = "InfluxDB database name";
        };
        tokenPath = mkOption {
          type = types.nullOr types.path;
          default = null;
          description = "Path to InfluxDB token file";
        };
        uid = mkOption {
          type = types.str;
          default = "influxdb";
          description = "Unique identifier for InfluxDB datasource";
        };
      };
      extra = mkOption {
        type = types.listOf types.attrs;
        default = [];
        description = "Additional data sources";
      };
    };
    # Dashboard configuration
    dashboards = {
      path = mkOption {
        type = types.str;
        default = "/etc/grafana-dashboards";
        description = "Path to dashboard files";
      };
      files = mkOption {
        type = types.listOf (types.submodule {
          options = {
            name = mkOption {
              type = types.str;
              description = "Dashboard name (without .json extension)";
              example = "node-exporter";
            };
            source = mkOption {
              type = types.path;
              description = "Path to dashboard JSON file";
            };
          };
        });
        default = [];
        description = "Dashboard files to provision";
        example = literalExpression ''
          [
            {
              name = "node-exporter";
              source = ./dashboards/node-exporter.json;
            }
            {
              name = "traefik";
              source = ./dashboards/traefik.json;
            }
          ]
        '';
      };
    };
    # Extra user groups for accessing secrets
    extraGroups = mkOption {
      type = types.listOf types.str;
      default = [];
      description = "Additional groups for the grafana user";
      example = ["influxdb2"];
    };
    # Additional settings
    extraSettings = mkOption {
      type = types.attrs;
      default = {};
      description = "Additional Grafana settings";
    };
    plugins = mkOption {
      type = types.listOf types.package;
      default = [];
      description = "Grafana plugins to install";
      example = literalExpression "with pkgs.grafanaPlugins; [ grafana-piechart-panel ]";
    };
  };
  config = mkIf cfg.enable {
    # Add grafana user to extra groups (e.g., for accessing secrets)
    users.users.grafana.extraGroups = cfg.extraGroups;
    services.grafana = {
      enable = true;
      dataDir = cfg.dataDir;
      declarativePlugins = cfg.plugins;
      # Base settings; cfg.extraSettings wins on conflicts via recursiveUpdate.
      settings =
        recursiveUpdate {
          server = {
            http_port = cfg.port;
            http_addr = "0.0.0.0";
            domain = cfg.domain;
            root_url = cfg.rootUrl;
          };
          "auth.generic_oauth" = {
            enabled = cfg.auth.genericOauth.enabled;
          };
          auth = {
            disable_login_form = cfg.auth.disableLoginForm;
            # FIX: oauth_auto_login belongs to the [auth] section; it was
            # previously emitted under [server], where Grafana ignores it.
            oauth_auto_login = cfg.auth.oauthAutoLogin;
          };
        }
        cfg.extraSettings;
      provision = {
        enable = true;
        datasources.settings = {
          datasources = let
            # Build datasource list from the enabled toggles plus extras.
            datasources =
              []
              ++ optional cfg.datasources.prometheus.enable {
                uid = cfg.datasources.prometheus.uid;
                name = "Prometheus";
                type = "prometheus";
                url = cfg.datasources.prometheus.url;
              }
              ++ optional cfg.datasources.loki.enable {
                uid = cfg.datasources.loki.uid;
                name = "Loki";
                type = "loki";
                url = cfg.datasources.loki.url;
              }
              ++ optional cfg.datasources.influxdb.enable ({
                  uid = cfg.datasources.influxdb.uid;
                  name = "InfluxDB";
                  type = "influxdb";
                  url = cfg.datasources.influxdb.url;
                  access = "proxy";
                  jsonData = {
                    dbName = cfg.datasources.influxdb.database;
                    httpHeaderName1 = "Authorization";
                  };
                }
                # FIX: mkIf is only resolved by the module system on option
                # attributes, never inside a plain list element; the old
                # `secureJsonData = mkIf … {…}` leaked an { _type = "if"; … }
                # attrset into the provisioning file. optionalAttrs merges
                # the attribute only when a token file is configured.
                // optionalAttrs (cfg.datasources.influxdb.tokenPath != null) {
                  secureJsonData = {
                    httpHeaderValue1 = "$__file{${cfg.datasources.influxdb.tokenPath}}";
                  };
                })
              ++ cfg.datasources.extra;
          in
            datasources;
        };
        dashboards.settings.providers = mkIf (cfg.dashboards.files != []) [
          {
            name = "homelab-dashboards";
            options.path = cfg.dashboards.path;
          }
        ];
      };
    };
    # Open firewall if requested
    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port];
    # Provision dashboard files
    environment.etc = dashboardFiles;
    # Add to monitoring endpoints
    homelab.monitoring.metrics = [
      {
        name = "grafana";
        port = cfg.port;
        path = "/metrics";
        jobName = "grafana";
        labels = {
          service = "grafana";
          component = "monitoring";
        };
      }
    ];
    # Add health checks
    homelab.monitoring.healthChecks = [
      {
        name = "grafana-web-interface";
        port = cfg.port;
        path = "/api/health";
        interval = "30s";
        conditions = [
          "[STATUS] == 200"
          "[BODY].database == ok"
          "[RESPONSE_TIME] < 2000"
        ];
        group = "monitoring";
        labels = {
          service = "grafana";
          component = "web-interface";
        };
      }
      {
        name = "grafana-login-page";
        port = cfg.port;
        path = "/login";
        interval = "60s";
        conditions = [
          "[STATUS] == 200"
          "[RESPONSE_TIME] < 3000"
        ];
        group = "monitoring";
        labels = {
          service = "grafana";
          component = "login";
        };
      }
    ];
    # Add reverse proxy entry
    homelab.reverseProxy.entries = [
      {
        subdomain = "grafana";
        host = homelabCfg.hostname;
        port = cfg.port;
      }
    ];
  };
}

View file

@ -1,60 +0,0 @@
{
  config,
  lib,
  ...
}:
with lib; let
  cfg = config.homelab.services.monitoring-stack;

  # Shorthand for a component toggle that defaults to enabled.
  mkToggle = description:
    mkOption {
      type = types.bool;
      default = true;
      inherit description;
    };
in {
  imports = [
    ./prometheus.nix
    ./alertmanager.nix
    ./grafana.nix
  ];

  options.homelab.services.monitoring-stack = {
    enable = mkEnableOption "Complete monitoring stack (Prometheus + Alertmanager + Grafana)";
    prometheus.enable = mkToggle "Enable Prometheus";
    alertmanager.enable = mkToggle "Enable Alertmanager";
    grafana.enable = mkToggle "Enable Grafana";
  };

  config = mkIf cfg.enable {
    # Fan the stack-level toggles out to the individual service modules;
    # mkDefault keeps them overridable by host configuration.
    homelab.services.prometheus.enable = mkDefault cfg.prometheus.enable;
    homelab.services.alertmanager.enable = mkDefault cfg.alertmanager.enable;
    homelab.services.grafana.enable = mkDefault cfg.grafana.enable;

    # Wire Prometheus to Alertmanager when both components are on.
    homelab.services.prometheus.alertmanager = mkIf (cfg.prometheus.enable && cfg.alertmanager.enable) {
      enable = true;
      url = "http://localhost:${toString config.homelab.services.alertmanager.port}";
    };

    # Point Grafana's Prometheus datasource at the local Prometheus
    # when both components are on.
    homelab.services.grafana.datasources.prometheus = mkIf (cfg.prometheus.enable && cfg.grafana.enable) {
      url = "http://localhost:${toString config.homelab.services.prometheus.port}";
    };
  };
}

View file

@ -1,203 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  serviceInterface = import ../../lib/service-interface.nix {inherit lib;};
  cfg = config.homelab.services.prometheus;
  homelabCfg = config.homelab;
  # Generate Prometheus scrape configs from the globally collected metric
  # endpoints, grouped into one job per jobName.
  prometheusScrapeConfigs = let
    allMetrics = homelabCfg.global.monitoring.allMetrics or [];
    jobGroups = groupBy (m: m.jobName) allMetrics;
    scrapeConfigs =
      mapAttrsToList (jobName: endpoints: let
        # All endpoints in a job share one interval/path; take them from the
        # first endpoint and fall back to sensible defaults.
        # FIX: `head endpoints.scrapeInterval or ["30s"]` parsed as
        # `head (endpoints.scrapeInterval or ["30s"])`; since `endpoints` is
        # a list the select always hit the fallback, so per-endpoint
        # scrapeInterval was ignored and metrics_path was always null.
        first = head endpoints;
      in {
        job_name = jobName;
        scrape_interval = first.scrapeInterval or "30s";
        static_configs = [
          {
            targets = map (endpoint: "${endpoint.host}:${toString endpoint.port}") endpoints;
            # Merged labels; later endpoints win on conflicts.
            labels = fold (endpoint: acc: acc // endpoint.labels) {} endpoints;
          }
        ];
        # "/metrics" is also Prometheus's built-in default path.
        metrics_path = first.path or "/metrics";
      })
      jobGroups;
  in
    scrapeConfigs;
  # Service-specific options beyond the standard interface
  prometheusServiceOptions = {
    retention = mkOption {
      type = types.str;
      default = "15d";
      description = "How long to retain metrics data";
    };
    alertmanager = {
      enable = mkOption {
        type = types.bool;
        default = true;
        description = "Enable integration with Alertmanager";
      };
      url = mkOption {
        type = types.str;
        default = "${homelabCfg.hostname}.${homelabCfg.domain}:9093";
        description = "Alertmanager URL";
      };
    };
    extraScrapeConfigs = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Additional scrape configurations";
    };
    extraAlertingRules = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Additional alerting rules";
    };
    globalConfig = mkOption {
      type = types.attrs;
      default = {
        scrape_interval = "15s";
        evaluation_interval = "15s";
      };
      description = "Global Prometheus configuration";
    };
    extraFlags = mkOption {
      type = types.listOf types.str;
      default = [];
      description = "Extra command line flags";
    };
    ruleFiles = mkOption {
      type = types.listOf types.path;
      default = [];
      description = "Additional rule files to load";
    };
  };
  # Standard alerting rules shipped with every Prometheus instance.
  alertingRules = [
    {
      name = "homelab.rules";
      rules = [
        {
          alert = "InstanceDown";
          expr = "up == 0";
          for = "5m";
          labels = {severity = "critical";};
          annotations = {
            summary = "Instance {{ $labels.instance }} down";
            description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.";
          };
        }
        {
          alert = "HighCPUUsage";
          expr = "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100) > 80";
          for = "10m";
          labels = {severity = "warning";};
          annotations = {
            summary = "High CPU usage on {{ $labels.instance }}";
            description = "CPU usage is above 80% for more than 10 minutes on {{ $labels.instance }}.";
          };
        }
        {
          alert = "HighMemoryUsage";
          expr = "(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100 > 85";
          for = "10m";
          labels = {severity = "warning";};
          annotations = {
            summary = "High memory usage on {{ $labels.instance }}";
            description = "Memory usage is above 85% for more than 10 minutes on {{ $labels.instance }}.";
          };
        }
        {
          alert = "DiskSpaceLow";
          expr = "((node_filesystem_size_bytes - node_filesystem_avail_bytes) / node_filesystem_size_bytes) * 100 > 90";
          for = "5m";
          labels = {severity = "critical";};
          annotations = {
            summary = "Disk space low on {{ $labels.instance }}";
            description = "Disk usage is above 90% on {{ $labels.instance }} {{ $labels.mountpoint }}.";
          };
        }
      ];
    }
  ];
in {
  options.homelab.services.prometheus = serviceInterface.mkServiceInterface {
    serviceName = "prometheus";
    defaultPort = 9090;
    defaultSubdomain = "prometheus";
    monitoringPath = "/metrics";
    healthCheckPath = "/-/healthy";
    healthCheckConditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"];
    serviceOptions = prometheusServiceOptions;
  };
  config = serviceInterface.mkServiceConfig {
    inherit config cfg homelabCfg;
    serviceName = "prometheus";
    extraMonitoringLabels = {
      component = "monitoring-server";
    };
    customHealthChecks = [
      {
        name = "prometheus-ready";
        port = cfg.port;
        path = "/-/ready";
        interval = "30s";
        conditions = ["[STATUS] == 200"];
        group = "monitoring";
        labels = {
          service = "prometheus";
          component = "readiness";
        };
      }
    ];
    serviceConfig = {
      services.prometheus = {
        enable = true;
        port = cfg.port;
        listenAddress = "0.0.0.0";
        retentionTime = cfg.retention;
        globalConfig = cfg.globalConfig;
        extraFlags = cfg.extraFlags;
        scrapeConfigs = prometheusScrapeConfigs ++ cfg.extraScrapeConfigs;
        # Serialize each rule group to its own JSON file (valid YAML).
        ruleFiles =
          map (ruleGroup:
            pkgs.writeText "${ruleGroup.name}.yml" (builtins.toJSON {
              groups = [ruleGroup];
            })) (alertingRules ++ cfg.extraAlertingRules)
          ++ cfg.ruleFiles;
        alertmanagers = mkIf cfg.alertmanager.enable [
          {
            static_configs = [
              {
                targets = [cfg.alertmanager.url];
              }
            ];
          }
        ];
      };
    };
  };
}

View file

@ -1,208 +0,0 @@
# modules/services/prometheus.nix
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.prometheus;
  globalCfg = config.homelab.global;
in {
  options.homelab.services.prometheus = {
    enable = mkEnableOption "Prometheus monitoring server";
    port = mkOption {
      type = types.port;
      default = 9090;
      description = "Prometheus server port";
    };
    webExternalUrl = mkOption {
      type = types.str;
      default = "http://${globalCfg.hostname}:${toString cfg.port}";
      description = "External URL for Prometheus";
    };
    retention = mkOption {
      type = types.str;
      default = "30d";
      description = "Data retention period";
    };
    scrapeConfigs = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Additional scrape configurations";
    };
    alertmanager = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = "Enable Alertmanager integration";
      };
      url = mkOption {
        type = types.str;
        default = "http://localhost:9093";
        description = "Alertmanager URL";
      };
    };
  };
  config = mkIf cfg.enable {
    # Register service with global homelab config
    homelab.global.services.prometheus = {
      enable = true;
      description = "Metrics collection and monitoring server";
      category = "monitoring";
      ports = [cfg.port];
      tags = ["metrics" "monitoring" "alerting"];
      priority = 20;
      dependencies = ["node-exporter"];
    };
    # Configure the actual Prometheus service
    services.prometheus = {
      enable = true;
      port = cfg.port;
      webExternalUrl = cfg.webExternalUrl;
      retentionTime = cfg.retention;
      scrapeConfigs =
        [
          # Auto-discover monitoring endpoints from global config
          {
            job_name = "homelab-auto";
            static_configs = [
              {
                targets =
                  map (
                    endpoint: "${globalCfg.hostname}:${toString endpoint.port}"
                  )
                  globalCfg.monitoring.endpoints;
              }
            ];
            scrape_interval = "30s";
            metrics_path = "/metrics";
          }
        ]
        ++ cfg.scrapeConfigs;
      # Alertmanager configuration
      alertmanagers = mkIf cfg.alertmanager.enable [
        {
          static_configs = [
            {
              targets = [cfg.alertmanager.url];
            }
          ];
        }
      ];
      # FIX: `services.prometheus.rules` takes rule *text* strings; a
      # writeText derivation coerces to its store path, so the generated
      # rules file used to contain a /nix/store path instead of YAML.
      # `ruleFiles` takes file paths, which is what we produce here.
      ruleFiles = [
        # Basic homelab alerting rules
        (pkgs.writeText "homelab-alerts.yml" ''
          groups:
            - name: homelab
              rules:
                - alert: ServiceDown
                  expr: up == 0
                  for: 5m
                  labels:
                    severity: critical
                  annotations:
                    summary: "Service {{ $labels.instance }} is down"
                    description: "{{ $labels.job }} on {{ $labels.instance }} has been down for more than 5 minutes."
                - alert: HighMemoryUsage
                  expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9
                  for: 10m
                  labels:
                    severity: warning
                  annotations:
                    summary: "High memory usage on {{ $labels.instance }}"
                    description: "Memory usage is above 90% on {{ $labels.instance }}"
                - alert: HighDiskUsage
                  expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes > 0.85
                  for: 5m
                  labels:
                    severity: warning
                  annotations:
                    summary: "High disk usage on {{ $labels.instance }}"
                    description: "Disk usage is above 85% on {{ $labels.instance }} for filesystem {{ $labels.mountpoint }}"
        '')
      ];
    };
    # Add monitoring endpoint to global config
    homelab.global.monitoring.endpoints = [
      {
        name = "prometheus";
        port = cfg.port;
        path = "/metrics";
        jobName = "prometheus";
        scrapeInterval = "30s";
        labels = {
          service = "prometheus";
          role = "monitoring";
        };
      }
    ];
    # Add reverse proxy entry if configured
    # NOTE(review): globalCfg.domain is declared elsewhere as a string with a
    # default, so the null guard here may never trigger — verify the option.
    homelab.global.reverseProxy.entries = mkIf (globalCfg.domain != null) [
      {
        subdomain = "prometheus";
        port = cfg.port;
        path = "/";
        enableAuth = true;
        enableSSL = true;
        customHeaders = {
          "X-Frame-Options" = "DENY";
          "X-Content-Type-Options" = "nosniff";
        };
      }
    ];
    # Add backup job for Prometheus data
    homelab.global.backups.jobs = [
      {
        name = "prometheus-data";
        backend = "restic";
        paths = ["/var/lib/prometheus2"];
        schedule = "daily";
        retention = {
          daily = "7";
          weekly = "4";
          monthly = "3";
          yearly = "1";
        };
        excludePatterns = [
          "*.tmp"
          "*/wal/*"
        ];
        preHook = ''
          # Stop prometheus temporarily for consistent backup
          systemctl stop prometheus
        '';
        postHook = ''
          # Restart prometheus after backup
          systemctl start prometheus
        '';
      }
    ];
    # Open firewall port
    networking.firewall.allowedTCPPorts = [cfg.port];
    # Create prometheus configuration directory
    systemd.tmpfiles.rules = [
      "d /var/lib/prometheus2 0755 prometheus prometheus -"
      "d /etc/prometheus 0755 root root -"
    ];
  };
}

View file

@ -1,126 +0,0 @@
# modules/lib/helpers.nix
#
# Pure helper functions for the homelab module set: merging per-host global
# config fragments, stamping out service module templates, and generating
# nginx / Prometheus configuration from collected entries.
{lib, ...}:
with lib; rec {
  # Helper to merge global configurations from multiple sources.
  # Each merge* binding is foldl' partially applied to its accumulator seed;
  # applying it to `configs` concatenates the respective lists in order.
  mergeGlobalConfigs = configs: let
    mergeEndpoints = foldl' (acc: cfg: acc ++ cfg.monitoring.endpoints) [];
    mergeBackups = foldl' (acc: cfg: acc ++ cfg.backups.jobs) [];
    mergeProxyEntries = foldl' (acc: cfg: acc ++ cfg.reverseProxy.entries) [];
  in {
    monitoring.endpoints = mergeEndpoints configs;
    backups.jobs = mergeBackups configs;
    reverseProxy.entries = mergeProxyEntries configs;
  };
  # Helper to create a service module template.
  # Returns a NixOS module that declares services.<name> options and, when
  # enabled, registers daily backups, an optional reverse-proxy entry, and
  # an optional metrics endpoint under homelab.global.
  createServiceModule = {
    name,
    port,
    hasMetrics ? true,
    hasWebUI ? true,
    dataDir ? "/var/lib/${name}",
  }: {
    config,
    lib,
    pkgs,
    ...
  }:
    with lib; let
      cfg = config.services.${name};
    in {
      options.services.${name} = {
        enable = mkEnableOption "${name} service";
        port = mkOption {
          type = types.port;
          # Template argument becomes the default; per-host config may override.
          default = port;
          description = "Port for ${name}";
        };
        dataDir = mkOption {
          type = types.str;
          default = dataDir;
          description = "Data directory for ${name}";
        };
        enableMetrics = mkOption {
          type = types.bool;
          default = hasMetrics;
          description = "Enable metrics endpoint";
        };
        exposeWeb = mkOption {
          type = types.bool;
          default = hasWebUI;
          description = "Expose web interface";
        };
      };
      config = mkIf cfg.enable {
        homelab.global = {
          backups.jobs = [
            {
              name = "${name}-data";
              backend = "restic";
              paths = [cfg.dataDir];
              schedule = "daily";
            }
          ];
          reverseProxy.entries = mkIf cfg.exposeWeb [
            {
              subdomain = name;
              port = cfg.port;
            }
          ];
          monitoring.endpoints = mkIf cfg.enableMetrics [
            {
              name = name;
              port = cfg.port;
              path = "/metrics";
              jobName = name;
            }
          ];
        };
      };
    };
  # Helper to generate nginx configuration from proxy entries.
  # NOTE(review): reads entry.targetHost and entry.websockets, which the
  # reverseProxyEntryType in global-config.nix does not declare — callers
  # presumably enrich entries before calling this; verify against call sites.
  generateNginxConfig = proxyEntries: domain: let
    # One virtual host per entry: <subdomain>.<domain> proxying to the target.
    createVHost = entry: {
      "${entry.subdomain}.${domain}" = {
        enableACME = entry.enableSSL;
        forceSSL = entry.enableSSL;
        locations."${entry.path}" = {
          proxyPass = "http://${entry.targetHost}:${toString entry.port}";
          proxyWebsockets = entry.websockets;
          extraConfig = ''
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            ${concatStringsSep "\n" (mapAttrsToList (
              name: value: "proxy_set_header ${name} ${value};"
            )
            entry.customHeaders)}
          '';
        };
      };
    };
  in
    # Later entries win if two entries share a subdomain (// overwrites).
    foldl' (acc: entry: acc // (createVHost entry)) {} proxyEntries;
  # Helper to generate Prometheus scrape configs.
  # Endpoints are grouped by jobName; interval and path come from the first
  # endpoint of each group, labels are merged across the group (later wins).
  # NOTE(review): reads e.targetHost, which monitoringEndpointType does not
  # declare — verify the shape of the endpoints passed in.
  generatePrometheusConfig = endpoints: let
    endpointsByJob = groupBy (e: e.jobName) endpoints;
    createJobConfig = jobName: jobEndpoints: {
      job_name = jobName;
      scrape_interval = (head jobEndpoints).scrapeInterval;
      metrics_path = (head jobEndpoints).path;
      static_configs = [
        {
          targets = map (e: "${e.targetHost}:${toString e.port}") jobEndpoints;
          labels = foldl' (acc: e: acc // e.labels) {} jobEndpoints;
        }
      ];
    };
  in
    mapAttrsToList createJobConfig endpointsByJob;
}

View file

@ -1,187 +0,0 @@
# modules/backup-manager.nix
#
# Turns the declarative backup jobs collected in homelab.global.backups.jobs
# into systemd oneshot services plus matching timers, supporting restic and
# borg backends.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.backups;
  globalCfg = config.homelab.global;
  # Build one systemd service ("backup-<job>") per backup job, wrapping the
  # backend-specific backup + retention commands in a generated script.
  createBackupService = job: let
    serviceName = "backup-${job.name}";
    allExcludes = globalCfg.backups.globalExcludes ++ job.excludePatterns;
    excludeArgs = map (pattern: "--exclude '${pattern}'") allExcludes;
    backupScript =
      if job.backend == "restic"
      then ''
        #!/bin/bash
        set -euo pipefail
        ${optionalString (job.preHook != null) job.preHook}
        # Restic backup
        ${pkgs.restic}/bin/restic backup \
          ${concatStringsSep " " (map (path: "'${path}'") job.paths)} \
          ${concatStringsSep " " excludeArgs} \
          --tag "host:${globalCfg.hostname}" \
          --tag "job:${job.name}" \
          --tag "env:${globalCfg.environment}"
        # Apply retention policy
        ${pkgs.restic}/bin/restic forget \
          --keep-daily ${job.retention.daily} \
          --keep-weekly ${job.retention.weekly} \
          --keep-monthly ${job.retention.monthly} \
          --keep-yearly ${job.retention.yearly} \
          --prune
        ${optionalString (job.postHook != null) job.postHook}
      ''
      else if job.backend == "borg"
      then ''
        #!/bin/bash
        set -euo pipefail
        ${optionalString (job.preHook != null) job.preHook}
        # Borg backup
        ${pkgs.borgbackup}/bin/borg create \
          --stats --progress \
          ${concatStringsSep " " excludeArgs} \
          "::${globalCfg.hostname}-${job.name}-{now}" \
          ${concatStringsSep " " (map (path: "'${path}'") job.paths)}
        # Apply retention policy
        ${pkgs.borgbackup}/bin/borg prune \
          --keep-daily ${job.retention.daily} \
          --keep-weekly ${job.retention.weekly} \
          --keep-monthly ${job.retention.monthly} \
          --keep-yearly ${job.retention.yearly}
        ${optionalString (job.postHook != null) job.postHook}
      ''
      # NOTE(review): the backupJobType enum also allows "rclone", which
      # aborts evaluation here — confirm whether rclone jobs are expected.
      else throw "Unsupported backup backend: ${job.backend}";
  in {
    ${serviceName} = {
      description = "Backup job: ${job.name}";
      after = ["network-online.target"];
      wants = ["network-online.target"];
      serviceConfig = {
        Type = "oneshot";
        User = "backup";
        Group = "backup";
        ExecStart = pkgs.writeScript "backup-${job.name}" backupScript;
        EnvironmentFile = "/etc/backup/environment";
      };
    };
  };
  # Build the systemd timer for a backup job.
  # FIX: systemd.timers.<name> already generates "<name>.timer"; the old code
  # keyed the attrset by "backup-<job>.timer", producing a unit called
  # "backup-<job>.timer.timer" that never triggered its service. The timer
  # attribute must match the service name exactly.
  createBackupTimer = job: let
    serviceName = "backup-${job.name}";
  in {
    ${serviceName} = {
      description = "Timer for backup job: ${job.name}";
      wantedBy = ["timers.target"];
      timerConfig = {
        OnCalendar =
          if job.schedule == "daily"
          then "daily"
          else if job.schedule == "weekly"
          then "weekly"
          else if job.schedule == "hourly"
          then "hourly"
          else job.schedule; # Assume it's a cron expression
        Persistent = true;
        RandomizedDelaySec = "15min";
      };
    };
  };
in {
  options.homelab.backups = {
    enable = mkEnableOption "Backup management";
    restic = {
      repository = mkOption {
        type = types.str;
        description = "Restic repository URL";
      };
      passwordFile = mkOption {
        type = types.str;
        default = "/etc/backup/restic-password";
        description = "Path to file containing restic password";
      };
    };
    borg = {
      repository = mkOption {
        type = types.str;
        description = "Borg repository path";
      };
      sshKey = mkOption {
        type = types.str;
        default = "/etc/backup/borg-ssh-key";
        description = "Path to SSH key for borg repository";
      };
    };
  };
  config = mkIf (cfg.enable && globalCfg.enable && (length globalCfg.backups.jobs) > 0) {
    # Create backup user
    users.users.backup = {
      isSystemUser = true;
      group = "backup";
      home = "/var/lib/backup";
      createHome = true;
    };
    users.groups.backup = {};
    # Install backup tools plus a small status helper.
    environment.systemPackages = with pkgs; [
      restic
      borgbackup
      rclone
      (pkgs.writeScriptBin "backup-status" ''
        #!/bin/bash
        echo "=== Backup Status ==="
        echo
        ${concatStringsSep "\n" (map (job: ''
            echo "Job: ${job.name}"
            systemctl is-active backup-${job.name}.timer || echo "Timer inactive"
            systemctl status backup-${job.name}.timer --no-pager -l | grep -E "(Active|Trigger)" || true
            echo
          '')
          globalCfg.backups.jobs)}
      '')
    ];
    # Create systemd services and timers
    systemd.services = lib.foldl' (acc: job: acc // (createBackupService job)) {} globalCfg.backups.jobs;
    systemd.timers = lib.foldl' (acc: job: acc // (createBackupTimer job)) {} globalCfg.backups.jobs;
    # Environment file template
    environment.etc."backup/environment.example".text = ''
      # Restic configuration
      RESTIC_REPOSITORY=${cfg.restic.repository}
      RESTIC_PASSWORD_FILE=${cfg.restic.passwordFile}
      # AWS S3 credentials (if using S3 backend)
      AWS_ACCESS_KEY_ID=your-access-key
      AWS_SECRET_ACCESS_KEY=your-secret-key
      # Borg configuration
      BORG_REPO=${cfg.borg.repository}
      BORG_RSH="ssh -i ${cfg.borg.sshKey}"
      # Notification settings
      NOTIFICATION_URL=your-webhook-url
    '';
  };
}

View file

@ -1,8 +1,3 @@
{ {
ente = import ./ente.nix; ente = import ./ente.nix;
global-config = import ./global-config.nix;
backup-manager = import ./backup-manager.nix;
# Service modules
services = import ./services;
} }

View file

@ -72,11 +72,6 @@ in {
type = types.str; type = types.str;
description = "The domain under which the photos frontend will be served."; description = "The domain under which the photos frontend will be served.";
}; };
auth = mkOption {
type = types.str;
description = "The domain under which the auth frontend will be served.";
};
}; };
}; };
@ -192,11 +187,6 @@ in {
name = "ente"; name = "ente";
user = "ente"; user = "ente";
}; };
key = {
encryption._secret = pkgs.writeText "encryption" "T0sn+zUVFOApdX4jJL4op6BtqqAfyQLH95fu8ASWfno=";
hash._secret = pkgs.writeText "hash" "g/dBZBs1zi9SXQ0EKr4RCt1TGr7ZCKkgrpjyjrQEKovWPu5/ce8dYM6YvMIPL23MMZToVuuG+Z6SGxxTbxg5NQ==";
};
jwt.secret._secret = pkgs.writeText "jwt" "i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8=";
}; };
systemd.services.ente = { systemd.services.ente = {
@ -253,7 +243,6 @@ in {
BindReadOnlyPaths = [ BindReadOnlyPaths = [
"${cfgApi.package}/share/museum/migrations:${dataDir}/migrations" "${cfgApi.package}/share/museum/migrations:${dataDir}/migrations"
"${cfgApi.package}/share/museum/mail-templates:${dataDir}/mail-templates" "${cfgApi.package}/share/museum/mail-templates:${dataDir}/mail-templates"
"${cfgApi.package}/share/museum/web-templates:${dataDir}/web-templates"
]; ];
User = cfgApi.user; User = cfgApi.user;
@ -322,12 +311,7 @@ in {
in { in {
enable = true; enable = true;
virtualHosts.${domainFor "accounts"} = { virtualHosts.${domainFor "accounts"} = {
listen = [ forceSSL = mkDefault false;
{
addr = "0.0.0.0";
port = 3001;
}
];
locations."/" = { locations."/" = {
root = webPackage "accounts"; root = webPackage "accounts";
tryFiles = "$uri $uri.html /index.html"; tryFiles = "$uri $uri.html /index.html";
@ -337,12 +321,7 @@ in {
}; };
}; };
virtualHosts.${domainFor "cast"} = { virtualHosts.${domainFor "cast"} = {
listen = [ forceSSL = mkDefault false;
{
addr = "0.0.0.0";
port = 3004;
}
];
locations."/" = { locations."/" = {
root = webPackage "cast"; root = webPackage "cast";
tryFiles = "$uri $uri.html /index.html"; tryFiles = "$uri $uri.html /index.html";
@ -355,12 +334,7 @@ in {
serverAliases = [ serverAliases = [
(domainFor "albums") # the albums app is shared with the photos frontend (domainFor "albums") # the albums app is shared with the photos frontend
]; ];
listen = [ forceSSL = mkDefault false;
{
addr = "0.0.0.0";
port = 3000;
}
];
locations."/" = { locations."/" = {
root = webPackage "photos"; root = webPackage "photos";
tryFiles = "$uri $uri.html /index.html"; tryFiles = "$uri $uri.html /index.html";
@ -369,21 +343,6 @@ in {
''; '';
}; };
}; };
virtualHosts.${domainFor "auth"} = {
listen = [
{
addr = "0.0.0.0";
port = 3003;
}
];
locations."/" = {
root = webPackage "auth";
tryFiles = "$uri $uri.html /index.html";
extraConfig = ''
add_header Access-Control-Allow-Origin 'https://${cfgWeb.domains.api}';
'';
};
};
}; };
}) })
]; ];

View file

@ -1,462 +0,0 @@
# modules/global-config.nix
{
config,
lib,
outputs,
...
}:
with lib; let
cfg = config.homelab.global;
# Service type definition
serviceType = types.submodule {
options = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable this service";
};
description = mkOption {
type = types.str;
description = "Human-readable description of the service";
};
category = mkOption {
type = types.enum ["monitoring" "networking" "storage" "security" "media" "development" "backup" "other"];
default = "other";
description = "Service category for organization";
};
dependencies = mkOption {
type = types.listOf types.str;
default = [];
description = "List of other homelab services this depends on";
};
ports = mkOption {
type = types.listOf types.port;
default = [];
description = "Ports this service uses";
};
tags = mkOption {
type = types.listOf types.str;
default = [];
description = "Additional tags for this service";
};
priority = mkOption {
type = types.int;
default = 100;
description = "Service priority (lower numbers start first)";
};
};
};
# Type definitions
monitoringEndpointType = types.submodule {
options = {
name = mkOption {
type = types.str;
description = "Name of the monitoring endpoint";
};
port = mkOption {
type = types.port;
description = "Port number for the endpoint";
};
path = mkOption {
type = types.str;
default = "/metrics";
description = "Path for the metrics endpoint";
};
jobName = mkOption {
type = types.str;
description = "Prometheus job name";
};
scrapeInterval = mkOption {
type = types.str;
default = "30s";
description = "Prometheus scrape interval";
};
labels = mkOption {
type = types.attrsOf types.str;
default = {};
description = "Additional labels for this endpoint";
};
};
};
backupJobType = types.submodule {
options = {
name = mkOption {
type = types.str;
description = "Name of the backup job";
};
backend = mkOption {
type = types.enum ["restic" "borg" "rclone"];
description = "Backup backend to use";
};
paths = mkOption {
type = types.listOf types.str;
description = "List of paths to backup";
};
schedule = mkOption {
type = types.str;
default = "daily";
description = "Backup schedule (cron format or preset)";
};
retention = mkOption {
type = types.attrsOf types.str;
default = {
daily = "7";
weekly = "4";
monthly = "6";
yearly = "2";
};
description = "Retention policy";
};
excludePatterns = mkOption {
type = types.listOf types.str;
default = [];
description = "Patterns to exclude from backup";
};
preHook = mkOption {
type = types.nullOr types.str;
default = null;
description = "Script to run before backup";
};
postHook = mkOption {
type = types.nullOr types.str;
default = null;
description = "Script to run after backup";
};
};
};
reverseProxyEntryType = types.submodule {
options = {
subdomain = mkOption {
type = types.str;
description = "Subdomain for the service";
};
port = mkOption {
type = types.port;
description = "Internal port to proxy to";
};
path = mkOption {
type = types.str;
default = "/";
description = "Path prefix for the service";
};
enableAuth = mkOption {
type = types.bool;
default = false;
description = "Enable authentication for this service";
};
enableSSL = mkOption {
type = types.bool;
default = true;
description = "Enable SSL for this service";
};
customHeaders = mkOption {
type = types.attrsOf types.str;
default = {};
description = "Custom headers to add";
};
websockets = mkOption {
type = types.bool;
default = false;
description = "Enable websocket support";
};
};
};
# Helper functions for services
enabledServices = filterAttrs (name: service: service.enable) cfg.services;
servicesByCategory = category: filterAttrs (name: service: service.enable && service.category == category) cfg.services;
in {
imports = [
./motd
];
options.homelab.global = {
enable = mkEnableOption "Global homelab configuration";
hostname = mkOption {
type = types.str;
description = "Hostname for this system";
};
domain = mkOption {
type = types.str;
default = "procopius.dk";
description = "Base domain for the homelab";
};
environment = mkOption {
type = types.enum ["production" "staging" "development"];
default = "production";
description = "Environment type";
};
location = mkOption {
type = types.str;
default = "homelab";
description = "Physical location identifier";
};
tags = mkOption {
type = types.listOf types.str;
default = [];
description = "Tags for this system";
};
services = mkOption {
type = types.attrsOf serviceType;
default = {};
description = "Homelab services configuration";
example = literalExpression ''
{
prometheus = {
enable = true;
description = "Metrics collection and monitoring";
category = "monitoring";
ports = [ 9090 ];
tags = [ "metrics" "alerting" ];
};
traefik = {
enable = true;
description = "Reverse proxy and load balancer";
category = "networking";
ports = [ 80 443 8080 ];
tags = [ "proxy" "loadbalancer" ];
priority = 10;
};
}
'';
};
monitoring = {
endpoints = mkOption {
type = types.listOf monitoringEndpointType;
default = [];
description = "Monitoring endpoints exposed by this system";
};
nodeExporter = {
enable = mkOption {
type = types.bool;
default = true;
description = "Enable node exporter";
};
port = mkOption {
type = types.port;
default = 9100;
description = "Node exporter port";
};
};
};
backups = {
jobs = mkOption {
type = types.listOf backupJobType;
default = [];
description = "Backup jobs for this system";
};
globalExcludes = mkOption {
type = types.listOf types.str;
default = [
"*.tmp"
"*.cache"
"*/.git"
"*/node_modules"
"*/target"
];
description = "Global exclude patterns for all backup jobs";
};
};
reverseProxy = {
entries = mkOption {
type = types.listOf reverseProxyEntryType;
default = [];
description = "Reverse proxy entries for this system";
};
};
# Helper function to add monitoring endpoint
addMonitoringEndpoint = mkOption {
type = types.functionTo (types.functionTo types.anything);
default = name: endpoint: {
homelab.global.monitoring.endpoints = [
(endpoint // {inherit name;})
];
};
description = "Helper function to add monitoring endpoints";
};
# Helper function to add backup job
addBackupJob = mkOption {
type = types.functionTo (types.functionTo types.anything);
default = name: job: {
homelab.global.backups.jobs = [
(job // {inherit name;})
];
};
description = "Helper function to add backup jobs";
};
# Helper function to add reverse proxy entry
addReverseProxyEntry = mkOption {
type = types.functionTo (types.functionTo types.anything);
default = subdomain: entry: {
homelab.global.reverseProxy.entries = [
(entry // {inherit subdomain;})
];
};
description = "Helper function to add reverse proxy entries";
};
# Helper functions
enabledServicesList = mkOption {
type = types.listOf types.str;
default = attrNames enabledServices;
description = "List of enabled service names";
readOnly = true;
};
servicesByPriority = mkOption {
type = types.listOf types.str;
default =
map (x: x.name) (sort (a: b: a.priority < b.priority)
(mapAttrsToList (name: service: service // {inherit name;}) enabledServices));
description = "Services sorted by priority";
readOnly = true;
};
};
config = mkIf cfg.enable {
# Set hostname
networking.hostName = cfg.hostname;
# Configure node exporter if enabled
services.prometheus.exporters.node = mkIf cfg.monitoring.nodeExporter.enable {
enable = true;
port = cfg.monitoring.nodeExporter.port;
enabledCollectors = [
"systemd"
"textfile"
"filesystem"
"loadavg"
"meminfo"
"netdev"
"stat"
];
};
# Automatically add node exporter to monitoring endpoints
homelab.global.monitoring.endpoints = mkIf cfg.monitoring.nodeExporter.enable [
{
name = "node-exporter";
port = cfg.monitoring.nodeExporter.port;
path = "/metrics";
jobName = "node";
labels = {
instance = cfg.hostname;
environment = cfg.environment;
location = cfg.location;
};
}
];
# Export configuration for external consumption
environment.etc."homelab/config.json".text = builtins.toJSON {
inherit (cfg) hostname domain environment location tags;
services =
mapAttrs (name: service: {
inherit (service) enable description category dependencies ports tags priority;
})
cfg.services;
enabledServices = enabledServices;
servicesByCategory = {
monitoring = servicesByCategory "monitoring";
networking = servicesByCategory "networking";
storage = servicesByCategory "storage";
security = servicesByCategory "security";
media = servicesByCategory "media";
development = servicesByCategory "development";
backup = servicesByCategory "backup";
other = servicesByCategory "other";
};
monitoring = {
endpoints =
map (endpoint: {
name = endpoint.name;
url = "http://${cfg.hostname}:${toString endpoint.port}${endpoint.path}";
port = endpoint.port;
path = endpoint.path;
jobName = endpoint.jobName;
scrapeInterval = endpoint.scrapeInterval;
labels =
endpoint.labels
// {
hostname = cfg.hostname;
environment = cfg.environment;
};
})
cfg.monitoring.endpoints;
};
backups = {
jobs = cfg.backups.jobs;
};
reverseProxy = {
entries =
map (entry: {
subdomain = entry.subdomain;
url = "http://${cfg.hostname}:${toString entry.port}";
port = entry.port;
path = entry.path;
domain = "${entry.subdomain}.${cfg.domain}";
enableAuth = entry.enableAuth;
enableSSL = entry.enableSSL;
customHeaders = entry.customHeaders;
websockets = entry.websockets;
})
cfg.reverseProxy.entries;
};
};
# Create a status command that shows service information
environment.systemPackages = [
# (pkgs.writeScriptBin "homelab-services" ''
# #!/bin/bash
# echo "🏠 Homelab Services Status"
# echo "=========================="
# echo
# ${concatStringsSep "\n" (mapAttrsToList (name: service: ''
# echo "${name}: ${service.description}"
# echo " Category: ${service.category}"
# echo " Status: $(systemctl is-active ${name} 2>/dev/null || echo "not found")"
# ${optionalString (service.ports != []) ''
# echo " Ports: ${concatStringsSep ", " (map toString service.ports)}"
# ''}
# ${optionalString (service.tags != []) ''
# echo " Tags: ${concatStringsSep ", " service.tags}"
# ''}
# echo
# '')
# enabledServices)}
# '')
];
};
}

View file

@ -1,304 +0,0 @@
# modules/motd/default.nix
#
# Dynamic message-of-the-day for homelab hosts. Builds a `homelab-motd`
# shell script that renders system, service, monitoring, backup and
# reverse-proxy status from the JSON snapshot the global homelab module
# writes to /etc/homelab/config.json.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.motd;
  # Global homelab settings; MOTD is only wired up when both this module
  # and homelab.global are enabled (see `config` below).
  globalCfg = config.homelab.global;
  # Services flagged enable = true in homelab.global.services; these are
  # auto-registered into `homelab.motd.services` in the config section.
  enabledServices = filterAttrs (name: service: service.enable) globalCfg.services;
  # The MOTD script itself. Tool paths (jq, nc, systemctl, ...) are pinned
  # to Nix store paths via interpolation; `''${` is the Nix escape that
  # yields a literal shell `${`.
  homelab-motd = pkgs.writeShellScriptBin "homelab-motd" ''
    #! /usr/bin/env bash
    source /etc/os-release
    # Colors for output
    RED='\033[0;31m'
    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    BLUE='\033[0;34m'
    PURPLE='\033[0;35m'
    CYAN='\033[0;36m'
    WHITE='\033[1;37m'
    NC='\033[0m' # No Color
    BOLD='\033[1m'
    # Helper functions
    print_header() {
      echo -e "''${BOLD}''${BLUE}''${NC}"
      echo -e "''${BOLD}''${BLUE}''${NC}''${WHITE} 🏠 HOMELAB STATUS ''${NC}''${BOLD}''${BLUE}''${NC}"
      echo -e "''${BOLD}''${BLUE}''${NC}"
    }
    print_section() {
      echo -e "\n''${BOLD}''${CYAN} $1''${NC}"
      echo -e "''${CYAN}''${NC}"
    }
    get_service_status() {
      local service="$1"
      if ${pkgs.systemd}/bin/systemctl is-active --quiet "$service" 2>/dev/null; then
        echo -e "''${GREEN}''${NC} Active"
      elif ${pkgs.systemd}/bin/systemctl is-enabled --quiet "$service" 2>/dev/null; then
        echo -e "''${YELLOW}''${NC} Inactive"
      else
        echo -e "''${RED}''${NC} Disabled"
      fi
    }
    get_timer_status() {
      local timer="$1"
      if ${pkgs.systemd}/bin/systemctl is-active --quiet "$timer" 2>/dev/null; then
        local next_run=$(${pkgs.systemd}/bin/systemctl show "$timer" --property=NextElapseUSecRealtime --value 2>/dev/null || echo "0")
        if [[ "$next_run" != "0" && "$next_run" != "n/a" ]]; then
          local next_readable=$(${pkgs.systemd}/bin/systemctl list-timers --no-pager "$timer" 2>/dev/null | tail -n +2 | head -n 1 | awk '{print $1, $2}' || echo "Unknown")
          echo -e "''${GREEN}''${NC} Next: ''${next_readable}"
        else
          echo -e "''${GREEN}''${NC} Active"
        fi
      else
        echo -e "''${RED}''${NC} Inactive"
      fi
    }
    # Main script
    ${optionalString cfg.clearScreen "clear"}
    print_header
    # Check if global config exists
    CONFIG_FILE="/etc/homelab/config.json"
    if [[ ! -f "$CONFIG_FILE" ]]; then
      echo -e "''${RED} Global homelab configuration not found at $CONFIG_FILE''${NC}"
      exit 1
    fi
    # Parse global configuration
    HOSTNAME=$(${pkgs.jq}/bin/jq -r '.hostname' "$CONFIG_FILE" 2>/dev/null || hostname)
    DOMAIN=$(${pkgs.jq}/bin/jq -r '.domain' "$CONFIG_FILE" 2>/dev/null || echo "unknown")
    ENVIRONMENT=$(${pkgs.jq}/bin/jq -r '.environment' "$CONFIG_FILE" 2>/dev/null || echo "unknown")
    LOCATION=$(${pkgs.jq}/bin/jq -r '.location' "$CONFIG_FILE" 2>/dev/null || echo "unknown")
    TAGS=$(${pkgs.jq}/bin/jq -r '.tags[]?' "$CONFIG_FILE" 2>/dev/null | tr '\n' ' ' || echo "none")
    print_section "SYSTEM INFO"
    echo -e " ''${BOLD}Hostname:''${NC} $HOSTNAME"
    echo -e " ''${BOLD}Domain:''${NC} $DOMAIN"
    echo -e " ''${BOLD}Environment:''${NC} $ENVIRONMENT"
    echo -e " ''${BOLD}Location:''${NC} $LOCATION"
    echo -e " ''${BOLD}Tags:''${NC} ''${TAGS:-none}"
    echo -e " ''${BOLD}Uptime:''${NC} $(${pkgs.procps}/bin/uptime -p)"
    echo -e " ''${BOLD}Load:''${NC} $(${pkgs.procps}/bin/uptime | awk -F'load average:' '{print $2}' | xargs)"
    ${optionalString cfg.showServices ''
      # Enabled services from homelab config
      print_section "HOMELAB SERVICES"
      ${concatStringsSep "\n" (mapAttrsToList (name: service: ''
          status=$(get_service_status "${service.systemdService}")
          printf " %-25s %s\n" "${name}" "$status"
        '')
        cfg.services)}
    ''}
    ${optionalString cfg.showMonitoring ''
      # Monitoring endpoints
      print_section "MONITORING ENDPOINTS"
      ENDPOINTS=$(${pkgs.jq}/bin/jq -c '.monitoring.endpoints[]?' "$CONFIG_FILE" 2>/dev/null || echo "")
      if [[ -n "$ENDPOINTS" ]]; then
        while IFS= read -r endpoint; do
          name=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.name')
          port=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.port')
          path=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.path')
          job=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.jobName')
          # Check if port is accessible
          if ${pkgs.netcat}/bin/nc -z localhost "$port" 2>/dev/null; then
            status="''${GREEN}''${NC}"
          else
            status="''${RED}''${NC}"
          fi
          printf " %-20s %s %s:%s%s (job: %s)\n" "$name" "$status" "$HOSTNAME" "$port" "$path" "$job"
        done <<< "$ENDPOINTS"
      else
        echo -e " ''${YELLOW}No monitoring endpoints configured''${NC}"
      fi
    ''}
    ${optionalString cfg.showBackups ''
      # Backup jobs status
      print_section "BACKUP JOBS"
      BACKUP_JOBS=$(${pkgs.jq}/bin/jq -c '.backups.jobs[]?' "$CONFIG_FILE" 2>/dev/null || echo "")
      if [[ -n "$BACKUP_JOBS" ]]; then
        while IFS= read -r job; do
          name=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.name')
          backend=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.backend')
          schedule=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.schedule')
          service_name="backup-''${name}"
          timer_name="''${service_name}.timer"
          timer_status=$(get_timer_status "$timer_name")
          # Get last backup info
          last_run="Unknown"
          if ${pkgs.systemd}/bin/systemctl show "$service_name" --property=ExecMainStartTimestamp --value 2>/dev/null | grep -q "^[^n]"; then
            last_run=$(${pkgs.systemd}/bin/systemctl show "$service_name" --property=ExecMainStartTimestamp --value 2>/dev/null | head -1)
            if [[ "$last_run" != "n/a" && -n "$last_run" ]]; then
              last_run=$(${pkgs.coreutils}/bin/date -d "$last_run" "+%Y-%m-%d %H:%M" 2>/dev/null || echo "Unknown")
            fi
          fi
          printf " %-20s %s (%s, %s) Last: %s\n" "$name" "$timer_status" "$backend" "$schedule" "$last_run"
        done <<< "$BACKUP_JOBS"
        # Show backup-status command output if available
        if command -v backup-status >/dev/null 2>&1; then
          echo -e "\n ''${BOLD}Quick Status:''${NC}"
          backup-status 2>/dev/null | tail -n +3 | head -10 | sed 's/^/ /'
        fi
      else
        echo -e " ''${YELLOW}No backup jobs configured''${NC}"
      fi
    ''}
    ${optionalString cfg.showReverseProxy ''
      # Reverse proxy entries
      print_section "REVERSE PROXY ENTRIES"
      PROXY_ENTRIES=$(${pkgs.jq}/bin/jq -c '.reverseProxy.entries[]?' "$CONFIG_FILE" 2>/dev/null || echo "")
      if [[ -n "$PROXY_ENTRIES" ]]; then
        while IFS= read -r entry; do
          subdomain=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.subdomain')
          port=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.port')
          domain=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.domain')
          auth=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.enableAuth')
          ssl=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.enableSSL')
          # Check if service is running on the port
          if ${pkgs.netcat}/bin/nc -z localhost "$port" 2>/dev/null; then
            status="''${GREEN}''${NC}"
          else
            status="''${RED}''${NC}"
          fi
          auth_indicator=""
          [[ "$auth" == "true" ]] && auth_indicator=" 🔐"
          ssl_indicator=""
          [[ "$ssl" == "true" ]] && ssl_indicator=" 🔒"
          printf " %-25s %s :%s %s%s%s\n" "''${domain}" "$status" "$port" "$domain" "$auth_indicator" "$ssl_indicator"
        done <<< "$PROXY_ENTRIES"
      else
        echo -e " ''${YELLOW}No reverse proxy entries configured''${NC}"
      fi
    ''}
    ${optionalString cfg.showResources ''
      # Resource usage
      print_section "RESOURCE USAGE"
      echo -e " ''${BOLD}Memory:''${NC} $(${pkgs.procps}/bin/free -h | awk '/^Mem:/ {printf "%s/%s (%.1f%%)", $3, $2, ($3/$2)*100}')"
      echo -e " ''${BOLD}Disk (root):''${NC} $(${pkgs.coreutils}/bin/df -h / | awk 'NR==2 {printf "%s/%s (%s)", $3, $2, $5}')"
      echo -e " ''${BOLD}CPU Usage:''${NC} $(${pkgs.procps}/bin/top -bn1 | grep "Cpu(s)" | awk '{printf "%.1f%%", $2+$4}' | sed 's/%us,//')%"
    ''}
    ${optionalString cfg.showRecentIssues ''
      # Recent logs (errors only)
      print_section "RECENT ISSUES"
      error_count=$(${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q | wc -l)
      if [[ "$error_count" -gt 0 ]]; then
        echo -e " ''${RED} $error_count errors in last 24h''${NC}"
        ${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q | tail -3 | sed 's/^/ /'
      else
        echo -e " ''${GREEN} No critical errors in last 24h''${NC}"
      fi
    ''}
    echo -e "\n''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${WHITE}Run 'backup-status' for detailed backup info ''${NC}''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC} ''${WHITE}Config: /etc/homelab/config.json ''${NC}''${BOLD}''${BLUE}''${NC}"
    echo -e "''${BOLD}''${BLUE}''${NC}"
    echo
  '';
in {
  options.homelab.motd = {
    enable = mkEnableOption "Dynamic homelab MOTD";
    # Per-section display toggles; each guards one optionalString block in
    # the script above.
    clearScreen = mkOption {
      type = types.bool;
      default = true;
      description = "Clear screen before showing MOTD";
    };
    showServices = mkOption {
      type = types.bool;
      default = true;
      description = "Show enabled homelab services";
    };
    showMonitoring = mkOption {
      type = types.bool;
      default = true;
      description = "Show monitoring endpoints";
    };
    showBackups = mkOption {
      type = types.bool;
      default = true;
      description = "Show backup jobs status";
    };
    showReverseProxy = mkOption {
      type = types.bool;
      default = true;
      description = "Show reverse proxy entries";
    };
    showResources = mkOption {
      type = types.bool;
      default = true;
      description = "Show system resource usage";
    };
    showRecentIssues = mkOption {
      type = types.bool;
      default = true;
      description = "Show recent system issues";
    };
    # Service list rendered in the HOMELAB SERVICES section; baked into the
    # script at build time via mapAttrsToList above.
    services = mkOption {
      type = types.attrsOf (types.submodule {
        options = {
          systemdService = mkOption {
            type = types.str;
            description = "Name of the systemd service to monitor";
          };
          description = mkOption {
            type = types.str;
            default = "";
            description = "Human-readable description of the service";
          };
        };
      });
      default = {};
      description = "Homelab services to monitor in MOTD";
    };
  };
  config = mkIf (cfg.enable && globalCfg.enable) {
    # Register services with MOTD
    # Mirrors every enabled homelab.global service into the MOTD service
    # list. NOTE(review): assumes the systemd unit name equals the homelab
    # service name — confirm for services whose unit is named differently.
    homelab.motd.services =
      mapAttrs (name: service: {
        systemdService = name;
        description = service.description;
      })
      enabledServices;
    # Create a command to manually run the MOTD
    environment.systemPackages = with pkgs; [
      jq
      netcat
      homelab-motd
    ];
  };
}

View file

@ -1,4 +0,0 @@
# Aggregates the homelab service modules into one attribute set so callers
# can import them by name.
# NOTE(review): prometheus.nix sits alongside these service modules but is
# not exported here — confirm whether it should be registered too.
{
  jellyfin = import ./jellyfin.nix;
  grafana = import ./grafana.nix;
}

View file

@ -1 +0,0 @@

View file

@ -1,72 +0,0 @@
# modules/services/grafana.nix
#
# Thin wrapper around the upstream Grafana service that also registers
# backup, reverse-proxy and monitoring metadata with the global homelab
# configuration (config.homelab.global).
#
# Fix: dropped the unused `helpers = import ../lib/helpers.nix ...` binding
# (dead code and a hard dependency on a file this module never used).
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.services.grafana;
in {
  # NOTE(review): these options share the `services.grafana` namespace with
  # the upstream NixOS module; if both modules are imported, the duplicate
  # `enable`/`port` declarations will fail evaluation. Consider moving this
  # wrapper under `homelab.services.grafana` (as the prometheus module does).
  options.services.grafana = {
    enable = mkEnableOption "Grafana monitoring dashboard";
    port = mkOption {
      type = types.port;
      default = 3000;
      description = "Grafana web interface port";
    };
    adminPassword = mkOption {
      type = types.str;
      description = "Admin password for Grafana";
    };
  };
  config = mkIf cfg.enable {
    services.grafana = {
      enable = true;
      settings = {
        server = {
          http_port = cfg.port;
          # Advertise the host's FQDN from the global homelab settings.
          domain = "${config.homelab.global.hostname}.${config.homelab.global.domain}";
        };
        security = {
          # SECURITY: a plain-string option ends up world-readable in the
          # Nix store; prefer Grafana's "$__file{...}" indirection with a
          # secret file (e.g. via sops-nix) instead of an inline password.
          admin_password = cfg.adminPassword;
        };
      };
    };
    # Register this service's cross-cutting concerns with the global config.
    homelab.global = {
      backups.jobs = [
        {
          name = "grafana-data";
          backend = "restic";
          paths = ["/var/lib/grafana"];
          schedule = "daily";
          excludePatterns = ["*/plugins/*" "*/png/*"];
        }
      ];
      reverseProxy.entries = [
        {
          subdomain = "grafana";
          port = cfg.port;
          enableAuth = false; # Grafana handles its own auth
        }
      ];
      monitoring.endpoints = [
        {
          name = "grafana";
          port = cfg.port;
          path = "/metrics";
          jobName = "grafana";
          labels = {
            service = "grafana";
            type = "monitoring";
          };
        }
      ];
    };
  };
}

View file

@ -1,125 +0,0 @@
# modules/services/jellyfin.nix
#
# Jellyfin media server wrapper: enables the upstream service and registers
# backup jobs, a reverse-proxy entry and a monitoring endpoint with the
# global homelab configuration (config.homelab.global).
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.services.jellyfin;
in {
  # NOTE(review): these options are declared under `services.jellyfin`, the
  # same namespace as the upstream NixOS module — duplicate declarations
  # (e.g. `enable`, `dataDir`) will fail evaluation if both modules are
  # imported. Consider a `homelab.services.jellyfin` namespace instead.
  options.services.jellyfin = {
    enable = mkEnableOption "Jellyfin media server";
    port = mkOption {
      type = types.port;
      default = 8096;
      description = "Port for Jellyfin web interface";
    };
    dataDir = mkOption {
      type = types.str;
      default = "/var/lib/jellyfin";
      description = "Directory to store Jellyfin data";
    };
    mediaDir = mkOption {
      type = types.str;
      default = "/media";
      description = "Directory containing media files";
    };
    enableMetrics = mkOption {
      type = types.bool;
      default = true;
      description = "Enable Prometheus metrics";
    };
    exposeWeb = mkOption {
      type = types.bool;
      default = true;
      description = "Expose web interface through reverse proxy";
    };
  };
  config = mkIf cfg.enable {
    # Enable the service
    services.jellyfin = {
      enable = true;
      dataDir = cfg.dataDir;
    };
    # Configure global settings
    homelab.global = {
      # Add backup job for Jellyfin data
      backups.jobs = [
        {
          name = "jellyfin-config";
          backend = "restic";
          paths = ["${cfg.dataDir}/config" "${cfg.dataDir}/data"];
          schedule = "0 2 * * *"; # Daily at 2 AM
          excludePatterns = [
            "*/cache/*"
            "*/transcodes/*"
            "*/logs/*"
          ];
          # Jellyfin is stopped for the duration of the config backup so the
          # database is consistent. NOTE(review): whether postHook runs when
          # the backup fails depends on the backup runner — confirm, or the
          # service may stay stopped after a failed backup.
          preHook = ''
            # Stop jellyfin for consistent backup
            systemctl stop jellyfin
          '';
          postHook = ''
            # Restart jellyfin after backup
            systemctl start jellyfin
          '';
        }
        {
          name = "jellyfin-media";
          backend = "restic";
          paths = [cfg.mediaDir];
          schedule = "0 3 * * 0"; # Weekly on Sunday at 3 AM
          excludePatterns = [
            "*.tmp"
            "*/.@__thumb/*" # Synology thumbnails
          ];
        }
      ];
      # Add reverse proxy entry if enabled
      reverseProxy.entries = mkIf cfg.exposeWeb [
        {
          subdomain = "jellyfin";
          port = cfg.port;
          enableAuth = false; # Jellyfin has its own auth
          websockets = true;
          customHeaders = {
            "X-Forwarded-Proto" = "$scheme";
            "X-Forwarded-Host" = "$host";
          };
        }
      ];
      # Add monitoring endpoint if metrics enabled
      monitoring.endpoints = mkIf cfg.enableMetrics [
        {
          name = "jellyfin";
          port = cfg.port;
          path = "/metrics"; # Assuming you have a metrics plugin
          jobName = "jellyfin";
          scrapeInterval = "60s";
          labels = {
            service = "jellyfin";
            type = "media-server";
          };
        }
      ];
    };
    # Open firewall
    networking.firewall.allowedTCPPorts = [cfg.port];
    # Create media directory
    systemd.tmpfiles.rules = [
      "d ${cfg.mediaDir} 0755 jellyfin jellyfin -"
    ];
  };
}

View file

@ -1,208 +0,0 @@
# modules/services/prometheus.nix
#
# Prometheus server wrapper: configures the upstream service, auto-discovers
# scrape targets from the global homelab monitoring registry, and registers
# its own monitoring, reverse-proxy and backup metadata with homelab.global.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.prometheus;
  globalCfg = config.homelab.global;
in {
  options.homelab.services.prometheus = {
    enable = mkEnableOption "Prometheus monitoring server";
    port = mkOption {
      type = types.port;
      default = 9090;
      description = "Prometheus server port";
    };
    webExternalUrl = mkOption {
      type = types.str;
      default = "http://${globalCfg.hostname}:${toString cfg.port}";
      description = "External URL for Prometheus";
    };
    retention = mkOption {
      type = types.str;
      default = "30d";
      description = "Data retention period";
    };
    scrapeConfigs = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Additional scrape configurations";
    };
    alertmanager = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = "Enable Alertmanager integration";
      };
      url = mkOption {
        type = types.str;
        default = "http://localhost:9093";
        description = "Alertmanager URL";
      };
    };
  };
  config = mkIf cfg.enable {
    # Register service with global homelab config
    homelab.global.services.prometheus = {
      enable = true;
      description = "Metrics collection and monitoring server";
      category = "monitoring";
      ports = [cfg.port];
      tags = ["metrics" "monitoring" "alerting"];
      priority = 20;
      dependencies = ["node-exporter"];
    };
    # Configure the actual Prometheus service
    services.prometheus = {
      enable = true;
      port = cfg.port;
      webExternalUrl = cfg.webExternalUrl;
      retentionTime = cfg.retention;
      scrapeConfigs =
        [
          # Auto-discover monitoring endpoints from global config.
          # Note: every endpoint is scraped at /metrics with a 30s interval
          # here; per-endpoint `path`/`scrapeInterval` settings are ignored
          # by this single static job.
          {
            job_name = "homelab-auto";
            static_configs = [
              {
                targets =
                  map (
                    endpoint: "${globalCfg.hostname}:${toString endpoint.port}"
                  )
                  globalCfg.monitoring.endpoints;
              }
            ];
            scrape_interval = "30s";
            metrics_path = "/metrics";
          }
        ]
        ++ cfg.scrapeConfigs;
      # Alertmanager configuration
      alertmanagers = mkIf cfg.alertmanager.enable [
        {
          static_configs = [
            {
              targets = [cfg.alertmanager.url];
            }
          ];
        }
      ];
      # FIX: `services.prometheus.rules` takes rule *text* (listOf str);
      # passing a pkgs.writeText derivation fails its type check (and would
      # otherwise embed a store path as rule content). `ruleFiles` is the
      # option that accepts file paths.
      ruleFiles = [
        # Basic homelab alerting rules
        (pkgs.writeText "homelab-alerts.yml" ''
          groups:
            - name: homelab
              rules:
                - alert: ServiceDown
                  expr: up == 0
                  for: 5m
                  labels:
                    severity: critical
                  annotations:
                    summary: "Service {{ $labels.instance }} is down"
                    description: "{{ $labels.job }} on {{ $labels.instance }} has been down for more than 5 minutes."
                - alert: HighMemoryUsage
                  expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9
                  for: 10m
                  labels:
                    severity: warning
                  annotations:
                    summary: "High memory usage on {{ $labels.instance }}"
                    description: "Memory usage is above 90% on {{ $labels.instance }}"
                - alert: HighDiskUsage
                  expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes > 0.85
                  for: 5m
                  labels:
                    severity: warning
                  annotations:
                    summary: "High disk usage on {{ $labels.instance }}"
                    description: "Disk usage is above 85% on {{ $labels.instance }} for filesystem {{ $labels.mountpoint }}"
        '')
      ];
    };
    # Add monitoring endpoint to global config
    homelab.global.monitoring.endpoints = [
      {
        name = "prometheus";
        port = cfg.port;
        path = "/metrics";
        jobName = "prometheus";
        scrapeInterval = "30s";
        labels = {
          service = "prometheus";
          role = "monitoring";
        };
      }
    ];
    # Add reverse proxy entry if configured.
    # NOTE(review): `domain` is a non-null option with a default, so this
    # condition is effectively always true — kept for defensiveness.
    homelab.global.reverseProxy.entries = mkIf (globalCfg.domain != null) [
      {
        subdomain = "prometheus";
        port = cfg.port;
        path = "/";
        enableAuth = true;
        enableSSL = true;
        customHeaders = {
          "X-Frame-Options" = "DENY";
          "X-Content-Type-Options" = "nosniff";
        };
      }
    ];
    # Add backup job for Prometheus data
    homelab.global.backups.jobs = [
      {
        name = "prometheus-data";
        backend = "restic";
        paths = ["/var/lib/prometheus2"];
        schedule = "daily";
        retention = {
          daily = "7";
          weekly = "4";
          monthly = "3";
          yearly = "1";
        };
        excludePatterns = [
          "*.tmp"
          "*/wal/*"
        ];
        preHook = ''
          # Stop prometheus temporarily for consistent backup
          systemctl stop prometheus
        '';
        postHook = ''
          # Restart prometheus after backup
          systemctl start prometheus
        '';
      }
    ];
    # Open firewall port
    networking.firewall.allowedTCPPorts = [cfg.port];
    # Create prometheus configuration directory
    systemd.tmpfiles.rules = [
      "d /var/lib/prometheus2 0755 prometheus prometheus -"
      "d /etc/prometheus 0755 root root -"
    ];
  };
}

View file

@ -1,4 +0,0 @@
# Placeholder module: evaluates to an empty attribute set and currently has
# no effect on the system configuration.
{
  # TODO: implement a backrest-based backup backend; reference implementation:
  # https://github.com/L-Trump/nixos-configs/blob/ab3fb16e330b8a2904b9967e46af8c061b56266e/modules/nixos/server/backrest.nix#L7
}

View file

@ -1,95 +0,0 @@
# backups-option.nix
#
# Defines the `system.backups.backups` option: an attribute set of named
# backup jobs. Each job selects a registered backend (validated against
# `cfg.backends`), lists the paths to back up, and may override scheduling,
# hook scripts and backend-specific options.
#
# Takes the parent module's `cfg` (config.system.backups) so the option can
# reference the registered backends and the forwarded `cfg.lib`.
#
# Fix: dropped `mkEnableOption` from the inherit — it was never used.
cfg: let
  inherit (cfg.lib) mkOption types attrNames;
in
  mkOption {
    type = types.attrsOf (
      types.submodule (
        {
          name,
          config,
          ...
        } @ args: {
          options = {
            backend = mkOption {
              # Restrict to backends that registered themselves in
              # `system.backups.backends`.
              type = types.enum (attrNames cfg.backends);
              description = "The backup backend to use";
            };
            paths = mkOption {
              type = types.listOf types.str;
              default = [];
              description = "Paths to backup";
            };
            enable = mkOption {
              type = types.bool;
              default = true;
              description = "Whether to enable this backup job";
            };
            timerConfig = mkOption {
              type = with types; nullOr attrs;
              default = null;
              example = {
                OnCalendar = "00:05";
                Persistent = true;
                RandomizedDelaySec = "5h";
              };
              description = ''
                When to run the backup. If null, inherits from backend's default timerConfig.
                Set to null to disable automatic scheduling.
              '';
            };
            backendOptions = mkOption {
              # The schema of these options is supplied by the selected
              # backend: its registered function receives the job's config
              # and name and returns a submodule definition.
              type = let
                backupConfig = config;
                backupName = name;
              in
                types.submodule (
                  {config, ...} @ args'':
                    cfg.backends.${args.config.backend} (args'' // {inherit backupConfig backupName;})
                );
              default = {};
              description = "Backend-specific options";
            };
            preBackupScript = mkOption {
              type = types.lines;
              default = "";
              description = "Script to run before backing up";
            };
            postBackupScript = mkOption {
              type = types.lines;
              default = "";
              description = ''
                Script to run after backing up. Runs even if the backup fails.
              '';
            };
            notifications = {
              failure = {
                enable = mkOption {
                  type = types.bool;
                  default = true;
                  description = "Enable failure notifications";
                };
              };
              success = {
                enable = mkOption {
                  type = types.bool;
                  default = false;
                  description = "Enable success notifications";
                };
              };
            };
          };
        }
      )
    );
    default = {};
    description = "Backup job definitions";
  }

View file

@ -1,6 +0,0 @@
# Entry point for the backup system: pulls in the core option definitions
# and service plumbing (root.nix) plus the restic backend implementation
# (restic.nix).
{
  imports = [
    ./root.nix
    ./restic.nix
  ];
}

View file

@ -1,234 +0,0 @@
# restic.nix - Restic backend implementation
#
# Implements the "restic" backend for the system.backups framework:
# registers the backendOptions schema, materialises each enabled restic job
# as a `services.restic.backups.<name>` entry, hardens/extends the generated
# systemd units, and optionally schedules repository maintenance.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.system.backups;
  resticCfg = cfg.restic;
  # Get only restic backups that are enabled
  resticBackups = filterAttrs (_: backup: backup.backend == "restic" && backup.enable) cfg.backups;
  # Create restic service configurations.
  # Each job becomes an attrset for services.restic.backups: global defaults
  # are deep-merged with the job's backendOptions, then the job's paths and
  # effective timer are layered on top with a plain `//`.
  createResticServices =
    mapAttrs (
      name: backup: let
        # Merge global defaults with backup-specific options
        serviceConfig =
          recursiveUpdate resticCfg.defaultBackendOptions backup.backendOptions
          // {
            inherit (backup) paths;
            # Use backup-specific timer or fall back to global default
            timerConfig =
              if backup.timerConfig != null
              then backup.timerConfig
              else resticCfg.timerConfig;
          };
      in
        serviceConfig
    )
    resticBackups;
in {
  options.system.backups.restic = {
    enable = mkEnableOption "restic backup backend";
    timerConfig = mkOption {
      type = types.attrs;
      default = {
        OnCalendar = "*-*-* 05:00:00";
        Persistent = true;
      };
      description = "Default systemd timer configuration for restic backups";
    };
    defaultBackendOptions = mkOption {
      type = types.attrs;
      default = {};
      example = {
        repository = "/backup/restic";
        passwordFile = "/etc/nixos/secrets/restic-password";
        initialize = true;
        pruneOpts = [
          "--keep-daily 7"
          "--keep-weekly 5"
          "--keep-monthly 12"
          "--keep-yearly 75"
        ];
      };
      description = "Default backend options applied to all restic backup jobs";
    };
    # Advanced options
    runMaintenance = mkOption {
      type = types.bool;
      default = true;
      description = "Whether to run repository maintenance after backups";
    };
    maintenanceTimer = mkOption {
      type = types.attrs;
      default = {
        OnCalendar = "*-*-* 06:00:00";
        Persistent = true;
      };
      description = "Timer configuration for maintenance tasks";
    };
    pruneOpts = mkOption {
      type = types.listOf types.str;
      default = [
        "--keep-daily 7"
        "--keep-weekly 4"
        "--keep-monthly 6"
        "--keep-yearly 3"
      ];
      description = "Default pruning options for maintenance";
    };
  };
  config = mkIf resticCfg.enable {
    # Register restic backend.
    # The registered function receives the job's config and name and returns
    # a submodule defining the schema of `backendOptions` for restic jobs.
    system.backups.backends.restic = {
      backupConfig,
      backupName,
      ...
    }: {
      # Define the proper options schema for restic backendOptions
      options = {
        repository = mkOption {
          type = types.str;
          description = "Restic repository path or URL";
        };
        passwordFile = mkOption {
          type = types.str;
          description = "Path to file containing the repository password";
        };
        initialize = mkOption {
          type = types.bool;
          default = true;
          description = "Whether to initialize the repository if it doesn't exist";
        };
        exclude = mkOption {
          type = types.listOf types.str;
          default = [];
          description = "Patterns to exclude from backup";
        };
        extraBackupArgs = mkOption {
          type = types.listOf types.str;
          default = [];
          description = "Additional arguments passed to restic backup command";
        };
        user = mkOption {
          type = types.str;
          default = "root";
          description = "User to run the backup as";
        };
        pruneOpts = mkOption {
          type = types.listOf types.str;
          default = resticCfg.pruneOpts;
          description = "Pruning options for this backup";
        };
      };
      # Default config merged with global defaults: every job is tagged with
      # its own name so snapshots can be grouped per job.
      config = {
        extraBackupArgs =
          [
            "--tag ${backupName}"
            "--verbose"
          ]
          ++ (resticCfg.defaultBackendOptions.extraBackupArgs or []);
      };
    };
    # Create actual restic backup services
    services.restic.backups = createResticServices;
    # Add restic package
    environment.systemPackages = [pkgs.restic];
    # Systemd service customizations for restic backups
    systemd.services =
      (mapAttrs' (
        name: backup:
          nameValuePair "restic-backups-${name}" {
            # Custom pre/post scripts.
            # postStop runs even when the backup unit fails, which matches
            # the documented contract of postBackupScript.
            preStart = mkBefore backup.preBackupScript;
            postStop = mkAfter backup.postBackupScript;
            # Enhanced service configuration
            serviceConfig = {
              # Restart configuration
              # NOTE(review): RestartMaxDelaySec/RestartSteps require
              # systemd >= 254 — confirm the deployed systemd version.
              Restart = "on-failure";
              RestartSec = "5m";
              RestartMaxDelaySec = "30m";
              RestartSteps = 3;
              # Rate limiting
              # NOTE(review): StartLimitBurst/StartLimitIntervalSec belong in
              # the [Unit] section (NixOS: startLimitBurst/startLimitIntervalSec),
              # not [Service] — verify systemd honors them here.
              StartLimitBurst = 4;
              StartLimitIntervalSec = "2h";
            };
            # Failure handling could be extended here for notifications
            # onFailure = optional backup.notifications.failure.enable "restic-backup-${name}-failure-notify.service";
          }
      )
      resticBackups)
      // optionalAttrs resticCfg.runMaintenance {
        # Repository maintenance service
        restic-maintenance = {
          description = "Restic repository maintenance";
          after = map (name: "restic-backups-${name}.service") (attrNames resticBackups);
          # NOTE(review): this passes defaultBackendOptions verbatim as the
          # process environment, but restic reads RESTIC_REPOSITORY /
          # RESTIC_PASSWORD_FILE — not `repository` / `passwordFile` — and
          # non-string values (lists, bools) are not valid environment
          # entries. Verify maintenance can actually reach the repository.
          environment =
            resticCfg.defaultBackendOptions
            // {
              RESTIC_CACHE_DIR = "/var/cache/restic-maintenance";
            };
          serviceConfig = {
            Type = "oneshot";
            # Two sequential commands: prune per retention policy, then a
            # partial data integrity check.
            ExecStart = [
              "${pkgs.restic}/bin/restic forget --prune ${concatStringsSep " " resticCfg.pruneOpts}"
              "${pkgs.restic}/bin/restic check --read-data-subset=500M"
            ];
            User = "root";
            CacheDirectory = "restic-maintenance";
            CacheDirectoryMode = "0700";
          };
        };
      };
    # Maintenance timer
    systemd.timers = mkIf resticCfg.runMaintenance {
      restic-maintenance = {
        description = "Timer for restic repository maintenance";
        wantedBy = ["timers.target"];
        timerConfig = resticCfg.maintenanceTimer;
      };
    };
    # Helpful shell aliases, including one "backup-<job>" alias per job to
    # trigger it manually.
    programs.zsh.shellAliases =
      {
        restic-snapshots = "restic snapshots --compact --group-by tags";
        restic-repo-size = "restic stats --mode raw-data";
      }
      // (mapAttrs' (
        name: _:
          nameValuePair "backup-${name}" "systemctl start restic-backups-${name}"
      )
      resticBackups);
  };
}

View file

@ -1,66 +0,0 @@
# root.nix - Main backup system module
#
# Core of the system.backups framework: declares the backend registration
# point and the `backups` job option, and applies common systemd hardening
# to every generated backup service.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.system.backups;
  # Filter backups by backend
  getBackupsByBackend = backend:
    filterAttrs (_: backup: backup.backend == backend && backup.enable) cfg.backups;
in {
  options.system.backups = {
    # Backend registration system - backends register themselves here
    # (e.g. restic.nix sets system.backups.backends.restic).
    backends = mkOption {
      type = with types; attrsOf (functionTo attrs);
      internal = true;
      default = {};
      description = ''
        Attribute set of backends where the value is a function that accepts
        backend-specific arguments and returns an attribute set for the backend's options.
      '';
    };
    # Import the backups option from separate file, passing cfg for backend inference
    backups = import ./backups-option.nix cfg;
    # Pass lib to the backups-option for access to mkOption, types, etc.
    # (backups-option.nix reads it as cfg.lib).
    lib = mkOption {
      type = types.attrs;
      internal = true;
      default = lib;
    };
  };
  config = {
    # Re-export backups at root level for convenience
    # backups = cfg.backups;
    # Common backup packages
    environment.systemPackages = with pkgs; [
      # Add common backup utilities here
    ];
    # Common systemd service modifications for all backup services.
    # Computes "<backend>-backups-<job>" unit names for every enabled job of
    # every registered backend — matching restic.nix's naming scheme.
    systemd.services = let
      allBackupServices = flatten (
        mapAttrsToList (
          backendName: backups:
            mapAttrsToList (name: backup: "${backendName}-backups-${name}") backups
        ) (genAttrs (attrNames cfg.backends) (backend: getBackupsByBackend backend))
      );
    in
      genAttrs allBackupServices (serviceName: {
        serviceConfig = {
          # Common hardening for all backup services.
          # NOTE(review): ProtectSystem=strict makes most of the filesystem
          # read-only for the unit; backends writing to a local repository
          # path will need ReadWritePaths — confirm against the jobs in use.
          ProtectSystem = "strict";
          ProtectHome = "read-only";
          PrivateTmp = true;
          NoNewPrivileges = true;
        };
      });
  };
}

View file

@ -3,7 +3,7 @@ nixos-rebuild switch --flake .#proxmox --target-host root@192.168.1.205 --verbos
nixos-rebuild switch --flake .#sandbox --target-host root@sandbox.lab --verbose nixos-rebuild switch --flake .#sandbox --target-host root@sandbox.lab --verbose
nixos-rebuild switch --flake .#monitoring --target-host root@monitor.lab --verbose nixos-rebuild switch --flake .#monitoring --target-host root@monitor.lab --verbose
nixos-rebuild switch --flake .#forgejo --target-host root@forgejo.lab --verbose nixos-rebuild switch --flake .#forgejo --target-host root@forgejo.lab --verbose
nixos-rebuild switch --flake .#dns --target-host root@dns.lab --verbose nixos-rebuild switch --flake .#dns --target-host root@192.168.1.140 --verbose
nixos-rebuild switch --flake .#keycloak --target-host root@keycloak.lab --verbose nixos-rebuild switch --flake .#keycloak --target-host root@keycloak.lab --verbose
nixos-rebuild switch --flake .#mail --target-host root@mail.lab --verbose nixos-rebuild switch --flake .#mail --target-host root@mail.lab --verbose
nixos-rebuild switch --flake .#media --target-host root@media.lab --verbose nixos-rebuild switch --flake .#media --target-host root@media.lab --verbose

View file

@ -0,0 +1,17 @@
🥇 Phase 1: Git + Secrets
✅ Set up Forgejo VM (NixOS declarative)
✅ Set up sops-nix + age keys (can live in the Git repo)
✅ Push flake + ansible + secrets to Forgejo
✅ Write a basic README with how to rebuild infra
🥈 Phase 2: GitOps
🔁 Add CI runner VM
🔁 Configure runner to deploy (nixos-rebuild or ansible-playbook) on commit
🔁 Optional: add webhooks to auto-trigger via Forgejo

View file

@ -18,7 +18,7 @@ in {
stateDir = "/srv/forgejo"; stateDir = "/srv/forgejo";
secrets = { secrets = {
mailer = { mailer = {
PASSWD = config.sops.secrets.forgejo-mailer-password.path; PASSWD = ;
}; };
}; };
settings = { settings = {
@ -76,12 +76,12 @@ in {
ALLOW_DEACTIVATE_ALL = false; ALLOW_DEACTIVATE_ALL = false;
}; };
# oauth2 = { oauth2 = {
# }; };
# oauth2_client = { oauth2_client = {
# ENABLE_AUTO_REGISTRATION = true; ENABLE_AUTO_REGISTRATION = true;
# UPDATE_AVATAR = true; UPDATE_AVATAR = true;
# }; };
# log = { # log = {
# ROOT_PATH = "/var/log/forgejo"; # ROOT_PATH = "/var/log/forgejo";
# MODE = "file"; # MODE = "file";

View file

@ -1,6 +1,7 @@
let let
forgejoSops = ../../secrets/forgejo/secrets.yml; forgejoSops = ../../secrets/forgejo/secrets.yml;
in { in
{
sops.secrets = { sops.secrets = {
"forgejo-admin-password" = { "forgejo-admin-password" = {
sopsFile = forgejoSops; sopsFile = forgejoSops;
@ -14,9 +15,5 @@ in {
sopsFile = forgejoSops; sopsFile = forgejoSops;
owner = "forgejo"; owner = "forgejo";
}; };
"forgejo-mailer-password" = {
sopsFile = forgejoSops;
owner = "forgejo";
};
}; };
} }

View file

@ -15,13 +15,6 @@
middlewares = []; middlewares = [];
}; };
roundcube = {
rule = "Host(`roundcube.procopius.dk`)";
service = "roundcube";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
forgejo = { forgejo = {
rule = "Host(`git.procopius.dk`)"; rule = "Host(`git.procopius.dk`)";
service = "forgejo"; service = "forgejo";
@ -41,4 +34,10 @@
entryPoints = ["websecure"]; entryPoints = ["websecure"];
tls.certResolver = "letsencrypt"; tls.certResolver = "letsencrypt";
}; };
catchAll = {
rule = "HostRegexp(`.+`)";
service = "nginx";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
} }

View file

@ -2,11 +2,12 @@
traefik.loadBalancer.servers = [{url = "http://localhost:8080";}]; traefik.loadBalancer.servers = [{url = "http://localhost:8080";}];
mail-acme.loadBalancer.servers = [{url = "http://mail.lab:80";}]; mail-acme.loadBalancer.servers = [{url = "http://mail.lab:80";}];
roundcube.loadBalancer.servers = [{url = "http://mail.lab:80";}];
forgejo.loadBalancer.servers = [{url = "http://forgejo.lab:3000";}]; forgejo.loadBalancer.servers = [{url = "http://forgejo.lab:3000";}];
proxmox.loadBalancer.servers = [{url = "https://192.168.1.205:8006";}]; proxmox.loadBalancer.servers = [{url = "https://192.168.1.205:8006";}];
proxmox.loadBalancer.serversTransport = "insecureTransport"; proxmox.loadBalancer.serversTransport = "insecureTransport";
nas.loadBalancer.servers = [{url = "https://192.168.1.226:5001";}]; nas.loadBalancer.servers = [{url = "https://192.168.1.226:5001";}];
nas.loadBalancer.serversTransport = "insecureTransport"; nas.loadBalancer.serversTransport = "insecureTransport";
nginx.loadBalancer.servers = [{url = "https://192.168.1.226:4433";}];
nginx.loadBalancer.serversTransport = "insecureTransport";
} }

View file

@ -32,52 +32,4 @@
entryPoints = ["websecure"]; entryPoints = ["websecure"];
tls.certResolver = "letsencrypt"; tls.certResolver = "letsencrypt";
}; };
ente-minio = {
rule = "Host(`ente-minio.procopius.dk`)";
service = "ente-minio";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
ente-minio-api = {
rule = "Host(`ente-minio-api.procopius.dk`)";
service = "ente-minio-api";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
ente-museum = {
rule = "Host(`ente-museum.procopius.dk`)";
service = "ente-museum";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
ente-photos = {
rule = "Host(`ente-photos.procopius.dk`) || Host(`ente-albums.procopius.dk`)";
service = "ente-photos";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
ente-cast = {
rule = "Host(`ente-cast.procopius.dk`) ";
service = "ente-cast";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
ente-accounts = {
rule = "Host(`ente-accounts.procopius.dk`) ";
service = "ente-accounts";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
ente-auth = {
rule = "Host(`ente-auth.procopius.dk`) ";
service = "ente-auth";
entryPoints = ["websecure"];
tls.certResolver = "letsencrypt";
};
} }

View file

@ -4,12 +4,4 @@
account.loadBalancer.servers = [{url = "http://192.168.1.226:3001";}]; account.loadBalancer.servers = [{url = "http://192.168.1.226:3001";}];
minio.loadBalancer.servers = [{url = "http://192.168.1.226:3201";}]; minio.loadBalancer.servers = [{url = "http://192.168.1.226:3201";}];
minio-api.loadBalancer.servers = [{url = "http://192.168.1.226:3200";}]; minio-api.loadBalancer.servers = [{url = "http://192.168.1.226:3200";}];
ente-minio.loadBalancer.servers = [{url = "http://photos.lab:9001";}];
ente-minio-api.loadBalancer.servers = [{url = "http://photos.lab:9000";}];
ente-museum.loadBalancer.servers = [{url = "http://photos.lab:8080";}];
ente-photos.loadBalancer.servers = [{url = "http://photos.lab:3000";}];
ente-accounts.loadBalancer.servers = [{url = "http://photos.lab:3001";}];
ente-cast.loadBalancer.servers = [{url = "http://photos.lab:3004";}];
ente-auth.loadBalancer.servers = [{url = "http://photos.lab:3003";}];
} }

127
nixos/hosts/vpn/README.md Normal file
View file

@ -0,0 +1,127 @@
Great question — and you're absolutely right to ask.
You **don't need** Innernet or Headscale+patch *if* you're comfortable building a small self-hosted portal that handles:
* ✅ OIDC login
* ✅ WireGuard peer key management
* ✅ Config generation and download
So let's break it down:
---
## 🧩 Why *Innernet* and *Headscale+patch* Exist
Those tools solve **user/device coordination**, dynamic routing, and access control *automatically*, **with batteries included**. They're great if you want a more managed, “plug-and-play” experience. Specifically:
| Tool | Purpose |
| ------------------------ | ----------------------------------------------------------------- |
| **Innernet** | Full WireGuard-based mesh + OIDC login + per-device management |
| **Headscale** | Tailscale backend, enables user login + ACL + device coordination |
| **Headscale+OIDC patch** | Add OIDC login to Headscale (by default, only pre-shared keys) |
These tools do things like:
* Assign IPs/DNS names to users
* Regenerate keys
* Handle coordination between peers
* Gate access with identity
---
## 🛠️ Why You *Don't* Need Them (Your Custom Portal Idea)
If you're fine with:
* Managing WireGuard server and IPs manually
* Writing a small web service to issue `.conf` files after login
* Using firewall rules for access control
Then you absolutely **can** build a custom system around **WireGuard + OIDC** like this:
---
### 🧱 Architecture: WireGuard + OIDC + Config Generator
```
┌──────────────┐
│ Your OIDC │
│ provider │◄────── Google, Authentik, Keycloak
└─────┬────────┘
(OIDC Login)
┌────────▼────────┐
│ Web Portal │◄────── nginx + Go, Node.js, Flask, etc.
│ (self-hosted) │
└──┬────────────┬─┘
Gen Keys │ │ Serve config
& register │ └─────> `client.conf` / QR code
┌──────────────────────┐
│ WireGuard server │ (NixOS)
│ - Handles all peers │
└──────────────────────┘
```
---
### 🔐 What Your Portal Needs to Do
* Authenticate via OIDC (OAuth2 flow)
* Store a list of "authorized users"
* When a user logs in:
* Check if they already have a WireGuard peer entry
* If not: generate a keypair, assign IP (e.g. `10.100.0.5`)
* Append to server config (`/etc/wireguard/wg0.conf`) or use `wg set`
* Return config file or QR code for the mobile client
---
## ✅ Benefits of This DIY Approach
* 💡 Full control: you own every step
* 🔐 OIDC is the gatekeeper (centrally managed access)
* ⚡ No extra daemon/software overhead (just WireGuard + your app)
* 🧩 Easily extensible (e.g., device limits, IP lease expiration)
---
## ⚙️ Tools You Can Use
| Purpose | Tools |
| ---------------------- | -------------------------------------------------------------------------- |
| OIDC Auth | [Dex](https://github.com/dexidp/dex), Keycloak, Authentik, or Firebase |
| Web Portal Backend | Node.js (Express), Go, Flask, FastAPI |
| Config Template Engine | Go templates, EJS, Jinja2 |
| WireGuard Integration | `wg`, `wg-quick`, or directly edit `wg0.conf` |
| NixOS Support | Use `networking.wireguard.interfaces.wg0.peers` to declaratively add peers |
---
## 🧪 Bonus Features You Could Build
* 🔐 Per-user firewall rules
* 🔁 One-time config URLs (`vpn.mydomain.com/invite/xyz`)
* 🔐 Optional Authelia/Keycloak MFA
* 📉 Dashboard with currently active peers (use `wg show`)
---
## ✨ Summary
| Option | You Need Innernet / Headscale? | Login via OIDC? | DNS Routing? | Complexity |
| ----------------- | ------------------------------ | --------------- | ------------ | ---------- |
| Your own portal | ❌ No | ✅ Yes | ❌ Manual | 🟡 Medium |
| Innernet | ✅ Yes | ✅ Yes | ✅ Built-in | 🟠 Medium |
| Headscale + patch | ✅ Yes | ✅ Yes | ✅ Built-in | 🔴 High |
You're on a great path by choosing the custom VPN portal idea.
Let me know if you'd like:
* A **sample architecture repo**
* A **NixOS module to support peer configs**
* Help building the **login + config generator backend**
I can generate a Nix flake and a working OIDC portal template to kickstart the project.

View file

@ -0,0 +1,14 @@
{
  # Run Warpgate (bastion/gateway service) as an OCI container.
  virtualisation.oci-containers.containers = {
    warpgate = {
      # NOTE(review): no image tag pinned — this pulls the default (latest)
      # tag and may change on every pull; consider pinning a version.
      image = "ghcr.io/warp-tech/warpgate";
      # host:container port mappings
      ports = [
        "2222:2222"
        "8888:8888"
      ];
      # Persist Warpgate state on the host
      volumes = [
        "/srv/warpgate/data:/data"
      ];
    };
  };
}

View file

@ -1,7 +1,6 @@
forgejo-admin-password: ENC[AES256_GCM,data:S05b/J9AK2SuIKDSWmtRf72C7V5FwMgZv/o5yxzNXRZEH2eIm18sC6+FEg==,iv:Ig/c4K9Io0S07Ywl4JQtbfxhjXJ7Rvea7+N4KhLUqjc=,tag:rx44tRuAbERBZR45QN6b9A==,type:str] forgejo-admin-password: ENC[AES256_GCM,data:S05b/J9AK2SuIKDSWmtRf72C7V5FwMgZv/o5yxzNXRZEH2eIm18sC6+FEg==,iv:Ig/c4K9Io0S07Ywl4JQtbfxhjXJ7Rvea7+N4KhLUqjc=,tag:rx44tRuAbERBZR45QN6b9A==,type:str]
forgejo-db-password: ENC[AES256_GCM,data:5YwRl6HNa1LzJgr73ArllG9s+vWCS7m/s6QQh5YUz8I0anG7GQ==,iv:5ARq3unUy2xbDcAFkucvEhjz/QYC2rYgutEo4T2bw2E=,tag:k7eHKqeA7k6XzksLVcnXRw==,type:str] forgejo-db-password: ENC[AES256_GCM,data:5YwRl6HNa1LzJgr73ArllG9s+vWCS7m/s6QQh5YUz8I0anG7GQ==,iv:5ARq3unUy2xbDcAFkucvEhjz/QYC2rYgutEo4T2bw2E=,tag:k7eHKqeA7k6XzksLVcnXRw==,type:str]
forgejo-secret-key: ENC[AES256_GCM,data:iserDzOnJkM4HLP4c6rekSFANtRmEXwuCPyfMqo=,iv:3CNqN/DyS4PIl/iOO4JCpWJn3ARlb5KQSCNv5Orx2mo=,tag:q34jEpGrK2EKf0bcBznpQQ==,type:str] forgejo-secret-key: ENC[AES256_GCM,data:iserDzOnJkM4HLP4c6rekSFANtRmEXwuCPyfMqo=,iv:3CNqN/DyS4PIl/iOO4JCpWJn3ARlb5KQSCNv5Orx2mo=,tag:q34jEpGrK2EKf0bcBznpQQ==,type:str]
forgejo-mailer-password: ENC[AES256_GCM,data:6mX8wB7RkiCj/43G4vttusOPogUifKua3Ozgch8ewz8=,iv:BxFIto7L0A8YhhmiRYwUFDy8PeXaghE2j9SQbZ1GaZQ=,tag:gB6/9lUrz0HeQUl536Vp4A==,type:str]
sops: sops:
age: age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y - recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
@ -13,7 +12,7 @@ sops:
LzBHRWZXODVDZTE2WnVZOGNQckk4KzAKdm3xnA03JnQnc07yhVVtYkVYS6654Zm1 LzBHRWZXODVDZTE2WnVZOGNQckk4KzAKdm3xnA03JnQnc07yhVVtYkVYS6654Zm1
4AcLRSCcWvWrvp26XYVE2UGqU7acfxrTsk07o0nHAQpa5LjgJ4oFKw== 4AcLRSCcWvWrvp26XYVE2UGqU7acfxrTsk07o0nHAQpa5LjgJ4oFKw==
-----END AGE ENCRYPTED FILE----- -----END AGE ENCRYPTED FILE-----
lastmodified: "2025-07-25T10:22:17Z" lastmodified: "2025-06-06T18:38:08Z"
mac: ENC[AES256_GCM,data:JiqFsbC6rxk3Pmc0vqHwElfT3kXDLJwiBZS50xo/iyOgwyWbwf5sCNdn9CMFciDsDHfd8jRp8hYfdr7VaPFwc/Iec5cwHY23+lzat1hwOkmwEDdxW7pY4IVXZEWdBaeVrFInnvdLgJAOi+KecZ2BIx0iyMEQZUKs6exxSXB2/fE=,iv:LWv0XKSBPz35+pIur98+js3ETnFDOf6aEY67L2RGpHU=,tag:VzTG6zhHVHpbVDAc2266qQ==,type:str] mac: ENC[AES256_GCM,data:BvpIz6tfVSR3m1l7g4ilUyoTKKqirt+k6tPizxCsAgjztt0IyDCio+cLTln4P1tGSy/frjvbxy1mR3tIDkWn6aDFoYz/gnsbTKHSo/K5Q77jJ3uJffoB3/Wruigojl3EBIQHALicq9xhF8rsH/RKjpWqh+TrQwO+ibbA6ff76cw=,iv:Z0ZwJ9aPpI9MtbsZnvFkW7zsFFOMj5/Gv+tF/mal+yI=,tag:knf01NC/XwgjPUHH+8RpSg==,type:str]
unencrypted_suffix: _unencrypted unencrypted_suffix: _unencrypted
version: 3.10.2 version: 3.10.2

View file

@ -9,7 +9,7 @@
nix-update-script, nix-update-script,
extraBuildEnv ? {}, extraBuildEnv ? {},
# This package contains serveral sub-applications. This specifies which of them you want to build. # This package contains serveral sub-applications. This specifies which of them you want to build.
enteApp ? "auth", enteApp ? "photos",
# Accessing some apps (such as account) directly will result in a hardcoded redirect to ente.io. # Accessing some apps (such as account) directly will result in a hardcoded redirect to ente.io.
# To prevent users from accidentally logging in to ente.io instead of the selfhosted instance, you # To prevent users from accidentally logging in to ente.io instead of the selfhosted instance, you
# can set this parameter to override these occurrences with your own url. Must include the schema. # can set this parameter to override these occurrences with your own url. Must include the schema.
@ -18,7 +18,7 @@
}: }:
stdenv.mkDerivation (finalAttrs: { stdenv.mkDerivation (finalAttrs: {
pname = "ente-web-${enteApp}"; pname = "ente-web-${enteApp}";
version = "1.1.57"; version = "1.0.4";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "ente-io"; owner = "ente-io";
@ -26,13 +26,13 @@ stdenv.mkDerivation (finalAttrs: {
sparseCheckout = ["web"]; sparseCheckout = ["web"];
tag = "photos-v${finalAttrs.version}"; tag = "photos-v${finalAttrs.version}";
fetchSubmodules = true; fetchSubmodules = true;
hash = "sha256-SCkxGm/w0kES7wDuLBsUTgwrFYNLvLD51NyioAVTLrg="; # lib.fakeHash; hash = "sha256-M1kAZgqjbWNn6LqymtWRmAk/v0vWEGbyS50lVrsr85o=";
}; };
sourceRoot = "${finalAttrs.src.name}/web"; sourceRoot = "${finalAttrs.src.name}/web";
offlineCache = fetchYarnDeps { offlineCache = fetchYarnDeps {
yarnLock = "${finalAttrs.src}/web/yarn.lock"; yarnLock = "${finalAttrs.src}/web/yarn.lock";
hash = "sha256-FnLMXOpIVNOhaM7VjNEDlwpew9T/5Ch5eFed9tLpDsI="; hash = "sha256-EYhYwy6+7bgWckU/7SfL1PREWw9JUgKxWadSVtoZwXs=";
}; };
nativeBuildInputs = [ nativeBuildInputs = [

View file

@ -1,43 +0,0 @@
# profiles/proxmox-vm.nix - Proxmox VM specific profile
#
# Base profile for NixOS guests running as Proxmox/QEMU VMs: guest agent,
# bootloader, virtio kernel modules, default root filesystem, and
# homelab metadata defaults (overridable per host via mkDefault).
{
  config,
  lib,
  modulesPath,
  ...
}: {
  imports = [
    # Upstream QEMU guest profile (common KVM guest settings)
    (modulesPath + "/profiles/qemu-guest.nix")
  ];
  # Proxmox VM specific configuration
  services.qemuGuest.enable = true;
  # Boot configuration for Proxmox VMs
  boot = {
    loader.grub = {
      enable = true;
      # "nodev": generate GRUB config without installing to a fixed device
      devices = ["nodev"];
    };
    # Grow the root partition to fill the (resizable) virtual disk on boot
    growPartition = true;
    tmp.cleanOnBoot = true;
    # Proxmox specific kernel modules
    initrd.availableKernelModules = ["ata_piix" "uhci_hcd" "virtio_pci" "sr_mod" "virtio_blk"];
  };
  # Standard Proxmox VM filesystem
  fileSystems."/" = lib.mkDefault {
    device = "/dev/disk/by-label/nixos";
    autoResize = true;
    fsType = "ext4";
  };
  # Update global config with Proxmox-specific info
  homelab = {
    location = lib.mkDefault "proxmox-cluster";
    tags = lib.mkDefault ["proxmox-vm" "homelab"];
  };
  # VM-specific optimizations
  # Periodic TRIM for thin-provisioned virtual disks
  services.fstrim.enable = true;
}

View file

@ -1,18 +0,0 @@
{
nodes,
lib,
...
}: let
extractGlobal = name: node:
if node ? config.homelab.global
then {
${name} = {
hostname = node.config.homelab.global.hostname;
monitoring = map (e: "${e.name}:${toString e.port}") node.config.homelab.global.monitoring.endpoints;
backups = map (b: "${b.name}(${b.backend})") node.config.homelab.global.backups.jobs;
proxy = map (p: "${p.subdomain}.${node.config.homelab.global.domain}") node.config.homelab.global.reverseProxy.entries;
};
}
else {};
in
lib.foldl (acc: name: acc // (extractGlobal name nodes.${name})) {} (builtins.attrNames nodes)

View file

@ -1,115 +0,0 @@
#!/bin/bash
# Helper script: scripts/deploy-homelab.sh
#
# Wrapper around colmena for homelab operations: deploy/build (optionally
# targeted at one node), status (dry-run), global config summary, backup
# status, and monitoring endpoint listing. Run "help" for usage.
#
# Fix: the shebang must be the FIRST line of the file — in the previous
# version it sat below a comment and was therefore ignored by the kernel.
set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== Homelab Deployment Script ===${NC}"

# Function to print colored output
log() {
  echo -e "${GREEN}[INFO]${NC} $1"
}

warn() {
  echo -e "${YELLOW}[WARN]${NC} $1"
}

error() {
  echo -e "${RED}[ERROR]${NC} $1"
}

# Check if colmena is available
if ! command -v colmena &> /dev/null; then
  error "colmena is not installed. Please install it first."
  exit 1
fi

# Parse arguments: COMMAND defaults to "deploy", TARGET to "" (all nodes)
COMMAND=${1:-"deploy"}
TARGET=${2:-""}

case $COMMAND in
  "deploy")
    if [ -n "$TARGET" ]; then
      log "Deploying to specific target: $TARGET"
      colmena apply --on "$TARGET"
    else
      log "Deploying to all targets"
      colmena apply
    fi
    ;;
  "build")
    if [ -n "$TARGET" ]; then
      log "Building specific target: $TARGET"
      colmena build --on "$TARGET"
    else
      log "Building all targets"
      colmena build
    fi
    ;;
  "status")
    log "Checking deployment status"
    colmena apply --dry-run
    ;;
  "config")
    log "Showing global configuration summary"
    # Extract global configs from all nodes
    colmena eval ./scripts/config.nix | jq .
    ;;
  "backup-status")
    log "Checking backup status across all nodes"
    if [ -n "$TARGET" ]; then
      colmena exec --on "$TARGET" -- backup-status
    else
      colmena exec -- backup-status
    fi
    ;;
  "monitoring")
    log "Collecting monitoring endpoints"
    # Single-quoted nix expression evaluated via `nix eval --apply`; keep it
    # in sync with the homelab.global.monitoring option schema.
    nix eval --json .#colmena --apply 'colmena:
    let
      lib = (import <nixpkgs> {}).lib;
      nodes = removeAttrs colmena ["meta"];
      collectEndpoints = lib.flatten (
        lib.mapAttrsToList (name: node:
          if node ? config.homelab.global.monitoring.endpoints then
            map (e: {
              node = name;
              hostname = node.config.homelab.global.hostname;
              endpoint = "${e.name}:${toString e.port}${e.path}";
              job = e.jobName;
            }) node.config.homelab.global.monitoring.endpoints
          else []
        ) nodes
      );
    in collectEndpoints
    ' | jq .
    ;;
  "help")
    echo "Usage: $0 [COMMAND] [TARGET]"
    echo ""
    echo "Commands:"
    echo "  deploy [TARGET]    Deploy to all nodes or specific target"
    echo "  build [TARGET]     Build configuration for all nodes or specific target"
    echo "  status             Show deployment status (dry-run)"
    echo "  config             Show global configuration summary"
    echo "  backup-status      Check backup status on all nodes"
    echo "  monitoring         List all monitoring endpoints"
    echo "  help               Show this help message"
    echo ""
    echo "Examples:"
    echo "  $0 deploy media-server    # Deploy only to media-server"
    echo "  $0 build                  # Build all configurations"
    echo "  $0 config                 # Show global config summary"
    ;;
  *)
    error "Unknown command: $COMMAND"
    echo "Run '$0 help' for usage information"
    exit 1
    ;;
esac

View file

@ -1,41 +0,0 @@
#!/bin/bash
# scripts/generate-docs.sh
#
# Print markdown documentation for the homelab global configuration system
# to stdout, derived from the service modules under modules/nixos/services.
#
# Fix: the shebang must be the FIRST line of the file — in the previous
# version it sat below a comment and was therefore ignored by the kernel.

echo "# Homelab Global Configuration Documentation"
echo
echo "This document describes the global configuration system for the NixOS homelab."
echo
echo "## Available Services"
echo

# List all service modules
find modules/nixos/services -name "*.nix" | while read -r file; do
  service=$(basename "$file" .nix)
  echo "### $service"
  echo
  # Extract description from the module's first mkEnableOption call;
  # fall back to a generic line when none is found.
  grep -m1 "mkEnableOption" "$file" | sed 's/.*mkEnableOption "\([^"]*\)".*/\1/' || echo "Service module for $service"
  echo
done

echo "## Configuration Examples"
echo
echo "### Basic Media Server Setup"
echo '```nix'
echo 'media-server = { ... }: {'
echo '  homelab.global = {'
echo '    enable = true;'
echo '    hostname = "media-server";'
echo '    domain = "homelab.local";'
echo '  };'
echo '  services.jellyfin.enable = true;'
echo '};'
echo '```'
echo
echo "### Monitoring Configuration"
echo '```nix'
echo 'monitoring = { nodes, ... }: {'
echo '  services.prometheus.scrapeConfigs = collectMonitoringEndpoints nodes;'
echo '};'
echo '```'
# modules/global-config.nix

View file

@ -1,79 +0,0 @@
# scripts/validate-config.nix
#
# Pure validation helpers for the homelab global configuration. Each
# validator returns a list of human-readable error strings; an empty list
# means the value is valid.
#
# Fix: dropped the unused `inherit (lib) types mkOption;` — this file only
# uses `lib.flatten` (via the `lib.` prefix) and never references
# `types`/`mkOption`.
{
  lib,
  pkgs,
}: let
  # Validate one backup job: must have at least one path, a known backend,
  # and a non-empty schedule.
  validateBackupJob = job: let
    errors =
      []
      ++ (
        if job.paths == []
        then ["Backup job '${job.name}' has no paths defined"]
        else []
      )
      ++ (
        if !(builtins.elem job.backend ["restic" "borg" "rclone"])
        then ["Invalid backup backend: ${job.backend}"]
        else []
      )
      ++ (
        if job.schedule == ""
        then ["Backup job '${job.name}' has no schedule defined"]
        else []
      );
  in
    errors;
  # Validate one monitoring endpoint: port must be in 1-65535 and the
  # Prometheus job name non-empty.
  validateMonitoringEndpoint = endpoint: let
    errors =
      []
      ++ (
        if endpoint.port < 1 || endpoint.port > 65535
        then ["Invalid port ${toString endpoint.port} for endpoint '${endpoint.name}'"]
        else []
      )
      ++ (
        if endpoint.jobName == ""
        then ["Monitoring endpoint '${endpoint.name}' has no job name"]
        else []
      );
  in
    errors;
  # Validate one reverse-proxy entry: subdomain non-empty, port in range.
  validateReverseProxyEntry = entry: let
    errors =
      []
      ++ (
        if entry.subdomain == ""
        then ["Reverse proxy entry has no subdomain defined"]
        else []
      )
      ++ (
        if entry.port < 1 || entry.port > 65535
        then ["Invalid port ${toString entry.port} for subdomain '${entry.subdomain}'"]
        else []
      );
  in
    errors;
  # Run every validator over a full homelab.global config attrset and
  # aggregate results into { valid, errors }.
  validateGlobalConfig = config: let
    backupErrors = lib.flatten (map validateBackupJob config.backups.jobs);
    monitoringErrors = lib.flatten (map validateMonitoringEndpoint config.monitoring.endpoints);
    proxyErrors = lib.flatten (map validateReverseProxyEntry config.reverseProxy.entries);
    allErrors = backupErrors ++ monitoringErrors ++ proxyErrors;
  in
    if allErrors == []
    then {
      valid = true;
      errors = [];
    }
    else {
      valid = false;
      errors = allErrors;
    };
in {
  inherit validateGlobalConfig validateBackupJob validateMonitoringEndpoint validateReverseProxyEntry;
}

1
secrets/.gitignore vendored
View file

@ -1 +0,0 @@
*.key

View file

@ -1,42 +0,0 @@
# SOPS secrets plumbing shared by all hosts: points sops-nix at the age key
# that colmena pushes to /run/keys at deploy time.
{
  config,
  lib,
  pkgs,
  ...
}: {
  # SOPS configuration
  sops = {
    # Age private key location; provisioned by deployment.keys below.
    age.keyFile = "/run/keys/age.key";
    defaultSopsFile = ./secrets.yaml;
    # Define secrets that all systems need
    secrets = {
      # SSH keys
      # "ssh/plasmagoat_private_key" = {
      #   owner = "plasmagoat";
      #   mode = "0600";
      #   path = "/home/plasmagoat/.ssh/id_rsa";
      # };
      # # Age key for the system
      # "age/system_key" = {
      #   mode = "0600";
      #   path = "/run/keys/age.key";
      # };
      # # Backup credentials
      # "backup/restic_password" = {
      #   path = "/etc/backup/restic-password";
      #   mode = "0600";
      # };
    };
  };
  # Deployment keys for colmena
  # Copies the operator's local age key to the target at deploy time.
  deployment.keys = {
    "age.key" = {
      destDir = "/run/keys";
      keyFile = "/home/plasmagoat/.config/age/age.key"; # Your local age key
    };
  };
}

View file

@ -25,18 +25,6 @@ service_accounts:
password: ENC[AES256_GCM,data:PpUHEhNfnR1eg7DmnO7tyNciNE4Tsx/Y4uL92gqiods=,iv:DNKQfymvgEu/iEW8t79m0ZmKTU0Ffasu+gp2KOIAK3o=,tag:lGKw5dbXqImDJNVX6p8kLg==,type:str] password: ENC[AES256_GCM,data:PpUHEhNfnR1eg7DmnO7tyNciNE4Tsx/Y4uL92gqiods=,iv:DNKQfymvgEu/iEW8t79m0ZmKTU0Ffasu+gp2KOIAK3o=,tag:lGKw5dbXqImDJNVX6p8kLg==,type:str]
mail: mail:
password: ENC[AES256_GCM,data:6lfziq1zXlFxCAFWv5co3MkBgwaWixjHHX9riQXCbe0=,iv:/t4CnW3bKUDxfpE/qGf1LPs0ciivRMkfgJ1nMseruy4=,tag:TWApzLsm2HV+JMaZLG/Kig==,type:str] password: ENC[AES256_GCM,data:6lfziq1zXlFxCAFWv5co3MkBgwaWixjHHX9riQXCbe0=,iv:/t4CnW3bKUDxfpE/qGf1LPs0ciivRMkfgJ1nMseruy4=,tag:TWApzLsm2HV+JMaZLG/Kig==,type:str]
ente:
password: ENC[AES256_GCM,data:bQxiCr9OgFU7oSGjkEO43iH9L2nikvvFQZsjGurtOFM=,iv:LIwzaZARQgiGdOLfpebJkKO0I71I3kX8mq8W1WC2lT4=,tag:VxK6oON6th9b1YhvC7cjjA==,type:str]
ente:
minio:
root_password: ENC[AES256_GCM,data:TIurrIEjWKdMYzIZY3dp00ert90=,iv:5kT06lUUlRC9J4DVwo7RDdxAM8zCJwwjWOF9YAZbbmk=,tag:qk7Cszn39kPijkr71ckxvg==,type:str]
root_user: ENC[AES256_GCM,data:wPj8SBzeohdG,iv:bSgCKGc+X+oofpYN0yV1aQNhAvWzcw9CTaK3FzUBKj4=,tag:ArUwveqBWXDRc5eSPZYa9g==,type:str]
cloudflare:
dns-api-token: ENC[AES256_GCM,data:/NroEdwOwqY30nwLLzO9RvEYPttDIw85A0M81fOPJSzEodtF95VCPA==,iv:BN5xZhSyvoZiXZk096KYpj59qns6hHg3PvhWC6c2sXo=,tag:3DaP3/p/JTM+beBRacGzSA==,type:str]
zone-api-token: ENC[AES256_GCM,data:FoMHKi5q+d97+pxUsyyNZAxGGgBRsZsNMePL5OeD3pcBIqtZP9MP5g==,iv:yRno4aJRlVyFTZVmat9tmFGFI1ghLw2vW+Py0+viFdE=,tag:pooTMSOsBZdbN/AE5J04MA==,type:str]
restic:
default-password: ENC[AES256_GCM,data:9gHH8V00XFveogOhVl0nLvq3olI=,iv:+wdSlZXnkTw1VKXesx3GMy5yz+kPf2FlYSPNXMB0Y0o=,tag:jHKQTfvm+G+L+Fb+3qP+rA==,type:str]
default-repository: ENC[AES256_GCM,data:znNTSZknMvL5ceINgt0iHA==,iv:taobWUuT4nKfzegk329dzFGIOdL03d6kw8JlgO1E78Y=,tag:kgM4551xZcaxzZw58AqBBQ==,type:str]
sops: sops:
age: age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y - recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
@ -48,7 +36,7 @@ sops:
QzNYRk5ERmR4aGtLQ3dwQ1lPeDZyaEkKJMLXqv6tBBql7VVnWDIwAh24SfQ2O6Ca QzNYRk5ERmR4aGtLQ3dwQ1lPeDZyaEkKJMLXqv6tBBql7VVnWDIwAh24SfQ2O6Ca
CEOQTGEonbqr5doWqTsXUXrdQAS0amL45UdT6ITFtfNAjaHwCMfhZg== CEOQTGEonbqr5doWqTsXUXrdQAS0amL45UdT6ITFtfNAjaHwCMfhZg==
-----END AGE ENCRYPTED FILE----- -----END AGE ENCRYPTED FILE-----
lastmodified: "2025-07-27T17:17:02Z" lastmodified: "2025-07-16T15:33:06Z"
mac: ENC[AES256_GCM,data:i0S0G7D+yPiCWaiCmI++N0EKpED0uGpsEs+3Mc1LbLaHj5kFUMAbOPPl/QGDGhq2eL99+w1PKOfmdHYe2AdtsIhkIQ0F0FUkgItSjdOKlh0hKI+Hk2OqpfA6PRLlZT5dh8r0q0WcI1JPE46egNogjN2za4i6KrkjnTRSchhrxNg=,iv:k0BZ8b+5kmMqaKi9dx6fibIGVVJLZRa3oApwa/fWVdE=,tag:DbV4ZS/ciZVSi+aE0wOZfg==,type:str] mac: ENC[AES256_GCM,data:nZm7N8qfANzHadtW+3eTJljpmYejJdKGFO44iw40CnwlGgb454us9LZbQIAYkNiS7UkivoWa5BqvgLcpObHNAn3tVi+ha0jySIrAmp43y5ilmg76fvL4znel4Nk7eRiGoF3t3xiCR39/3l7PPffx2RJ6PerEyGBpiUZ6mBcWoTE=,iv:UmhSynpMdTnY0R6jwDJts13b0rKsaRFlCizdM2oargE=,tag:Q2xh/QXFOQYqqkxKs7nujA==,type:str]
unencrypted_suffix: _unencrypted unencrypted_suffix: _unencrypted
version: 3.10.2 version: 3.10.2

View file

@ -1,31 +0,0 @@
# users/plasmagoat.nix - Your user configuration
#
# Defines the primary interactive user plus root SSH keys for deployment.
{
  config,
  lib,
  pkgs,
  ...
}: {
  users.users.plasmagoat = {
    isNormalUser = true;
    description = "plasmagoat";
    # wheel: sudo; docker: container access; backup: backup job access
    extraGroups = ["wheel" "docker" "backup"];
    shell = pkgs.bash; # or pkgs.zsh, pkgs.fish
    # SSH keys managed through secrets
    # NOTE(review): the first key's comment reads "asol\dbs@..." here but
    # "asol\\dbs@..." in the root list below — presumably the same key;
    # verify the intended escaping.
    openssh.authorizedKeys.keys = [
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeg/n/vst9KME8byhxX2FhA+FZNQ60W38kkNt45eNzK5zFqBYuwo1nDXVanJSh9unRvB13b+ygpZhrb4sHvkETGWiEioc49MiWr8czEhu6Wpo0vv5MAJkiYvGZUYPdUW52jUzWcYdw8PukG2rowrxL5G0CmsqLwHMPU2FyeCe5aByFI/JZb8R80LoEacgjUiipJcoLWUVgG2koMomHClqGu+16kB8nL5Ja3Kc9lgLfDK7L0A5R8JXhCjrlEsmXbxZmwDKuxvjDAZdE9Sl1VZmMDfWkyrRlenrt01eR3t3Fec6ziRm5ZJk9e2Iu1DPoz+PoHH9aZGVwmlvvnr/gMF3OILxcqb0qx+AYlCCnb6D6pJ9zufhZkKcPRS1Q187F6fz+v2oD1xLZWFHJ92+7ItM0WmbDOHOC29s5EA6wNm3iXZCq86OI3n6T34njDtPqh6Z7Pk2sdK4GBwnFj4KwEWXvdKZKSX1qb2EVlEBE9QI4Gf3eg4SiBu2cAFt3nOSzs8c= asol\dbs@ALPHA-DBS-P14sG2"
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+U3DWOrklcA8n8wdbLBGyli5LsJI3dpL2Zod8mx8eOdC4H127ZT1hzuk2uSmkic4c73BykPyQv8rcqwaRGW94xdMRanKmHYxnbHXo5FBiGrCkNlNNZuahthAGO49c6sUhJMq0eLhYOoFWjtf15sr5Zu7Ug2YTUL3HXB1o9PZ3c9sqYHo2rC/Il1x2j3jNAMKST/qUZYySvdfNJEeQhMbQcdoKJsShcE3oGRL6DFBoV/mjJAJ+wuDhGLDnqi79nQjYfbYja1xKcrKX+D3MfkFxFl6ZIzomR1t75AnZ+09oaWcv1J7ehZ3h9PpDBFNXvzyLwDBMNS+UYcH6SyFjkUbF David@NZXT"
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air"
    ];
  };
  # Root SSH access (for deployment)
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeg/n/vst9KME8byhxX2FhA+FZNQ60W38kkNt45eNzK5zFqBYuwo1nDXVanJSh9unRvB13b+ygpZhrb4sHvkETGWiEioc49MiWr8czEhu6Wpo0vv5MAJkiYvGZUYPdUW52jUzWcYdw8PukG2rowrxL5G0CmsqLwHMPU2FyeCe5aByFI/JZb8R80LoEacgjUiipJcoLWUVgG2koMomHClqGu+16kB8nL5Ja3Kc9lgLfDK7L0A5R8JXhCjrlEsmXbxZmwDKuxvjDAZdE9Sl1VZmMDfWkyrRlenrt01eR3t3Fec6ziRm5ZJk9e2Iu1DPoz+PoHH9aZGVwmlvvnr/gMF3OILxcqb0qx+AYlCCnb6D6pJ9zufhZkKcPRS1Q187F6fz+v2oD1xLZWFHJ92+7ItM0WmbDOHOC29s5EA6wNm3iXZCq86OI3n6T34njDtPqh6Z7Pk2sdK4GBwnFj4KwEWXvdKZKSX1qb2EVlEBE9QI4Gf3eg4SiBu2cAFt3nOSzs8c= asol\\dbs@ALPHA-DBS-P14sG2"
    "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+U3DWOrklcA8n8wdbLBGyli5LsJI3dpL2Zod8mx8eOdC4H127ZT1hzuk2uSmkic4c73BykPyQv8rcqwaRGW94xdMRanKmHYxnbHXo5FBiGrCkNlNNZuahthAGO49c6sUhJMq0eLhYOoFWjtf15sr5Zu7Ug2YTUL3HXB1o9PZ3c9sqYHo2rC/Il1x2j3jNAMKST/qUZYySvdfNJEeQhMbQcdoKJsShcE3oGRL6DFBoV/mjJAJ+wuDhGLDnqi79nQjYfbYja1xKcrKX+D3MfkFxFl6ZIzomR1t75AnZ+09oaWcv1J7ehZ3h9PpDBFNXvzyLwDBMNS+UYcH6SyFjkUbF David@NZXT"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air"
  ];
  # Home directory management (optional)
  # You could add home-manager here if you want
}