dump
All checks were successful
Hello World / test (push) Successful in 12s

This commit is contained in:
plasmagoat 2025-07-05 11:12:20 +02:00
parent 4ed9ba0d24
commit a90630ecb6
98 changed files with 2063 additions and 729 deletions

View file

@ -0,0 +1,39 @@
---
# Forgejo Actions workflow: provision a Proxmox VM with Terraform, then
# install NixOS on it with nixos-anywhere. Triggered manually only.
name: Deploy NixOS VM

on:
  workflow_dispatch:

jobs:
  deploy:
    runs-on: docker
    container:
      image: nixos/nix
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Install Terraform
        run: nix-env -iA nixpkgs.terraform

      - name: Setup SSH key
        # Key material comes from the repo secret; 600 is required by ssh.
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_ed25519
          chmod 600 ~/.ssh/id_ed25519
        env:
          SSH_PRIVATE_KEY: ${{ secrets.SSH_PRIVATE_KEY }}

      - name: Terraform Init & Apply
        run: |
          terraform init
          terraform apply -auto-approve
        working-directory: ./terraform
        env:
          PROXMOX_PASSWORD: ${{ secrets.PROXMOX_PASSWORD }}

      - name: Deploy NixOS via nixos-anywhere
        # FIXME: "<new-vm-ip>" is a literal placeholder — this step cannot
        # succeed until the address is filled in (ideally read from a
        # `terraform output` of the previous step).
        run: |
          nix run github:numtide/nixos-anywhere -- \
            --build-on-remote \
            --flake .#new-vm \
            root@<new-vm-ip>

View file

@ -0,0 +1,34 @@
---
# Forgejo Actions workflow: decrypt sops secrets, then drive Terraform
# against Proxmox. Triggered manually only.
name: Terraform Proxmox NixOS VM Deploy

on:
  workflow_dispatch:

jobs:
  deploy-nixos-vm:
    runs-on: nixos-latest
    steps:
      - name: Install nodejs
        run: nix-env -iA nixpkgs.nodejs

      - name: Install terraform
        run: nix-env -iA nixpkgs.terraform

      - name: Install sops
        run: nix-env -iA nixpkgs.sops

      # v4 matches the sibling "Deploy NixOS VM" workflow (was v3).
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Decrypt secrets
        env:
          SOPS_AGE_KEY_FILE: ${{ secrets.AGE_KEY_FILE }}
        run: |
          sops --decrypt secrets.yaml.enc > secrets.yaml

      - name: Terraform Init
        run: terraform init

      - name: Terraform Apply
        env:
          PROXMOX_PASSWORD: ${{ secrets.PROXMOX_PASSWORD }}
        run: terraform apply -auto-approve

View file

@ -3,37 +3,14 @@
become: true become: true
pre_tasks: pre_tasks:
- name: Remove enterprise repository - import_tasks: ../tasks/proxmox-repos.yml
ansible.builtin.apt_repository:
update_cache: false
repo: deb https://enterprise.proxmox.com/debian/pve bookworm pve-enterprise
state: absent
- name: Remove enterprise pbs repository
ansible.builtin.apt_repository:
update_cache: false
repo: deb https://enterprise.proxmox.com/debian/pbs bookworm InRelease
state: absent
- name: Remove enterprise ceph repository
ansible.builtin.apt_repository:
update_cache: false
repo: deb https://enterprise.proxmox.com/debian/ceph-quincy bookworm enterprise
state: absent
- name: Add community repository
ansible.builtin.apt_repository:
update_cache: true
repo: deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription
state: present
tasks: tasks:
- name: Ensure ethtool offload post-up is present for eno1 - import_tasks: ../tasks/network-interface-patch.yml
ansible.builtin.lineinfile:
path: /etc/network/interfaces
line: "\tpost-up ethtool -K eno2 tso off gso off"
insertafter: "^iface eno2 inet manual"
state: present
backup: yes
- import_tasks: ../tasks/packages.yml - import_tasks: ../tasks/packages.yml
- import_tasks: ../tasks/terraform-user.yml
- import_tasks: ../tasks/cloud-init-snippets.yml
# - import_tasks: ../tasks/locale.yml # - import_tasks: ../tasks/locale.yml
# - import_tasks: ../tasks/keyboard.yml # - import_tasks: ../tasks/keyboard.yml
# - import_tasks: ../tasks/users.yml # - import_tasks: ../tasks/users.yml

View file

@ -0,0 +1,16 @@
---
# Install a cloud-init user-data snippet on a Proxmox node and sanity-check
# that Proxmox can render it for VM 9000.
- name: Ensure snippets directory exists
  ansible.builtin.file:
    path: /var/lib/vz/snippets
    state: directory
    mode: "0755"

- name: Upload cloud-init snippet
  ansible.builtin.copy:
    src: cloud-init-user-data.yaml
    dest: /var/lib/vz/snippets/cloud-init-user-data.yaml
    mode: "0644"

- name: Verify cloud-init snippet is recognized
  # Read-only probe: command (not shell — no shell features needed), and
  # changed_when: false so the check never reports a change.
  # ignore_errors kept deliberately: VM 9000 may not exist yet.
  ansible.builtin.command: qm cloudinit dump 9000 user
  register: cloudinit_check
  changed_when: false
  ignore_errors: true

View file

@ -0,0 +1,7 @@
---
# Disable TSO/GSO offload on eno2 via a post-up hook in /etc/network/interfaces.
# (Task name previously said "eno1" while the line targets eno2 — corrected.)
- name: Ensure ethtool offload post-up is present for eno2
  ansible.builtin.lineinfile:
    path: /etc/network/interfaces
    line: "\tpost-up ethtool -K eno2 tso off gso off"
    insertafter: "^iface eno2 inet manual"
    state: present
    backup: true

View file

@ -0,0 +1,20 @@
---
# Replace the Proxmox enterprise (subscription) APT repos with the free
# no-subscription repo. Cache is refreshed only once, on the final add.
- name: Remove enterprise repository
  ansible.builtin.apt_repository:
    update_cache: false
    repo: deb https://enterprise.proxmox.com/debian/pve bookworm pve-enterprise
    state: absent

- name: Remove enterprise pbs repository
  ansible.builtin.apt_repository:
    update_cache: false
    # NOTE(review): "InRelease" looks odd as an APT component name here —
    # verify against the actual entry in sources.list on the host.
    repo: deb https://enterprise.proxmox.com/debian/pbs bookworm InRelease
    state: absent

- name: Remove enterprise ceph repository
  ansible.builtin.apt_repository:
    update_cache: false
    repo: deb https://enterprise.proxmox.com/debian/ceph-quincy bookworm enterprise
    state: absent

- name: Add community repository
  ansible.builtin.apt_repository:
    update_cache: true
    repo: deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription
    state: present

View file

@ -0,0 +1,13 @@
---
# Create a dedicated terraform@pve user with just enough privileges to
# provision VMs via the Proxmox API.
- name: Create Terraform user
  # BUG FIX: the previous `creates: /etc/pve/user.cfg` guard never allowed
  # this task to run — user.cfg exists on every Proxmox install. Instead,
  # run pveum and tolerate "already exists" for idempotence.
  # NOTE(review): the "already exists" stderr match assumes pveum's wording —
  # confirm against the installed PVE version.
  ansible.builtin.command: pveum user add terraform@pve --password {{ terraform_password }}
  register: tf_user_add
  changed_when: tf_user_add.rc == 0
  failed_when: tf_user_add.rc != 0 and 'already exists' not in tf_user_add.stderr
  no_log: true  # keep the password out of logs / process output capture

- name: Add TerraformProvisioner role
  ansible.builtin.command: >
    pveum role add TerraformProvisioner -privs "VM.Allocate VM.Config.CloudInit VM.Config.Disk VM.Config.Network VM.PowerMgmt VM.Console Datastore.AllocateSpace"
  register: tf_role_add
  changed_when: tf_role_add.rc == 0
  # Narrow replacement for the old blanket ignore_errors.
  failed_when: tf_role_add.rc != 0 and 'already exists' not in tf_role_add.stderr

- name: Assign role to user
  ansible.builtin.command: >
    pveum aclmod / -user terraform@pve -role TerraformProvisioner
  changed_when: true

View file

@ -1,9 +1,13 @@
nixos-rebuild switch --flake .#traefik --target-host root@192.168.1.171 --verbose nixos-rebuild switch --flake .#traefik --target-host root@192.168.1.171 --verbose
nixos-rebuild switch --flake .#proxmox --target-host root@192.168.1.205 --verbose nixos-rebuild switch --flake .#proxmox --target-host root@192.168.1.205 --verbose
nixos-rebuild switch --flake .#sandbox --target-host root@192.168.1.148 --verbose nixos-rebuild switch --flake .#sandbox --target-host root@sandbox.lab --verbose
nixos-rebuild switch --flake .#monitoring --target-host root@monitor.lab --verbose nixos-rebuild switch --flake .#monitoring --target-host root@monitor.lab --verbose
nixos-rebuild switch --flake .#forgejo --target-host root@192.168.1.249 --verbose nixos-rebuild switch --flake .#forgejo --target-host root@forgejo.lab --verbose
nixos-rebuild switch --flake .#dns --target-host root@192.168.1.140 --verbose nixos-rebuild switch --flake .#dns --target-host root@192.168.1.140 --verbose
nixos-rebuild switch --flake .#keycloak --target-host root@keycloak.lab --verbose
nixos-rebuild switch --flake .#mail --target-host root@mail.lab --verbose
nixos-rebuild switch --flake .#runner --target-host root@forgejo-runner-01.lab --override-input runnerId 01
nixos-rebuild switch --flake .#runner01 --target-host root@forgejo-runner-01.lab --verbose nixos-rebuild switch --flake .#runner01 --target-host root@forgejo-runner-01.lab --verbose
nixos-rebuild switch --flake .#builder --target-host root@nixos-builder.lab --verbose
nix build .#base --builders "ssh://root@nixos-builder.lab x86_64-linux"

94
nixos/flake.lock generated
View file

@ -1,42 +1,42 @@
{ {
"nodes": { "nodes": {
"nixlib": { "nixarr": {
"locked": {
"lastModified": 1736643958,
"narHash": "sha256-tmpqTSWVRJVhpvfSN9KXBvKEXplrwKnSZNAoNPf/S/s=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "1418bc28a52126761c02dd3d89b2d8ca0f521181",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixos-generators": {
"inputs": { "inputs": {
"nixlib": "nixlib", "nixpkgs": "nixpkgs",
"nixpkgs": [ "vpnconfinement": "vpnconfinement",
"nixpkgs" "website-builder": "website-builder"
]
}, },
"locked": { "locked": {
"lastModified": 1747663185, "lastModified": 1749909656,
"narHash": "sha256-Obh50J+O9jhUM/FgXtI3he/QRNiV9+J53+l+RlKSaAk=", "narHash": "sha256-+BetnYiov7fa/rHNiAq29rFa31Kjfmxh0HrNO1um2Ak=",
"owner": "nix-community", "owner": "rasmus-kirk",
"repo": "nixos-generators", "repo": "nixarr",
"rev": "ee07ba0d36c38e9915c55d2ac5a8fb0f05f2afcc", "rev": "ec1cce4c218f32d8fa209b30e093e5da9d95fb50",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "nix-community", "owner": "rasmus-kirk",
"repo": "nixos-generators", "repo": "nixarr",
"type": "github" "type": "github"
} }
}, },
"nixpkgs": { "nixpkgs": {
"locked": {
"lastModified": 1748662220,
"narHash": "sha256-7gGa49iB9nCnFk4h/g9zwjlQAyjtpgcFkODjcOQS0Es=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "59138c7667b7970d205d6a05a8bfa2d78caa3643",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": { "locked": {
"lastModified": 1748809735, "lastModified": 1748809735,
"narHash": "sha256-UR5vKj8rwKQmE8wxKFHgoJKbod05DMoH5phTje4L1l8=", "narHash": "sha256-UR5vKj8rwKQmE8wxKFHgoJKbod05DMoH5phTje4L1l8=",
@ -53,8 +53,8 @@
}, },
"root": { "root": {
"inputs": { "inputs": {
"nixos-generators": "nixos-generators", "nixarr": "nixarr",
"nixpkgs": "nixpkgs", "nixpkgs": "nixpkgs_2",
"sops-nix": "sops-nix" "sops-nix": "sops-nix"
} }
}, },
@ -77,6 +77,42 @@
"repo": "sops-nix", "repo": "sops-nix",
"type": "github" "type": "github"
} }
},
"vpnconfinement": {
"locked": {
"lastModified": 1743810720,
"narHash": "sha256-kbv/W4gizUSa6qH2rUQdgPj9AJaeN9k2XSWUYqj7IMU=",
"owner": "Maroka-chan",
"repo": "VPN-Confinement",
"rev": "74ae51e6d18b972ecc918ab43e8bde60c21a65d8",
"type": "github"
},
"original": {
"owner": "Maroka-chan",
"repo": "VPN-Confinement",
"type": "github"
}
},
"website-builder": {
"inputs": {
"nixpkgs": [
"nixarr",
"nixpkgs"
]
},
"locked": {
"lastModified": 1748552643,
"narHash": "sha256-UI3dlA/6WOitW3ejPhwYvB/yxrVWpdTmh96Hl8CEUis=",
"owner": "rasmus-kirk",
"repo": "website-builder",
"rev": "f399b9c89d45a0150ce6230c6df23f62f9c3cf89",
"type": "github"
},
"original": {
"owner": "rasmus-kirk",
"repo": "website-builder",
"type": "github"
}
} }
}, },
"root": "root", "root": "root",

View file

@ -1,107 +1,106 @@
{ {
description = "Unified flake for Proxmox base image + live NixOS VMs"; description = "NixOS HomeLab";
inputs = { inputs = {
# Nixpkgs repo for system packages
nixpkgs.url = "github:nixos/nixpkgs"; nixpkgs.url = "github:nixos/nixpkgs";
# nixos-generators lets us produce a "proxmox"-formatted image
nixos-generators = {
url = "github:nix-community/nixos-generators";
inputs.nixpkgs.follows = "nixpkgs";
};
# sops-nix secret management
sops-nix = { sops-nix = {
url = "github:Mic92/sops-nix"; url = "github:Mic92/sops-nix";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
}; };
nixarr.url = "github:rasmus-kirk/nixarr";
# simple-nixos-mailserver = {
# url = "gitlab:simple-nixos-mailserver/nixos-mailserver";
# inputs.nixpkgs.follwos = "nixpkgs";
# };
}; };
outputs = { self, nixpkgs, nixos-generators, sops-nix,... }: outputs = inputs @ {...}: let
let system = "x86_64-linux";
system = "x86_64-linux";
################################################################################ liveVMs = {
# A) Define “live” NixOS VM configurations under nixosConfigurations traefik = inputs.nixpkgs.lib.nixosSystem {
################################################################################ inherit system;
liveVMs = { modules = [
traefik = nixpkgs.lib.nixosSystem { ./hosts/traefik/host.nix
inherit system; inputs.sops-nix.nixosModules.sops
modules = [ ./hosts/traefik/host.nix ]; ];
};
sandbox = nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./hosts/sandbox/host.nix ];
};
dns = nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./hosts/dns/host.nix ];
};
monitoring = nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./hosts/monitoring/host.nix sops-nix.nixosModules.sops ];
};
forgejo = nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./hosts/forgejo/host.nix sops-nix.nixosModules.sops ];
};
runner01 = nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./hosts/forgejo-runner/host.nix sops-nix.nixosModules.sops ];
specialArgs.runnerId = "01";
};
# dockerHost = pkgs.lib.nixosSystem {
# inherit system;
# modules = [
# ./configuration.nix
# ./users/plasmagoat.nix
# ./hosts/docker-host.nix # DockerHost VM settings (shown below)
# ];
# };
}; };
################################################################################ sandbox = inputs.nixpkgs.lib.nixosSystem {
# B) Use nixos-generators to produce “template” images for Proxmox inherit system;
################################################################################ modules = [
./hosts/sandbox/host.nix
# 1) Existing Proxmox “base” image generator inputs.sops-nix.nixosModules.sops
proxmoxTemplate = nixos-generators.nixosGenerate { ];
system = "x86_64-linux";
modules = [ ./base.nix ];
format = "proxmox"; # outputs a .vma.zst suitable for qmrestore
}; };
# 2) A “docker” generator which builds a Proxmoxready template mail = inputs.nixpkgs.lib.nixosSystem {
docker = nixos-generators.nixosGenerate { inherit system;
system = "x86_64-linux"; modules = [
modules = [ ./templates/docker.nix ]; ./hosts/mail/host.nix
format = "proxmox"; inputs.sops-nix.nixosModules.sops
];
}; };
in keycloak = inputs.nixpkgs.lib.nixosSystem {
{ inherit system;
################################################################################ modules = [
# 1) Export “live” VM configs so you can run: ./hosts/keycloak/host.nix
# nixos-rebuild switch --flake .#traefik --target-host root@<traefik-IP> inputs.sops-nix.nixosModules.sops
# nixos-rebuild switch --flake .#sandbox --target-host root@<sandbox-IP> ];
# nixos-rebuild switch --flake .#dockerHost --target-host root@<dockerHost-IP> };
################################################################################
nixosConfigurations = liveVMs;
################################################################################ dns = inputs.nixpkgs.lib.nixosSystem {
# 2) Export Proxmox template images under packages.x86_64-linux: inherit system;
# modules = [
# • proxmox → `nix build .#proxmox` (generic base) ./hosts/dns/host.nix
# • docker → `nix build .#docker` (docker template) inputs.sops-nix.nixosModules.sops
################################################################################ ];
packages.x86_64-linux = { };
proxmoxTemplate = proxmoxTemplate;
docker = docker; monitoring = inputs.nixpkgs.lib.nixosSystem {
inherit system;
modules = [
./hosts/monitoring/host.nix
inputs.sops-nix.nixosModules.sops
];
};
media = inputs.nixpkgs.lib.nixosSystem {
inherit system;
modules = [
./hosts/media/host.nix
inputs.sops-nix.nixosModules.sops
inputs.nixarr.nixosModules.default
];
};
forgejo = inputs.nixpkgs.lib.nixosSystem {
inherit system;
modules = [
./hosts/forgejo/host.nix
inputs.sops-nix.nixosModules.sops
];
};
runner01 = inputs.nixpkgs.lib.nixosSystem {
inherit system;
modules = [
./hosts/forgejo-runner/host.nix
inputs.sops-nix.nixosModules.sops
];
specialArgs.runnerId = "01";
};
builder = inputs.nixpkgs.lib.nixosSystem {
inherit system;
modules = [
./hosts/nixos-builder/host.nix
inputs.sops-nix.nixosModules.sops
];
}; };
}; };
in {
nixosConfigurations = liveVMs;
};
} }

View file

@ -33,6 +33,8 @@
# "/proxmox-01.lab/192.168.1.205" # "/proxmox-01.lab/192.168.1.205"
# "/nas-01.lab/192.168.1.226" # "/nas-01.lab/192.168.1.226"
"/mail.procopius.dk/213.32.245.247"
# Split Horizon DNS # Split Horizon DNS
"/procopius.dk/192.168.1.80" "/procopius.dk/192.168.1.80"
"/.procopius.dk/192.168.1.80" "/.procopius.dk/192.168.1.80"

View file

@ -1,46 +0,0 @@
{ config, pkgs, modulesPath, lib, ... }:
{
# Pull in all the shared settings from configuration.nix
imports = [ ../configuration.nix ];
config = lib.recursiveUpdate config ({
# (Here, add anything liveVMspecific—e.g. NFS mounts, Docker, Compose service,
# static IP, or “import users/plasmagoat.nix” if you prefer.)
networking.interfaces.enp0s25 = {
useDHCP = false;
ipv4.addresses = [ { address = "192.168.1.50"; prefixLength = 24; } ];
ipv4.gateway = "192.168.1.1";
};
# Docker + Compose bits, for example:
fileSystems."/mnt/nas" = {
device = "192.168.1.100:/export/docker-volumes";
fsType = "nfs";
options = [ "defaults" "nofail" "x-systemd.requires=network-online.target" ];
};
environment.systemPackages = with pkgs; [
pkgs.docker
pkgs.docker-compose
# …plus anything else you want only on live VM…
];
services.docker.enable = true;
systemd.services.dockerComposeApp = {
description = "Auto-start DockerCompose stack";
after = [ "network-online.target" "docker.service" ];
wants = [ "network-online.target" "docker.service" ];
serviceConfig = {
WorkingDirectory = "/etc/docker-compose-app";
ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker-compose-app/docker-compose.yml up";
ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /etc/docker-compose-app/docker-compose.yml down";
Restart = "always";
RestartSec = 10;
};
wantedBy = [ "multi-user.target" ];
};
});
}

View file

@ -1,11 +1,8 @@
{ config, pkgs,... }:
{ {
# users.users.forgejo-runner = { config,
# isSystemUser = true; pkgs,
# extraGroups = [ "docker" ]; # Optional: if using docker jobs ...
# }; }: {
services.gitea-actions-runner = { services.gitea-actions-runner = {
package = pkgs.forgejo-actions-runner; package = pkgs.forgejo-actions-runner;
instances.default = { instances.default = {
@ -25,19 +22,18 @@
## optionally provide native execution on the host: ## optionally provide native execution on the host:
"native:host" "native:host"
]; ];
settings = {
log = {
level = "debug";
};
};
}; };
}; };
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
wget wget
nodejs nodejs
]; ];
# systemd.services."forgejo-actions-runner-default".serviceConfig = { virtualisation.docker.enable = true; # Optional: if using docker
# User = "forgejo-runner";
# Group = "forgejo-runner";
# };
virtualisation.docker.enable = true; # Optional: if using docker
} }

View file

@ -1,14 +1,17 @@
{ lib, pkgs, config, ... }:
{ {
lib,
pkgs,
config,
...
}: {
systemd.services.forgejo = { systemd.services.forgejo = {
after = [ "postgresql.service" ]; after = ["postgresql.service"];
requires = [ "postgresql.service" ]; requires = ["postgresql.service"];
}; };
services.postgresql = { services.postgresql = {
enable = true; enable = true;
ensureDatabases = [ "forgejo" ]; ensureDatabases = ["forgejo"];
ensureUsers = [ ensureUsers = [
{ {
name = "forgejo"; name = "forgejo";

View file

@ -1,12 +1,15 @@
{ lib, pkgs, config, ... }: {
let lib,
pkgs,
config,
...
}: let
cfg = config.services.forgejo; cfg = config.services.forgejo;
srv = cfg.settings.server; srv = cfg.settings.server;
domain = "git.procopius.dk"; domain = "git.procopius.dk";
ssh_domain = "gitssh.procopius.dk"; ssh_domain = "gitssh.procopius.dk";
in in {
{ users.users.plasmagoat.extraGroups = ["forgejo"];
users.users.plasmagoat.extraGroups = [ "forgejo" ];
services.forgejo = { services.forgejo = {
enable = true; enable = true;
@ -25,6 +28,15 @@ in
SSH_PORT = 2222; SSH_PORT = 2222;
SSH_DOMAIN = ssh_domain; SSH_DOMAIN = ssh_domain;
}; };
mailer = {
ENABLED = true;
FROM = "git@procopius.dk";
PROTOCOL = "smtp+starttls";
SMTP_ADDR = "mail.procopius.dk";
USER = "admin@procopius.dk";
PASSWD = "mikael";
};
database = { database = {
DB_TYPE = lib.mkForce "postgres"; DB_TYPE = lib.mkForce "postgres";
HOST = "/run/postgresql"; HOST = "/run/postgresql";
@ -33,12 +45,23 @@ in
}; };
service = { service = {
DISABLE_REGISTRATION = true; DISABLE_REGISTRATION = true;
# ENABLE_INTERNAL_SIGNIN = false;
ENABLE_NOTIFY_MAIL = true;
}; };
metrics = { metrics = {
ENABLED = true; ENABLED = true;
ENABLED_ISSUE_BY_REPOSITORY = true; ENABLED_ISSUE_BY_REPOSITORY = true;
ENABLED_ISSUE_BY_LABEL = true; ENABLED_ISSUE_BY_LABEL = true;
}; };
actions = {
ZOMBIE_TASK_TIMEOUT = "30m";
};
oauth2 = {
};
oauth2_client = {
ENABLE_AUTO_REGISTRATION = true;
UPDATE_AVATAR = true;
};
# log = { # log = {
# ROOT_PATH = "/var/log/forgejo"; # ROOT_PATH = "/var/log/forgejo";
# MODE = "file"; # MODE = "file";
@ -63,5 +86,5 @@ in
''; '';
# Optional: firewall # Optional: firewall
networking.firewall.allowedTCPPorts = [ 3000 2222 ]; networking.firewall.allowedTCPPorts = [3000 2222];
} }

View file

@ -0,0 +1,13 @@
{ config, pkgs, modulesPath, lib, ... }:
{
imports = [
../../templates/base.nix
../../secrets/shared-sops.nix
./sops.nix
./networking.nix
./storage.nix
./forgejo.nix
./database.nix
];
}

View file

@ -0,0 +1,15 @@
{stdenv}:
# Keycloak login theme packaged as a plain file tree: the derivation output
# contains the `login/` directory from ./custom_theme.
stdenv.mkDerivation {
  # pname/version is the idiomatic pair; the old `name = ...` plus a
  # separate `version` left the version unused in the store path.
  pname = "keycloak_custom_theme";
  version = "1.0";

  src = ./custom_theme;

  # Static files only — no build step; just copy the theme into $out.
  installPhase = ''
    runHook preInstall
    mkdir -p $out
    cp -a login $out
    runHook postInstall
  '';
}

View file

@ -0,0 +1,4 @@
body {
background: red;
color: blue;
}

View file

@ -0,0 +1,3 @@
parent=base
import=common/keycloak
styles=css/custom.css

View file

@ -0,0 +1,11 @@
{pkgs, ...}: let
  callPackage = pkgs.callPackage;
in {
  # Overlay exposing the packaged Keycloak theme as
  # pkgs.custom_keycloak_themes.custom (built from ./custom_theme.nix).
  nixpkgs.overlays = [
    (final: prev: {
      custom_keycloak_themes = {
        custom = callPackage ./custom_theme.nix {};
      };
    })
  ];
}

View file

@ -0,0 +1,14 @@
{
config,
pkgs,
modulesPath,
lib,
...
}: {
imports = [
../../templates/base.nix
./networking.nix
./sops.nix
./keycloak.nix
];
}

View file

@ -0,0 +1,31 @@
{
  config,
  pkgs,
  ...
}: {
  # Local PostgreSQL instance backing Keycloak (createLocally below).
  services.postgresql.enable = true;
  services.keycloak = {
    enable = true;
    # NOTE(review): hardcoded bootstrap admin password. A sops secret
    # `keycloak_admin_pass` is declared elsewhere in this host's sops.nix —
    # presumably intended to replace this; confirm and wire it up.
    initialAdminPassword = "password";
    database = {
      type = "postgresql";
      createLocally = true;
      username = "keycloak";
      # DB password is sops-managed (see ./sops.nix).
      passwordFile = config.sops.secrets.keycloak_psql_pass.path;
    };
    settings = {
      hostname = "keycloak.procopius.dk";
      # hostname-admin = "http://keycloak.lab:8080";
      # hostname-strict = false;
      # hostname-backchannel-dynamic = true;
      # Plain HTTP on 8080; TLS is terminated upstream (proxy-headers below).
      http-enabled = true;
      http-port = 8080;
      proxy-headers = "xforwarded";
    };
  };
  networking.firewall.allowedTCPPorts = [8080];
}

View file

@ -0,0 +1,8 @@
{
config,
lib,
pkgs,
...
}: {
networking.hostName = "keycloak";
}

View file

@ -0,0 +1,12 @@
{...}: let
keycloakSops = ../../secrets/keycloak/secrets.yml;
in {
sops.secrets.keycloak_psql_pass = {
sopsFile = keycloakSops;
mode = "0440";
};
sops.secrets.keycloak_admin_pass = {
sopsFile = keycloakSops;
mode = "0440";
};
}

14
nixos/hosts/mail/host.nix Normal file
View file

@ -0,0 +1,14 @@
{
config,
pkgs,
modulesPath,
lib,
...
}: {
imports = [
../../templates/base.nix
./networking.nix
./sops.nix
./mailserver.nix
];
}

View file

@ -0,0 +1,39 @@
{
  config,
  pkgs,
  ...
}: {
  imports = [
    # simple-nixos-mailserver, pinned by release tarball + sha256.
    # NOTE(review): fetched at eval time; consider moving this to a flake
    # input so it shares the lockfile with the rest of the config.
    (builtins.fetchTarball {
      # Pick a release version you are interested in and set its hash, e.g.
      url = "https://gitlab.com/simple-nixos-mailserver/nixos-mailserver/-/archive/nixos-25.05/nixos-mailserver-nixos-25.05.tar.gz";
      # To get the sha256 of the nixos-mailserver tarball, we can use the nix-prefetch-url command:
      # release="nixos-25.05"; nix-prefetch-url "https://gitlab.com/simple-nixos-mailserver/nixos-mailserver/-/archive/${release}/nixos-mailserver-${release}.tar.gz" --unpack
      sha256 = "0jpp086m839dz6xh6kw5r8iq0cm4nd691zixzy6z11c4z2vf8v85";
    })
  ];
  mailserver = {
    enable = true;
    fqdn = "mail.procopius.dk";
    domains = ["procopius.dk"];
    # A list of all login accounts. To create the password hashes, use
    # nix-shell -p mkpasswd --run 'mkpasswd -sm bcrypt'
    loginAccounts = {
      "admin@procopius.dk" = {
        # bcrypt hash delivered via sops (see ./sops.nix).
        hashedPasswordFile = config.sops.secrets.mailserver-admin-pass.path;
        aliases = [
          # NOTE(review): "@procopius.dk" is a domain catch-all — every
          # otherwise-unrouted address lands in this mailbox. Confirm intended.
          "@procopius.dk"
          "postmaster@procopius.dk"
        ];
      };
    };
    # Use Let's Encrypt certificates. Note that this needs to set up a stripped
    # down nginx and opens port 80.
    certificateScheme = "acme-nginx";
  };
  security.acme.acceptTerms = true;
  security.acme.defaults.email = "david.mikael@proton.me";
}

View file

@ -0,0 +1,8 @@
{
config,
lib,
pkgs,
...
}: {
networking.hostName = "mail";
}

View file

@ -0,0 +1,8 @@
{...}: let
mailserverSops = ../../secrets/mailserver/secrets.yml;
in {
sops.secrets.mailserver-admin-pass = {
sopsFile = mailserverSops;
mode = "0440";
};
}

View file

@ -0,0 +1,44 @@
{config, ...}: let
  # One entry per *arr app: the upstream port it serves on media.lab and
  # the local port its exportarr Prometheus exporter listens on.
  arrApps = {
    sonarr = { appPort = 8989; exporterPort = 9707; };
    readarr = { appPort = 8787; exporterPort = 9708; };
    radarr = { appPort = 7878; exporterPort = 9709; };
    prowlarr = { appPort = 9696; exporterPort = 9710; };
    lidarr = { appPort = 8686; exporterPort = 9711; };
    bazarr = { appPort = 6767; exporterPort = 9712; };
  };

  # Build one services.prometheus.exporters.exportarr-<app> stanza.
  mkExporter = app: ports: {
    enable = true;
    url = "http://media.lab:${toString ports.appPort}";
    port = ports.exporterPort;
    openFirewall = true;
    apiKeyFile = config.sops.secrets."${app}-api-key".path;
  };
in {
  services.prometheus.exporters = builtins.listToAttrs (map (app: {
    name = "exportarr-${app}";
    value = mkExporter app arrApps.${app};
  }) (builtins.attrNames arrApps));
}

View file

@ -0,0 +1,12 @@
{
imports = [
../../templates/base.nix
../../secrets/shared-sops.nix
./networking.nix
./storage.nix
./nixarr.nix
./exportarr.nix
./jellyfin-exporter.nix
./sops.nix
];
}

View file

@ -0,0 +1,8 @@
{config, ...}: {
services.prometheus.exporters.json = {
enable = true;
configFile = config.sops.secrets.jellyfin-exporter-config.path;
openFirewall = true;
user = "jellyfin";
};
}

View file

@ -0,0 +1,3 @@
{
networking.hostName = "media";
}

View file

@ -0,0 +1,42 @@
{config, ...}: {
nixarr = {
enable = true;
# These two values are also the default, but you can set them to whatever
# else you want
# WARNING: Do _not_ set them to `/home/user/whatever`, it will not work!
mediaDir = "/data/media";
stateDir = "/data/media/.state/nixarr";
vpn = {
enable = true;
# WARNING: This file must _not_ be in the config git directory
# You can usually get this wireguard file from your VPN provider
wgConf = config.sops.secrets.nixarr-vpn-conf.path;
};
jellyfin = {
enable = true;
};
transmission = {
enable = true;
vpn.enable = true;
peerPort = 51820; # Set this to the port forwarded by your VPN
};
# It is possible for this module to run the *Arrs through a VPN, but it
# is generally not recommended, as it can cause rate-limiting issues.
bazarr.enable = true;
lidarr.enable = true;
prowlarr.enable = true;
radarr.enable = true;
readarr.enable = true;
sonarr.enable = true;
jellyseerr.enable = true;
recyclarr = {
enable = true;
configFile = ./recyclarr.yml;
};
};
}

View file

@ -0,0 +1,183 @@
sonarr:
series:
base_url: http://localhost:8989
api_key: !env_var SONARR_API_KEY
include:
- template: sonarr-quality-definition-series
- template: sonarr-v4-quality-profile-web-2160p-alternative
- template: sonarr-v4-custom-formats-web-2160p
- template: sonarr-v4-quality-profile-anime
- template: sonarr-v4-custom-formats-anime
custom_formats:
# HDR Formats
- trash_ids:
# Comment out the next line if you and all of your users' setups are fully DV compatible
- 9b27ab6498ec0f31a3353992e19434ca # DV (WEBDL)
# HDR10+ Boost - Uncomment the next two lines if any of your devices DO support HDR10+
- 0dad0a507451acddd754fe6dc3a7f5e7 # HDR10+ Boost
- 385e9e8581d33133c3961bdcdeffb7b4 # DV HDR10+ Boost
assign_scores_to:
- name: WEB-2160p
# Unwanted
- trash_ids:
- 32b367365729d530ca1c124a0b180c64 # Bad Dual Groups
- 82d40da2bc6923f41e14394075dd4b03 # No-RlsGroup
- e1a997ddb54e3ecbfe06341ad323c458 # Obfuscated
- 06d66ab109d4d2eddb2794d21526d140 # Retags
- 1b3994c551cbb92a2c781af061f4ab44 # Scene
assign_scores_to:
- name: WEB-2160p
# Optional SDR
# Only ever use ONE of the following custom formats:
# SDR - block ALL SDR releases
# SDR (no WEBDL) - block UHD/4k Remux and Bluray encode SDR releases, but allow SDR WEB
# - trash_ids:
# - 2016d1676f5ee13a5b7257ff86ac9a93 # SDR
# # - 83304f261cf516bb208c18c54c0adf97 # SDR (no WEBDL)
# assign_scores_to:
# - name: WEB-2160p
- trash_ids:
- 026d5aadd1a6b4e550b134cb6c72b3ca # Uncensored
- b2550eb333d27b75833e25b8c2557b38 # 10bit
assign_scores_to:
- name: Remux-1080p - Anime
score: 1075 # Adjust scoring as desired
- trash_ids:
- 418f50b10f1907201b6cfdf881f467b7 # Anime Dual Audio
assign_scores_to:
- name: Remux-1080p - Anime
score: 2000 # Adjust scoring as desired
media_naming:
series: default
season: default
episodes:
rename: true
standard: default
daily: default
anime: default
delete_old_custom_formats: true
radarr:
movies:
base_url: http://localhost:7878
api_key: !env_var RADARR_API_KEY
include:
- template: radarr-quality-definition-movie
- template: radarr-custom-formats-remux-web-2160p
- template: radarr-quality-profile-anime
- template: radarr-custom-formats-anime
quality_profiles:
- name: Remux + WEB 2160p
reset_unmatched_scores:
enabled: true
upgrade:
allowed: true
until_quality: Remux-2160p
until_score: 10000
min_format_score: 0
quality_sort: top
qualities:
- name: Remux-2160p
- name: WEB 2160p
qualities:
- WEBDL-2160p
- WEBRip-2160p
- name: Remux-1080p
- name: WEB 1080p
qualities:
- WEBDL-1080p
- WEBRip-1080p
custom_formats:
- trash_ids:
# Audio
# Uncomment the next section to enable Advanced Audio Formats
- 496f355514737f7d83bf7aa4d24f8169 # TrueHD Atmos
- 2f22d89048b01681dde8afe203bf2e95 # DTS X
- 417804f7f2c4308c1f4c5d380d4c4475 # ATMOS (undefined)
- 1af239278386be2919e1bcee0bde047e # DD+ ATMOS
- 3cafb66171b47f226146a0770576870f # TrueHD
- dcf3ec6938fa32445f590a4da84256cd # DTS-HD MA
- a570d4a0e56a2874b64e5bfa55202a1b # FLAC
- e7c2fcae07cbada050a0af3357491d7b # PCM
- 8e109e50e0a0b83a5098b056e13bf6db # DTS-HD HRA
- 185f1dd7264c4562b9022d963ac37424 # DD+
- f9f847ac70a0af62ea4a08280b859636 # DTS-ES
- 1c1a4c5e823891c75bc50380a6866f73 # DTS
- 240770601cc226190c367ef59aba7463 # AAC
- c2998bd0d90ed5621d8df281e839436e # DD
assign_scores_to:
- name: Remux + WEB 2160p
# Movie Versions
- trash_ids:
# Uncomment any of the following lines to prefer these movie versions
# - 0f12c086e289cf966fa5948eac571f44 # Hybrid
# - 570bc9ebecd92723d2d21500f4be314c # Remaster
# - eca37840c13c6ef2dd0262b141a5482f # 4K Remaster
- e0c07d59beb37348e975a930d5e50319 # Criterion Collection
- 9d27d9d2181838f76dee150882bdc58c # Masters of Cinema
- db9b4c4b53d312a3ca5f1378f6440fc9 # Vinegar Syndrome
# - 957d0f44b592285f26449575e8b1167e # Special Edition
# - eecf3a857724171f968a66cb5719e152 # IMAX
# - 9f6cbff8cfe4ebbc1bde14c7b7bec0de # IMAX Enhanced
assign_scores_to:
- name: Remux + WEB 2160p
# Optional
- trash_ids:
- b6832f586342ef70d9c128d40c07b872 # Bad Dual Groups
- cc444569854e9de0b084ab2b8b1532b2 # Black and White Editions
- ae9b7c9ebde1f3bd336a8cbd1ec4c5e5 # No-RlsGroup
- 7357cf5161efbf8c4d5d0c30b4815ee2 # Obfuscated
- 5c44f52a8714fdd79bb4d98e2673be1f # Retags
- f537cf427b64c38c8e36298f657e4828 # Scene
assign_scores_to:
- name: Remux + WEB 2160p
- trash_ids:
# Comment out the next line if you and all of your users' setups are fully DV compatible
- 923b6abef9b17f937fab56cfcf89e1f1 # DV (WEBDL)
# HDR10+ Boost - Uncomment the next two lines if any of your devices DO support HDR10+
- b17886cb4158d9fea189859409975758 # HDR10Plus Boost
- 55a5b50cb416dea5a50c4955896217ab # DV HDR10+ Boost
assign_scores_to:
- name: Remux + WEB 2160p
# Optional SDR
# Only ever use ONE of the following custom formats:
# SDR - block ALL SDR releases
# SDR (no WEBDL) - block UHD/4k Remux and Bluray encode SDR releases, but allow SDR WEB
- trash_ids:
- 9c38ebb7384dada637be8899efa68e6f # SDR
# - 25c12f78430a3a23413652cbd1d48d77 # SDR (no WEBDL)
assign_scores_to:
- name: Remux + WEB 2160p
- trash_ids:
- 064af5f084a0a24458cc8ecd3220f93f # Uncensored
- a5d148168c4506b55cf53984107c396e # 10bit
assign_scores_to:
- name: Remux-1080p - Anime
score: 1075 # Adjust scoring as desired
- trash_ids:
- 4a3b087eea2ce012fcc1ce319259a3be # Anime Dual Audio
assign_scores_to:
- name: Remux-1080p - Anime
score: 2000 # Adjust scoring as desired
media_naming:
folder: default
movie:
rename: true
standard: default
delete_old_custom_formats: true

View file

@ -0,0 +1,41 @@
{
  # All media-host secrets live in one sops file; every API key shares the
  # same sopsFile + mode pair, so declare that pair once.
  sops.secrets = let
    nixarrSops = ../../secrets/nixarr/secrets.yml;
    shared = {
      sopsFile = nixarrSops;
      mode = "0440";
    };
  in {
    nixarr-vpn-conf = shared;
    sonarr-api-key = shared;
    radarr-api-key = shared;
    readarr-api-key = shared;
    bazarr-api-key = shared;
    lidarr-api-key = shared;
    prowlarr-api-key = shared;
    # Exporter config is read by the jellyfin user (default mode kept).
    jellyfin-exporter-config = {
      sopsFile = nixarrSops;
      owner = "jellyfin";
    };
  };
}

View file

@ -0,0 +1,23 @@
{
  # NFSv4 client support for the NAS shares declared below.
  boot.supportedFilesystems = ["nfs"];
  services.rpcbind.enable = true;

  fileSystems = let
    # Shared mount recipe for a share on the NAS at 192.168.1.226:
    # lazily mounted via systemd automount, only after the network is up
    # (_netdev), with noatime to avoid access-time write churn.
    nasShare = path: {
      device = "192.168.1.226:${path}";
      fsType = "nfs4";
      options = ["x-systemd.automount" "noatime" "_netdev"];
    };
  in {
    "/data/media/library/shows" = nasShare "/volume1/Media/TV Shows";
    "/data/media/library/movies" = nasShare "/volume1/Media/Movies";
    "/data/media/torrents" = nasShare "/volume1/data/torrents";
  };
}

View file

@ -1,19 +1,25 @@
{ config, pkgs, modulesPath, lib, ... }:
{ {
services.prometheus.alertmanagers = [ { config,
scheme = "http"; pkgs,
# path_prefix = "/alertmanager"; ...
static_configs = [ { }: {
targets = [ services.prometheus.alertmanagers = [
"localhost:9093" {
scheme = "http";
# path_prefix = "/alertmanager";
static_configs = [
{
targets = [
"localhost:9093"
];
}
]; ];
} ]; }
} ]; ];
services.prometheus.alertmanager = { services.prometheus.alertmanager = {
enable = true; enable = true;
openFirewall = true; openFirewall = true;
webExternalUrl = "http://monitor.lab:9093"; # optional but helpful webExternalUrl = "http://monitor.lab:9093"; # optional but helpful
configuration = { configuration = {
route = { route = {
group_wait = "10s"; group_wait = "10s";
@ -39,11 +45,12 @@
telegram_configs = [ telegram_configs = [
{ {
api_url = "https://api.telegram.org"; api_url = "https://api.telegram.org";
bot_token = config.sops.secrets."telegram-alert-bot-token".path; # FIX ME!
bot_token = "7597031094:AAHjjo3HL1XdY38pSNlR66-4wCP47o4LlSw"; # config.sops.secrets."telegram-alert-bot-token".path;
chat_id = -1002642560007; chat_id = -1002642560007;
message_thread_id = 4; message_thread_id = 4;
parse_mode = "HTML"; parse_mode = "HTML";
send_resolved = false; send_resolved = true;
message = "{{ template \"telegram.message\". }}"; message = "{{ template \"telegram.message\". }}";
} }
]; ];

View file

@ -162,7 +162,7 @@
"pluginVersion": "7.3.6", "pluginVersion": "7.3.6",
"targets": [ "targets": [
{ {
"expr": "sum(count_over_time({job=\"/var/log/traefik.log\"} |= \"RequestProtocol\" [$__interval]))", "expr": "sum(count_over_time({job=\"traefik\"} |= \"RequestProtocol\" [$__interval]))",
"legendFormat": "", "legendFormat": "",
"refId": "A" "refId": "A"
} }
@ -219,7 +219,7 @@
"pluginVersion": "7.3.6", "pluginVersion": "7.3.6",
"targets": [ "targets": [
{ {
"expr": "sum by (OriginStatus) (count_over_time({job=\"/var/log/traefik.log\"}|= \"RequestProtocol\" | json | __error__=\"\" [$__interval]))", "expr": "sum by (OriginStatus) (count_over_time({job=\"traefik\"}|= \"RequestProtocol\" | json | __error__=\"\" [$__interval]))",
"legendFormat": "HTTP Status: {{OriginStatus}}", "legendFormat": "HTTP Status: {{OriginStatus}}",
"refId": "A" "refId": "A"
} }
@ -284,7 +284,7 @@
"pluginVersion": "7.3.6", "pluginVersion": "7.3.6",
"targets": [ "targets": [
{ {
"expr": " sum(rate({job=\"/var/log/traefik.log\"} |~ \"RequestProtocol\" | json | OriginStatus >= 400 |__error__=\"\"[$__interval])) / (sum(rate({job=\"/var/log/traefik.log\"} |~ \"RequestProtocol\" | json | __error__=\"\"[$__interval])) / 100)", "expr": " sum(rate({job=\"traefik\"} |~ \"RequestProtocol\" | json | OriginStatus >= 400 |__error__=\"\"[$__interval])) / (sum(rate({job=\"traefik\"} |~ \"RequestProtocol\" | json | __error__=\"\"[$__interval])) / 100)",
"legendFormat": "", "legendFormat": "",
"refId": "A" "refId": "A"
} }
@ -367,7 +367,7 @@
"steppedLine": false, "steppedLine": false,
"targets": [ "targets": [
{ {
"expr": " sum by (OriginStatus,ServiceName) (count_over_time({job=\"/var/log/traefik.log\"} |~ \"RequestProtocol\" | json | OriginStatus >= 400 |__error__=\"\"[$__interval]))", "expr": " sum by (OriginStatus,ServiceName) (count_over_time({job=\"traefik\"} |~ \"RequestProtocol\" | json | OriginStatus >= 400 |__error__=\"\"[$__interval]))",
"legendFormat": " {{ServiceName}} / {{OriginStatus}} ", "legendFormat": " {{ServiceName}} / {{OriginStatus}} ",
"refId": "A" "refId": "A"
} }
@ -474,7 +474,7 @@
"pluginVersion": "7.3.6", "pluginVersion": "7.3.6",
"targets": [ "targets": [
{ {
"expr": "count(sum by (ClientHost) (count_over_time({job=\"/var/log/traefik.log\"}|= \"RequestProtocol\" | json | __error__=\"\" [$__interval])))", "expr": "count(sum by (ClientHost) (count_over_time({job=\"traefik\"}|= \"RequestProtocol\" | json | __error__=\"\" [$__interval])))",
"legendFormat": "", "legendFormat": "",
"refId": "A" "refId": "A"
} }
@ -544,7 +544,7 @@
"pluginVersion": "7.3.6", "pluginVersion": "7.3.6",
"targets": [ "targets": [
{ {
"expr": "sum_over_time({job=\"/var/log/traefik.log\"}|= \"RequestProtocol\" | json | OriginStatus=200 | unwrap DownstreamContentSize | __error__=\"\" [$__interval])", "expr": "sum_over_time({job=\"traefik\"}|= \"RequestProtocol\" | json | OriginStatus=200 | unwrap DownstreamContentSize | __error__=\"\" [$__interval])",
"legendFormat": "Bytes sent", "legendFormat": "Bytes sent",
"refId": "A" "refId": "A"
} }
@ -638,7 +638,7 @@
"strokeWidth": 1, "strokeWidth": 1,
"targets": [ "targets": [
{ {
"expr": "sum by (RouterName) (count_over_time({job=\"/var/log/traefik.log\"}|= \"RequestProtocol\" | json | __error__=\"\" [$__interval]))", "expr": "sum by (RouterName) (count_over_time({job=\"traefik\"}|= \"RequestProtocol\" | json | __error__=\"\" [$__interval]))",
"legendFormat": "{{RouterName}}", "legendFormat": "{{RouterName}}",
"refId": "A" "refId": "A"
} }
@ -675,7 +675,7 @@
}, },
"targets": [ "targets": [
{ {
"expr": "{job=\"/var/log/traefik.log\"} |= \"RequestProtocol\"| json | line_format \"Status:{{.OriginStatus}} Client From {{.ClientAddr}} {{.RequestMethod}} {{.RequestAddr}}{{.RequestPath}} Route To {{.ServiceAddr}}\"", "expr": "{job=\"traefik\"} |= \"RequestProtocol\"| json | line_format \"Status:{{.OriginStatus}} Client From {{.ClientAddr}} {{.RequestMethod}} {{.RequestAddr}}{{.RequestPath}} Route To {{.ServiceAddr}}\"",
"legendFormat": "", "legendFormat": "",
"refId": "A" "refId": "A"
} }
@ -749,7 +749,7 @@
"steppedLine": false, "steppedLine": false,
"targets": [ "targets": [
{ {
"expr": "quantile_over_time(0.95,{job=\"/var/log/traefik.log\"} |= \"RequestProtocol\"| json | unwrap Duration | __error__=\"\" [$__interval]) by (ServiceName)", "expr": "quantile_over_time(0.95,{job=\"traefik\"} |= \"RequestProtocol\"| json | unwrap Duration | __error__=\"\" [$__interval]) by (ServiceName)",
"hide": false, "hide": false,
"legendFormat": " {{ ServiceName }}", "legendFormat": " {{ ServiceName }}",
"refId": "C" "refId": "C"
@ -872,7 +872,7 @@
"steppedLine": false, "steppedLine": false,
"targets": [ "targets": [
{ {
"expr": "max by (ServiceName) (max_over_time({job=\"/var/log/traefik.log\"} |= \"RequestProtocol\" |json | unwrap Duration | __error__=\"\" [$__interval]))", "expr": "max by (ServiceName) (max_over_time({job=\"traefik\"} |= \"RequestProtocol\" |json | unwrap Duration | __error__=\"\" [$__interval]))",
"hide": false, "hide": false,
"legendFormat": "{{ ServiceName}}", "legendFormat": "{{ ServiceName}}",
"refId": "D" "refId": "D"
@ -995,7 +995,7 @@
"steppedLine": false, "steppedLine": false,
"targets": [ "targets": [
{ {
"expr": "sum by (ServiceName) (sum_over_time({job=\"/var/log/traefik.log\"} |= \"RequestProtocol\" |json | unwrap RequestContentSize | __error__=\"\" [$__interval]))", "expr": "sum by (ServiceName) (sum_over_time({job=\"traefik\"} |= \"RequestProtocol\" |json | unwrap RequestContentSize | __error__=\"\" [$__interval]))",
"hide": false, "hide": false,
"legendFormat": "{{ ServiceName}}", "legendFormat": "{{ ServiceName}}",
"refId": "D" "refId": "D"

View file

@ -852,7 +852,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"editorMode": "code", "editorMode": "code",
"expr": "sum by(router) (rate(traefik_router_requests_total[$__rate_interval]))", "expr": "sum by(service) (rate(traefik_service_requests_total[$__rate_interval]))",
"instant": false, "instant": false,
"legendFormat": "__auto", "legendFormat": "__auto",
"range": true, "range": true,
@ -949,7 +949,7 @@
"uid": "Prometheus" "uid": "Prometheus"
}, },
"editorMode": "code", "editorMode": "code",
"expr": "sum by(service) (rate(traefik_router_request_duration_seconds_count[$__rate_interval]))", "expr": "sum by(service) (rate(traefik_service_request_duration_seconds_count[$__rate_interval]))",
"instant": false, "instant": false,
"legendFormat": "__auto", "legendFormat": "__auto",
"range": true, "range": true,

View file

@ -1,17 +1,30 @@
{ config, pkgs, modulesPath, lib, ... }:
{ {
config,
pkgs,
modulesPath,
lib,
...
}: {
services.grafana.enable = true; services.grafana.enable = true;
services.grafana.settings.server = { services.grafana.settings = {
http_port = 3000; server = {
http_addr = "0.0.0.0"; http_port = 3000;
# Grafana needs to know on which domain and URL it's running http_addr = "0.0.0.0";
domain = "grafana.lab"; # Grafana needs to know on which domain and URL it's running
# root_url = "https://monitor.local/grafana/"; # Not needed if it is `https://your.domain/` domain = "grafana.procopius.dk";
# serve_from_sub_path = true; root_url = "https://grafana.procopius.dk"; # Not needed if it is `https://your.domain/`
# serve_from_sub_path = true;
oauth_auto_login = false;
};
"auth.generic_oauth" = {
enabled = false;
};
"auth" = {
disable_login_form = false;
};
}; };
networking.firewall.allowedTCPPorts = [ 3000 ]; networking.firewall.allowedTCPPorts = [3000];
services.grafana = { services.grafana = {
# declarativePlugins = with pkgs.grafanaPlugins; [ ... ]; # declarativePlugins = with pkgs.grafanaPlugins; [ ... ];
@ -33,22 +46,32 @@
type = "loki"; type = "loki";
url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}"; url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}";
} }
# Some plugins also can - c.f. https://grafana.com/docs/plugins/yesoreyeram-infinity-datasource/latest/setup/provisioning/ {
# { uid = "influxdb";
# name = "Infinity"; name = "InfluxDB";
# type = "yesoreyeram-infinity-datasource"; type = "influxdb";
# } url = "http://127.0.0.1:8086";
# But not all - c.f. https://github.com/fr-ser/grafana-sqlite-datasource/issues/141 access = "proxy";
jsonData = {
dbName = "proxmox";
httpHeaderName1 = "Authorization";
};
secureJsonData = {
httpHeaderValue1 = "Token iY4MTuqUAVJbBkDUiMde";
};
}
]; ];
# Note: removing attributes from the above `datasources.settings.datasources` is not enough for them to be deleted on `grafana`; # Note: removing attributes from the above `datasources.settings.datasources` is not enough for them to be deleted on `grafana`;
# One needs to use the following option: # One needs to use the following option:
# datasources.settings.deleteDatasources = [ { name = "prometheus"; orgId = 1; } { name = "loki"; orgId = 1; } ]; # datasources.settings.deleteDatasources = [ { name = "prometheus"; orgId = 1; } { name = "loki"; orgId = 1; } ];
dashboards.settings.providers = [{ dashboards.settings.providers = [
name = "my dashboards"; {
options.path = "/etc/grafana-dashboards"; name = "my dashboards";
}]; options.path = "/etc/grafana-dashboards";
}
];
}; };
}; };
@ -59,6 +82,13 @@
mode = "0644"; mode = "0644";
}; };
environment.etc."grafana-dashboards/traefik-access.json" = {
source = ./dashboards/traefik-access.json;
user = "grafana";
group = "grafana";
mode = "0644";
};
environment.etc."grafana-dashboards/grafana-traefik.json" = { environment.etc."grafana-dashboards/grafana-traefik.json" = {
source = ./dashboards/grafana-traefik.json; source = ./dashboards/grafana-traefik.json;
user = "grafana"; user = "grafana";

View file

@ -1,14 +1,20 @@
{ config, pkgs, modulesPath, lib, ... }:
{ {
config,
pkgs,
modulesPath,
lib,
...
}: {
imports = [ imports = [
../../templates/base.nix ../../templates/base.nix
../../secrets/shared-sops.nix ../../secrets/shared-sops.nix
./networking.nix ./networking.nix
./prometheus.nix ./prometheus.nix
./influxdb.nix
./grafana.nix ./grafana.nix
./loki.nix ./loki.nix
./alertmanager.nix ./alertmanager.nix
./sops.nix ./sops.nix
./jellyfin-exporter.nix
]; ];
} }

View file

@ -0,0 +1,25 @@
{
  config,
  pkgs,
  modulesPath,
  lib,
  ...
}: {
  # InfluxDB v2 time-series database (used as a metrics sink for the
  # "proxmox" bucket).
  services.influxdb2 = {
    enable = true;
    # No overrides to the default server configuration.
    settings = {};
    # Declarative first-run setup: admin user, organization and initial
    # bucket. Credentials come from sops-managed files so they never end
    # up in the nix store.
    provision = {
      enable = true;
      initialSetup = {
        username = "plasmagoat";
        passwordFile = config.sops.secrets.influxdb-password.path;
        tokenFile = config.sops.secrets.influxdb-token.path;
        organization = "procopius";
        bucket = "proxmox";
      };
    };
  };

  # Expose the InfluxDB HTTP API (default port 8086) to the network.
  networking.firewall.allowedTCPPorts = [8086];
}

View file

@ -0,0 +1,14 @@
{
  # Prometheus exporter for Jellyfin, run as an OCI container.
  virtualisation.oci-containers.containers = {
    jellyfin_exporter = {
      image = "rebelcore/jellyfin-exporter:latest";
      # Metrics endpoint; scraped by Prometheus on :9594.
      ports = [
        "9594:9594"
      ];
      cmd = [
        "--jellyfin.address=http://media.lab:8096"
        # SECURITY(review): Jellyfin API token is hard-coded here and ends up
        # world-readable in the nix store and in VCS — move it to sops (cf.
        # the existing jellyfin-exporter-config secret) and pass it via an
        # environment/config file instead.
        "--jellyfin.token=f7c89e5aa307434c9b3ecb329e896335"
      ];
    };
  };
}

View file

@ -1,51 +1,137 @@
{ config, pkgs, modulesPath, lib, ... }: {
config,
pkgs,
modulesPath,
lib,
...
}: let
monitor_hostname = "monitor.lab";
traefik_hostname = "traefik.lab";
sandbox_hostname = "sandbox.lab";
forgejo_hostname = "forgejo.lab";
runner01_hostname = "forgejo-runner-01.lab";
dnsmasq_hostname = "dns.lab";
media_hostname = "media.lab";
mail_hostname = "mail.lab";
keycloak_hostname = "keycloak.lab";
let monitored_hosts = [
monitor_ip = "monitor.lab"; monitor_hostname
traefik_ip = "traefik.lab"; traefik_hostname
sandbox_ip = "sandbox.lab"; sandbox_hostname
forgejo_ip = "forgejo.lab"; forgejo_hostname
runner01_ip = "forgejo-runner-01.lab"; runner01_hostname
dnsmasq_ip = "dns.lab"; dnsmasq_hostname
media_hostname
mail_hostname
keycloak_hostname
];
generateTargets = port:
map (host: "${host}:${toString port}") monitored_hosts;
instance_relabel_config = [
{
source_labels = ["__address__"];
regex = "([^:]+):\\d+"; # Captures everything before the last colon
target_label = "instance";
replacement = "$1";
}
];
node_exporter_port = 9100;
node_exporter_job = {
job_name = "node";
static_configs = [{targets = generateTargets node_exporter_port;}];
relabel_configs = instance_relabel_config;
};
promtail_port = 9080;
promtail_job = {
job_name = "promtail";
static_configs = [{targets = generateTargets promtail_port;}];
relabel_configs = instance_relabel_config;
};
prometheus_exporter_port = 9100;
postgres_exporter_port = 9187;
prometheus_port = 9090; prometheus_port = 9090;
alertmanager_port = 9093; alertmanager_port = 9093;
grafana_port = 3000; grafana_port = 3000;
promtail_port = 9080; monitoring_infra_job = {
traefik_monitor_port = 8082; job_name = "monitoring_infra";
forgejo_monitor_port = 3000; static_configs = [
dnsmasq_exporter_port = 9153; {
targets = [
exporters = { "${monitor_hostname}:${toString prometheus_port}"
node = [ "${monitor_hostname}:${toString alertmanager_port}"
"${monitor_ip}:${toString prometheus_exporter_port}" "${monitor_hostname}:${toString grafana_port}"
"${traefik_ip}:${toString prometheus_exporter_port}" ];
"${sandbox_ip}:${toString prometheus_exporter_port}" }
"${forgejo_ip}:${toString prometheus_exporter_port}"
"${runner01_ip}:${toString prometheus_exporter_port}"
]; ];
promtail = [ relabel_configs = instance_relabel_config;
"${monitor_ip}:${toString promtail_port}"
"${traefik_ip}:${toString promtail_port}"
"${sandbox_ip}:${toString promtail_port}"
"${forgejo_ip}:${toString promtail_port}"
"${runner01_ip}:${toString promtail_port}"
];
grafana = [ "${monitor_ip}:${toString grafana_port}" ];
prometheus = [ "${monitor_ip}:${toString prometheus_port}" ];
alertmanager = [ "${monitor_ip}:${toString alertmanager_port}" ];
traefik = [ "${traefik_ip}:${toString traefik_monitor_port}" ];
gitea = [ "${forgejo_ip}:${toString forgejo_monitor_port}" ];
postgres = [ "${forgejo_ip}:${toString postgres_exporter_port}" ];
dnsmasq = [ "${dnsmasq_ip}:${toString dnsmasq_exporter_port}" ];
}; };
traefik_monitor_port = 8082;
traefik_job = {
job_name = "traefik";
static_configs = [{targets = ["${traefik_hostname}:${toString traefik_monitor_port}"];}];
relabel_configs = instance_relabel_config;
};
forgejo_monitor_port = 3000;
forgejo_job = {
job_name = "forgejo";
static_configs = [{targets = ["${forgejo_hostname}:${toString forgejo_monitor_port}"];}];
relabel_configs = instance_relabel_config;
};
postgres_exporter_port = 9187;
postgres_job = {
job_name = "postgres";
static_configs = [{targets = ["${forgejo_hostname}:${toString postgres_exporter_port}"];}];
relabel_configs = instance_relabel_config;
};
dnsmasq_exporter_port = 9153;
dnsmasq_job = {
job_name = "dnsmasq";
static_configs = [{targets = ["${dnsmasq_hostname}:${toString dnsmasq_exporter_port}"];}];
relabel_configs = instance_relabel_config;
};
# --- Media Stack Scrape Job ---
media_stack_job = {
job_name = "media_stack";
static_configs = [
{
targets = [
"${media_hostname}:9707" # sonarr
"${media_hostname}:9708" # readarr
"${media_hostname}:9709" # radarr
"${media_hostname}:9710" # prowlarr
"${media_hostname}:9711" # lidarr
"${media_hostname}:9712" # bazarr
];
}
];
relabel_configs = instance_relabel_config;
};
jellyfin_port = 8096;
jellyfin_exporter_port = 9594;
jellyfin_job = {
job_name = "jellyfin";
static_configs = [
{
targets = [
"${media_hostname}:${toString jellyfin_port}"
"${monitor_hostname}:${toString jellyfin_exporter_port}"
];
}
];
relabel_configs = instance_relabel_config;
};
in { in {
networking.firewall.allowedTCPPorts = [ 9090 ]; networking.firewall.allowedTCPPorts = [9090];
services.prometheus = { services.prometheus = {
enable = true; enable = true;
@ -61,10 +147,17 @@ in {
"--web.enable-admin-api" "--web.enable-admin-api"
]; ];
scrapeConfigs = lib.mapAttrsToList (job_name: targets: { scrapeConfigs = [
inherit job_name; node_exporter_job
static_configs = [ { inherit targets; } ]; promtail_job
}) exporters; monitoring_infra_job
traefik_job
forgejo_job
postgres_job
dnsmasq_job
media_stack_job
jellyfin_job
];
# 🔔 Alerts provisioning # 🔔 Alerts provisioning
ruleFiles = [ ruleFiles = [

View file

@ -1,11 +0,0 @@
# /etc/grafana/provisioning/notifiers/contact-points.yml
apiVersion: 1
contactPoints:
- orgId: 1
name: telegram
type: telegram
settings:
bottoken: "__YOUR_BOT_TOKEN__"
chatid: "__YOUR_CHAT_ID__"
disableResolveMessage: false

View file

@ -1,7 +1,18 @@
{ config, lib, ... }:
{ {
config,
lib,
...
}: {
sops.secrets."telegram-alert-bot-token" = { sops.secrets."telegram-alert-bot-token" = {
sopsFile = ../../secrets/telegram/secrets.yml; sopsFile = ../../secrets/telegram/secrets.yml;
owner = "prometheus"; mode = "0440";
};
sops.secrets."influxdb-password" = {
sopsFile = ../../secrets/influxdb/secrets.yml;
owner = "influxdb2";
};
sops.secrets."influxdb-token" = {
sopsFile = ../../secrets/influxdb/secrets.yml;
owner = "influxdb2";
}; };
} }

View file

@ -0,0 +1,3 @@
{
  # Enable Intel GVT-g (kvmgt) mediated GPU passthrough for virtual machines.
  virtualisation.kvmgt.enable = true;
}

View file

@ -0,0 +1,9 @@
{
  # Host entry point: shared base template, shared sops secrets, then the
  # per-host modules for this machine.
  imports = [
    ../../templates/base.nix
    ../../secrets/shared-sops.nix
    ./networking.nix
    ./runner-user.nix
    ./builder.nix
  ];
}

View file

@ -0,0 +1,9 @@
{
  config,
  lib,
  pkgs,
  runnerId,
  ...
}: {
  # Dedicated Nix build host used by the Forgejo runner.
  # NOTE(review): runnerId is accepted but unused in this module — presumably
  # consumed by a sibling module via specialArgs; confirm before removing.
  networking.hostName = "nixos-builder";
}

View file

@ -0,0 +1,19 @@
{
  config,
  lib,
  pkgs,
  ...
}: {
  # Login account used by the Forgejo CI runner for SSH-based remote
  # builds/deploys.
  users.users.runner = {
    isNormalUser = true;
    description = "forgejo-runner";
    # NOTE(review): wheel grants sudo — consider whether the CI runner really
    # needs full admin rights rather than a narrower sudo rule.
    extraGroups = [
      "wheel"
    ];
    # Keys allowed to log in as this user: the runner itself and the admin
    # workstation.
    openssh.authorizedKeys.keys = [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGlzZWik5bbH6/xjiCpwo1SQSJ/J/Cv7y4ZQ45P68GLB forgejo-runner"
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air"
    ];
  };
}

View file

@ -1,10 +1,15 @@
{ config, pkgs, modulesPath, lib, ... }:
{ {
config,
pkgs,
modulesPath,
lib,
...
}: {
imports = [ imports = [
../../templates/base.nix ../../templates/base.nix
./networking.nix ./networking.nix
./storage.nix ./storage.nix
./sandbox.nix ./sandbox.nix
./warpgate.nix
]; ];
} }

View file

@ -1,11 +1,11 @@
{ {
boot.supportedFilesystems = [ "nfs" ]; boot.supportedFilesystems = ["nfs"];
services.rpcbind.enable = true; services.rpcbind.enable = true;
fileSystems."/mnt/nas" = { # fileSystems."/mnt/nas" = {
device = "192.168.1.226:/volume1/docker"; # device = "192.168.1.226:/volume1/docker";
fsType = "nfs"; # fsType = "nfs";
options = [ "noatime" "vers=4" "rsize=8192" "wsize=8192" ]; # options = [ "noatime" "vers=4" "rsize=8192" "wsize=8192" ];
}; # };
} }

View file

@ -0,0 +1,35 @@
{
  # Container runtime: Podman with docker-CLI compatibility.
  virtualisation = {
    containers.enable = true;
    oci-containers.backend = "podman";
    podman = {
      enable = true;
      # Create a `docker` alias for podman, to use it as a drop-in replacement
      dockerCompat = true;
      # Required for containers under podman-compose to be able to talk to each other.
      defaultNetwork.settings.dns_enabled = true;
    };
  };
  # Warpgate bastion container: SSH gateway on 2222, admin web UI on 8888.
  virtualisation.oci-containers.containers = {
    warpgate = {
      image = "ghcr.io/warp-tech/warpgate";
      ports = [
        "2222:2222"
        "8888:8888"
      ];
      # Persistent state (config, session recordings) lives on the host.
      volumes = [
        "/srv/warpgate/data:/data"
      ];
    };
  };
  # Pre-create the host-side data directory.
  systemd.tmpfiles.rules = [
    "d /srv/warpgate 0755 root root -"
    "d /srv/warpgate/data 0755 root root -"
  ];
  # Only the web UI is opened in the firewall. NOTE(review): SSH port 2222 is
  # not opened here — confirm whether that is intentional.
  networking.firewall.allowedTCPPorts = [8888];
}

View file

@ -0,0 +1,24 @@
{
  # Traefik dynamic routers for the auth stack. All routes terminate TLS on
  # the HTTPS entrypoint with Let's Encrypt certificates.
  keycloak = {
    rule = "Host(`keycloak.procopius.dk`)";
    service = "keycloak";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  # oauth2-proxy callback/endpoints exposed under each protected host's
  # /oauth2/ prefix (here: radarr).
  oauth2proxy = {
    rule = "Host(`radarr.procopius.dk`) && PathPrefix(`/oauth2/`)";
    service = "oauth2proxy";
    entryPoints = ["websecure"];
    middlewares = ["auth-headers"];
    tls.certResolver = "letsencrypt";
  };
  # Dedicated oauth2-proxy host (the redirect URL target).
  oauth2route = {
    rule = "Host(`oauth.procopius.dk`)";
    service = "oauth2proxy";
    entryPoints = ["websecure"];
    middlewares = ["auth-headers"];
    tls.certResolver = "letsencrypt";
  };
}

View file

@ -0,0 +1,5 @@
{
  # Backend targets for the auth routers. oauth2-proxy runs locally on the
  # Traefik host (:4180); the identity providers run on .lab hosts.
  authentik.loadBalancer.servers = [{url = "http://authentik.lab:9000";}];
  keycloak.loadBalancer.servers = [{url = "http://keycloak.lab:8080";}];
  oauth2proxy.loadBalancer.servers = [{url = "http://localhost:4180";}];
}

View file

@ -0,0 +1,43 @@
{
  # Core infrastructure routers. HTTPS (websecure) + Let's Encrypt
  # throughout; admin UIs are gated behind the oauth-auth middleware.
  traefik = {
    rule = "Host(`traefik.procopius.dk`)";
    service = "traefik";
    entryPoints = ["websecure"];
    middlewares = ["oauth-auth"];
    tls.certResolver = "letsencrypt";
  };
  # Plain-HTTP route for the mail host's ACME http-01 challenge; high
  # priority so it wins over the HTTP->HTTPS redirect, no middlewares.
  mail-acme = {
    rule = "Host(`mail.procopius.dk`) && PathPrefix(`/.well-known/acme-challenge/`)";
    service = "mail-acme";
    entryPoints = ["web"];
    priority = 1000;
    middlewares = [];
  };
  forgejo = {
    rule = "Host(`git.procopius.dk`)";
    service = "forgejo";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  proxmox = {
    rule = "Host(`proxmox.procopius.dk`)";
    service = "proxmox";
    entryPoints = ["websecure"];
    middlewares = ["oauth-auth"];
    tls.certResolver = "letsencrypt";
  };
  nas = {
    rule = "Host(`nas.procopius.dk`)";
    service = "nas";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  # Fallback: any unmatched host is sent to the nginx backend.
  catchAll = {
    rule = "HostRegexp(`.+`)";
    service = "nginx";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
}

View file

@ -0,0 +1,13 @@
{
  # Backend targets for the core routers. Backends with self-signed HTTPS
  # (Proxmox, the NAS, nginx) use the insecureTransport serversTransport to
  # skip certificate verification.
  traefik.loadBalancer.servers = [{url = "http://localhost:8080";}];
  mail-acme.loadBalancer.servers = [{url = "http://mail.lab:80";}];
  forgejo.loadBalancer.servers = [{url = "http://forgejo.lab:3000";}];
  proxmox.loadBalancer.servers = [{url = "https://192.168.1.205:8006";}];
  proxmox.loadBalancer.serversTransport = "insecureTransport";
  nas.loadBalancer.servers = [{url = "https://192.168.1.226:5001";}];
  nas.loadBalancer.serversTransport = "insecureTransport";
  nginx.loadBalancer.servers = [{url = "https://192.168.1.226:4433";}];
  nginx.loadBalancer.serversTransport = "insecureTransport";
}

View file

@ -0,0 +1,35 @@
{
  # Media-stack routers. Jellyfin and Jellyseerr are public; the *arr admin
  # UIs sit behind oauth2-proxy forward auth.
  jellyfin = {
    rule = "Host(`jellyfin.procopius.dk`)";
    service = "jellyfin";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  # radarr additionally requires the admin group via restrict-admin.
  radarr = {
    rule = "Host(`radarr.procopius.dk`)";
    service = "radarr";
    entryPoints = ["websecure"];
    middlewares = [
      "oauth-auth"
      "restrict-admin"
    ];
    tls.certResolver = "letsencrypt";
  };
  sonarr = {
    rule = "Host(`sonarr.procopius.dk`)";
    service = "sonarr";
    entryPoints = ["websecure"];
    middlewares = ["oauth-auth"];
    tls.certResolver = "letsencrypt";
  };
  jellyseerr = {
    rule = "Host(`jellyseerr.procopius.dk`)";
    service = "jellyseerr";
    entryPoints = ["websecure"];
    # middlewares = ["oauth-auth"];
    tls.certResolver = "letsencrypt";
  };
}

View file

@ -0,0 +1,6 @@
{
  # Backend targets for the media routers; everything runs on media.lab.
  jellyfin.loadBalancer.servers = [{url = "http://media.lab:8096";}];
  radarr.loadBalancer.servers = [{url = "http://media.lab:7878";}];
  sonarr.loadBalancer.servers = [{url = "http://media.lab:8989";}];
  jellyseerr.loadBalancer.servers = [{url = "http://media.lab:5055";}];
}

View file

@ -1,10 +1,43 @@
{ lib, config, ... }:
let let
internalNetwork = "192.168.1.0/24"; internalNetwork = "192.168.1.0/24";
in in {
{
internal-whitelist = { internal-whitelist = {
ipWhiteList.sourceRange = [ internalNetwork ]; ipWhiteList.sourceRange = [internalNetwork];
};
auth-headers = {
headers = {
sslRedirect = true;
stsSeconds = 315360000;
browserXssFilter = true;
contentTypeNosniff = true;
forceSTSHeader = true;
sslHost = "procopius.dk";
stsIncludeSubdomains = true;
stsPreload = true;
frameDeny = true;
};
};
oauth-auth = {
forwardAuth = {
address = "http://localhost:4180/";
trustForwardHeader = true;
authResponseHeaders = [
"Authorization"
"X-Auth-Request-Access-Token"
"X-Auth-Request-User"
"X-Auth-Request-Email"
"X-Auth-Request-Preferred-Username" # Recommended
"X-Auth-Request-Access-Token" # If you want to pass the token
"X-Auth-Request-Groups" # If you configured a mapper in Keycloak to emit groups
];
};
};
restrict-admin = {
forwardAuth = {
address = "http://localhost:4180/oauth2/auth?allowed_groups=role:admin";
};
}; };
} }

View file

@ -0,0 +1,8 @@
{
  # Router for the mester.jakobblum.dk site (HTTPS + Let's Encrypt).
  mesterjakob = {
    rule = "Host(`mester.jakobblum.dk`)";
    service = "mesterjakob";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
}

View file

@ -0,0 +1,3 @@
{
  # Backend target for the mesterjakob router.
  mesterjakob.loadBalancer.servers = [{url = "http://192.168.1.226:4200";}];
}

View file

@ -0,0 +1,28 @@
{
  # Monitoring-stack routers. Prometheus and Alertmanager are gated behind
  # oauth2-proxy forward auth; Grafana and Umami handle their own auth.
  prometheus = {
    rule = "Host(`prometheus.procopius.dk`)";
    service = "prometheus";
    entryPoints = ["websecure"];
    middlewares = ["oauth-auth"];
    tls.certResolver = "letsencrypt";
  };
  grafana = {
    rule = "Host(`grafana.procopius.dk`)";
    service = "grafana";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  alertmanager = {
    rule = "Host(`alertmanager.procopius.dk`)";
    service = "alertmanager";
    entryPoints = ["websecure"];
    middlewares = ["oauth-auth"];
    tls.certResolver = "letsencrypt";
  };
  umami = {
    rule = "Host(`umami.procopius.dk`)";
    service = "umami";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
}

View file

@ -0,0 +1,6 @@
{
  # Backend targets for the monitoring routers (monitor.lab host; umami on
  # the NAS).
  prometheus.loadBalancer.servers = [{url = "http://monitor.lab:9090";}];
  grafana.loadBalancer.servers = [{url = "http://monitor.lab:3000";}];
  alertmanager.loadBalancer.servers = [{url = "http://monitor.lab:9093";}];
  umami.loadBalancer.servers = [{url = "http://192.168.1.226:3333";}];
}

View file

@ -0,0 +1,35 @@
{
  # Routers for the Ente photos stack and its MinIO object store (web
  # console and S3 API on separate hosts).
  ente = {
    rule = "Host(`ente.procopius.dk`)";
    service = "ente";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  photos = {
    rule = "Host(`photos.procopius.dk`)";
    service = "photos";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  account = {
    rule = "Host(`account.procopius.dk`)";
    service = "account";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  minio = {
    rule = "Host(`minio.procopius.dk`)";
    service = "minio";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
  minio-api = {
    rule = "Host(`minio-api.procopius.dk`)";
    service = "minio-api";
    entryPoints = ["websecure"];
    tls.certResolver = "letsencrypt";
  };
}

View file

@ -0,0 +1,7 @@
{
  # Backend targets for the photos routers; the whole stack runs on the NAS.
  ente.loadBalancer.servers = [{url = "http://192.168.1.226:8087";}];
  photos.loadBalancer.servers = [{url = "http://192.168.1.226:3000";}];
  account.loadBalancer.servers = [{url = "http://192.168.1.226:3001";}];
  minio.loadBalancer.servers = [{url = "http://192.168.1.226:3201";}];
  minio-api.loadBalancer.servers = [{url = "http://192.168.1.226:3200";}];
}

View file

@ -1,140 +0,0 @@
{ lib, config, ... }:
{
traefik = {
rule = "Host(`traefik.procopius.dk`)";
service = "traefik";
entryPoints = [ "websecure" ];
middlewares = [ "internal-whitelist" ];
tls = { certResolver = "letsencrypt"; };
};
proxmox = {
rule = "Host(`proxmox.procopius.dk`)";
service = "proxmox";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
forgejo = {
rule = "Host(`git.procopius.dk`)";
service = "forgejo";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
prometheus = {
rule = "Host(`prometheus.procopius.dk`)";
service = "prometheus";
entryPoints = [ "websecure" ];
middlewares = [ "internal-whitelist" ];
tls = { certResolver = "letsencrypt"; };
};
grafana = {
rule = "Host(`grafana.procopius.dk`)";
service = "grafana";
entryPoints = [ "websecure" ];
middlewares = [ "internal-whitelist" ];
tls = { certResolver = "letsencrypt"; };
};
alertmanager = {
rule = "Host(`alertmanager.procopius.dk`)";
service = "alertmanager";
entryPoints = [ "websecure" ];
middlewares = [ "internal-whitelist" ];
tls = { certResolver = "letsencrypt"; };
};
jellyfin = {
rule = "Host(`jellyfin.procopius.dk`)";
service = "jellyfin";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
sonarr = {
rule = "Host(`sonarr.procopius.dk`)";
service = "sonarr";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
radarr = {
rule = "Host(`radarr.procopius.dk`)";
service = "radarr";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
ente = {
rule = "Host(`ente.procopius.dk`)";
service = "ente";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
photos = {
rule = "Host(`photos.procopius.dk`)";
service = "photos";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
minio = {
rule = "Host(`minio.procopius.dk`)";
service = "minio";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
minio-api = {
rule = "Host(`minio-api.procopius.dk`)";
service = "minio-api";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
account = {
rule = "Host(`account.procopius.dk`)";
service = "account";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
auth = {
rule = "Host(`auth.procopius.dk`)";
service = "auth";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
nas = {
rule = "Host(`nas.procopius.dk`)";
service = "nas";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
umami = {
rule = "Host(`umami.procopius.dk`)";
service = "umami";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
mesterjakob = {
rule = "Host(`mester.jakobblum.dk`)";
service = "mesterjakob";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
catchAll = {
rule = "HostRegexp(`.+`)";
service = "nginx";
entryPoints = [ "websecure" ];
tls = { certResolver = "letsencrypt"; };
};
}

View file

@ -1,38 +0,0 @@
{ lib, config, ... }:
{
proxmox.loadBalancer.servers = [ { url = "https://192.168.1.205:8006"; } ];
proxmox.loadBalancer.serversTransport = "insecureTransport";
traefik.loadBalancer.servers = [ { url = "http://localhost:8080"; } ];
forgejo.loadBalancer.servers = [ { url = "http://forgejo.lab:3000"; } ];
nginx.loadBalancer.servers = [ { url = "https://192.168.1.226:4433"; } ];
nginx.loadBalancer.serversTransport = "insecureTransport";
prometheus.loadBalancer.servers = [ { url = "http://monitor.lab:9090"; } ];
grafana.loadBalancer.servers = [ { url = "http://monitor.lab:3000"; } ];
alertmanager.loadBalancer.servers = [ { url = "http://monitor.lab:9093"; } ];
# from nginx
account.loadBalancer.servers = [ { url = "http://192.168.1.226:3001"; } ];
auth.loadBalancer.servers = [ { url = "http://192.168.1.226:3005"; } ];
ente.loadBalancer.servers = [ { url = "http://192.168.1.226:8087"; } ];
photos.loadBalancer.servers = [ { url = "http://192.168.1.226:3000"; } ];
minio.loadBalancer.servers = [ { url = "http://192.168.1.226:3201"; } ];
minio-api.loadBalancer.servers = [ { url = "http://192.168.1.226:3200"; } ];
nas.loadBalancer.servers = [ { url = "https://192.168.1.226:5001"; } ];
nas.loadBalancer.serversTransport = "insecureTransport";
jellyfin.loadBalancer.servers = [ { url = "http://192.168.1.226:8096"; } ];
radarr.loadBalancer.servers = [ { url = "http://192.168.1.226:7878"; } ];
sonarr.loadBalancer.servers = [ { url = "http://192.168.1.226:8989"; } ];
umami.loadBalancer.servers = [ { url = "http://192.168.1.226:3333"; } ];
mesterjakob.loadBalancer.servers = [ { url = "http://192.168.1.226:4200"; } ];
}

View file

@ -1,11 +1,11 @@
{ lib, config, ... }:
{ {
entryPoints = { entryPoints = {
web = { web = {
address = ":80"; address = ":80";
asDefault = true; asDefault = true;
allowACMEByPass = true;
http.redirections.entrypoint = { http.redirections.entrypoint = {
priority = 10;
to = "websecure"; to = "websecure";
scheme = "https"; scheme = "https";
}; };
@ -21,6 +21,8 @@
}; };
}; };
providers.file.watch = true;
api = { api = {
dashboard = true; dashboard = true;
insecure = true; insecure = true;
@ -37,7 +39,7 @@
dnsChallenge = { dnsChallenge = {
provider = "cloudflare"; provider = "cloudflare";
delayBeforeCheck = 10; delayBeforeCheck = 10;
resolvers = [ "1.1.1.1:53" "8.8.8.8:53" ]; resolvers = ["1.1.1.1:53" "8.8.8.8:53"];
}; };
}; };
}; };

View file

@ -1,10 +1,16 @@
{ config, pkgs, modulesPath, lib, ... }:
{ {
config,
pkgs,
modulesPath,
lib,
...
}: {
imports = [ imports = [
../../templates/base.nix ../../templates/base.nix
./networking.nix ./networking.nix
./traefik.nix ./traefik.nix
./promtail.nix ./promtail.nix
./sops.nix
./oauth2proxy.nix
]; ];
} }

View file

@ -0,0 +1,76 @@
# /etc/nixos/configuration.nix
# oauth2-proxy in front of Keycloak (OIDC), used for SSO on *.procopius.dk.
# With a "static://" upstream it acts purely as an authentication endpoint
# (forward-auth style) rather than proxying to a real backend.
{
  config,
  lib,
  pkgs,
  ...
}: let
  # Path to the sops-decrypted env file (client secret, cookie secret, ...).
  oauth2ProxyKeyFile = config.sops.secrets."oauth2-proxy-env".path;
in {
  services.oauth2-proxy = {
    enable = true;
    package = pkgs.oauth2-proxy;
    keyFile = oauth2ProxyKeyFile;
    provider = "keycloak-oidc"; # Use "oidc" for standard OIDC providers like Keycloak
    oidcIssuerUrl = "https://keycloak.procopius.dk/realms/homelab";
    clientID = "oauth2-proxy"; # Matches the client ID in Keycloak
    # Public URL for oauth2-proxy itself, where Keycloak redirects back to
    redirectURL = "https://oauth.procopius.dk/oauth2/callback";
    # Answer 202 to every authenticated request; there is no real backend.
    upstream = ["static://202"];
    extraConfig = {
      code-challenge-method = "S256"; # PKCE
      # email-domain = "*";
      auth-logging = true;
      request-logging = true; # NOTE(review): redundant with requestLogging below
      whitelist-domain = ".procopius.dk"; # post-login redirect targets allowed
      pass-host-header = true; # NOTE(review): redundant with passHostHeader below
      skip-provider-button = true; # go straight to Keycloak, no chooser page
    };
    # Cookie configuration
    cookie = {
      name = "_oauth2_proxy_homelab";
      domain = ".procopius.dk"; # shared across all subdomains
      secure = true;
      httpOnly = true;
      expire = "24h"; # session lifetime
      refresh = "1h"; # refresh tokens this often
    };
    # Listen address for oauth2-proxy internally. Traefik will forward to this.
    httpAddress = "http://127.0.0.1:4180"; # Ensure this port is not blocked by your firewall internally
    # Reverse proxy settings for headers
    reverseProxy = true; # Set to true because it's behind Traefik
    # Headers to set for the upstream applications after successful authentication
    setXauthrequest = true; # Set X-Auth-Request-User, X-Auth-Request-Email etc.
    passBasicAuth = true; # Pass HTTP Basic Auth headers
    passHostHeader = true; # Pass the original Host header to the upstream
    # Authorization rules for who can access
    # You can restrict by email domain (allows everyone from that domain)
    email.domains = ["*"]; # Allows any authenticated user from Keycloak
    # Or restrict by specific email addresses (if you want tighter control):
    # email.addresses = allowedOauth2ProxyEmails;
    # Logging
    requestLogging = true;
    # Optional: If you use specific scopes for Keycloak (e.g., if you want groups claim)
    # scope = "openid profile email";
    # If you specifically added a 'groups' claim in Keycloak:
    scope = "openid profile email";
    # You can add extra command-line flags here if needed, e.g., for debug logging
    # extraConfig = {
    #
    # };
  };
  # Expose the internal port for oauth2-proxy if needed for debugging or direct access (less common)
  networking.firewall.allowedTCPPorts = [4180];
}

View file

@ -1,7 +1,9 @@
{ config, lib, pkgs, ... }:
{ {
config,
lib,
pkgs,
...
}: {
# This ensures the directory exists at boot, owned by traefik (writer) and readable by promtail. # This ensures the directory exists at boot, owned by traefik (writer) and readable by promtail.
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"d /var/log/traefik 0755 traefik promtail -" "d /var/log/traefik 0755 traefik promtail -"
@ -12,9 +14,9 @@
job_name = "traefik"; job_name = "traefik";
static_configs = [ static_configs = [
{ {
targets = [ "localhost" ]; targets = ["localhost"];
labels = { labels = {
job = "/var/log/traefik/*.log"; job = "traefik";
host = config.networking.hostName; host = config.networking.hostName;
env = "proxmox"; env = "proxmox";
instance = "${config.networking.hostName}.lab"; # prometheus scrape target instance = "${config.networking.hostName}.lab"; # prometheus scrape target

View file

@ -0,0 +1,10 @@
{
  # SOPS-managed secrets consumed by the reverse-proxy stack. Both entries
  # are decrypted from the same encrypted file; mode 0440 keeps the
  # plaintext readable by owner and group only.
  sops.secrets = let
    secretDefaults = {
      sopsFile = ../../secrets/traefik/secrets.yml;
      mode = "0440";
    };
  in {
    "traefik-env" = secretDefaults;
    "oauth2-proxy-env" = secretDefaults;
  };
}

View file

@ -1,23 +1,59 @@
{ config, lib, pkgs, ... }:
let
staticConfig = import ./configuration/static.nix { inherit lib config; };
middlewaresConfig = import ./configuration/middlewares.nix { inherit lib config; };
routersConfig = import ./configuration/routers.nix { inherit lib config; };
servicesConfig = import ./configuration/services.nix { inherit lib config; };
in
{ {
config,
lib,
...
}: let
# Import router and service declarations grouped in files
infraRouters = import ./configuration/infra/routers.nix;
infraServices = import ./configuration/infra/services.nix;
monitoringRouters = import ./configuration/monitoring/routers.nix;
monitoringServices = import ./configuration/monitoring/services.nix;
mediaRouters = import ./configuration/media-center/routers.nix;
mediaServices = import ./configuration/media-center/services.nix;
photosRouters = import ./configuration/photos/routers.nix;
photosServices = import ./configuration/photos/services.nix;
authRouters = import ./configuration/auth/routers.nix;
authServices = import ./configuration/auth/services.nix;
miscRouters = import ./configuration/misc/routers.nix;
miscServices = import ./configuration/misc/services.nix;
middlewares = import ./configuration/middlewares.nix;
staticConfig = import ./configuration/static.nix;
# Combine all routers and services from groups
allRouters = lib.foldl' (acc: routers: acc // routers) {} [
infraRouters
monitoringRouters
mediaRouters
photosRouters
authRouters
miscRouters
];
allServices = lib.foldl' (acc: services: acc // services) {} [
infraServices
monitoringServices
mediaServices
photosServices
authServices
miscServices
];
in {
services.traefik = { services.traefik = {
enable = true; enable = true;
environmentFiles = [config.sops.secrets."traefik-env".path];
# ==== Static Configuration ====
staticConfigOptions = staticConfig; staticConfigOptions = staticConfig;
# ==== Dynamic Configuration ====
dynamicConfigOptions.http = { dynamicConfigOptions.http = {
routers = routersConfig; routers = allRouters;
services = servicesConfig; services = allServices;
middlewares = middlewaresConfig; middlewares = middlewares;
serversTransports = { serversTransports = {
insecureTransport = { insecureTransport = {
@ -26,11 +62,4 @@ in
}; };
}; };
}; };
# SECURITY(review): this Cloudflare API token was committed in plaintext.
# Deleting these lines does not remove them from git history — the token
# must be rotated/revoked at Cloudflare and supplied via the sops env file.
systemd.services.traefik.serviceConfig.Environment = [
"CLOUDFLARE_DNS_API_TOKEN=gQYyG6cRw-emp_qpsUj9TrkYgoVC1v9UUtv94ozA"
"CLOUDFLARE_ZONE_API_TOKEN=gQYyG6cRw-emp_qpsUj9TrkYgoVC1v9UUtv94ozA"
];
virtualisation.docker.enable = true;
} }

View file

View file

@ -0,0 +1,14 @@
{
  # Warpgate bastion, run as an OCI container managed by systemd.
  virtualisation.oci-containers.containers = {
    warpgate = {
      # NOTE(review): no image tag, so "latest" is pulled implicitly —
      # pin a version for reproducible deployments.
      image = "ghcr.io/warp-tech/warpgate";
      # host:container port mappings
      ports = [
        "2222:2222"
        "8888:8888"
      ];
      # Persistent state lives on the host under /srv/warpgate/data.
      volumes = [
        "/srv/warpgate/data:/data"
      ];
    };
  };
}

View file

@ -1,77 +0,0 @@
# NixOS module for a Docker-Compose host VM: installs Docker, and carries
# (currently commented-out) scaffolding for static networking, an NFS mount
# from the NAS, and a systemd unit that runs the Compose stack.
{ config, pkgs, lib, ... }:
let
  # ── Adjust these to your NAS settings ──────────────────────────────────────────
  nasServer = "192.168.1.100"; # your NAS IP or hostname
  nasExportPath = "/export/docker-volumes"; # path on the NAS
  nasMountPoint = "/mnt/nas"; # where to mount inside VM
  # ── Where we drop your Compose file and run it ────────────────────────────────
  composeDir = "/etc/docker-compose-app";
  # NOTE(review): only referenced from the commented-out section D below;
  # harmless (lazy, never evaluated) but can be deleted along with it.
  composeText = lib.readFile ./docker-compose.yml;
in {
  ##############################################################################
  # A) NETWORKING
  # (If you want DHCP, remove this block and let cloud-init assign an IP.)
  ##############################################################################
  # networking.interfaces.enp0s25 = {
  #   useDHCP = false;
  #   ipv4.addresses = [{
  #     address = "192.168.1.50";
  #     prefixLength = 24;
  #   }];
  #   ipv4.gateway = "192.168.1.1";
  #   # optional: ipv4.dns = [ "1.1.1.1" "8.8.8.8" ];
  # };

  ##############################################################################
  # B) MOUNT YOUR NAS VIA NFS
  ##############################################################################
  # fileSystems."${nasMountPoint}" = {
  #   device = "${nasServer}:${nasExportPath}";
  #   fsType = "nfs";
  #   options = [ "defaults" "nofail" "x-systemd.requires=network-online.target" ];
  # };
  # fileSystems."${nasMountPoint}".requiredForBoot = false;

  ##############################################################################
  # C) INSTALL DOCKER & DOCKER-COMPOSE
  ##############################################################################
  environment.systemPackages = with pkgs; [
    docker
    docker-compose
  ];
  # FIX(review): the NixOS option is virtualisation.docker.enable —
  # "services.docker.enable" does not exist and fails module evaluation
  # (the sibling docker.nix module in this repo uses the correct path).
  virtualisation.docker.enable = true;

  ##############################################################################
  # D) DROP IN YOUR docker-compose.yml
  ##############################################################################
  # systemd.tmpfiles.rules = [
  #   # Ensure directory exists before we write the file.
  #   "D! ${composeDir} 0755 root root - -"
  # ];
  # environment.etc."docker-compose-app/docker-compose.yml".text = composeText;

  ##############################################################################
  # E) RUN DOCKER-COMPOSE AS A SYSTEMD SERVICE
  ##############################################################################
  # systemd.services.dockerComposeApp = {
  #   description = "Auto-start Docker-Compose stack for home server";
  #   after = [ "network-online.target" "docker.service" ];
  #   wants = [ "network-online.target" "docker.service" ];
  #   serviceConfig = {
  #     WorkingDirectory = composeDir;
  #     ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f ${composeDir}/docker-compose.yml up";
  #     ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f ${composeDir}/docker-compose.yml down";
  #     Restart = "always";
  #     RestartSec = 10;
  #   };
  #   wantedBy = [ "multi-user.target" ];
  # };
}

View file

@ -1,11 +0,0 @@
# Enable the Docker daemon, but don't start it at boot.
{
  config,
  pkgs,
  inputs,
  ...
}: {
  virtualisation.docker = {
    enable = true;
    # dockerd is not launched during boot; it is started on demand instead,
    # keeping boot fast on hosts where Docker is rarely used.
    enableOnBoot = false;
  };
}

View file

@ -1,54 +0,0 @@
# NixOS module that installs Docker + docker-compose, drops a Compose file
# under /etc, and runs the stack as a long-lived systemd service.
{ config, pkgs, ... }:
let
  # (Optional) name your Compose apps directory on the VM:
  composeDir = "/etc/docker-compose-app";
in {
  # 1) Install Docker engine and docker-compose binary:
  environment.systemPackages = with pkgs; [
    docker
    docker-compose # pulls in the python-based compose
  ];

  # 2) Enable the Docker daemon:
  # FIX(review): the NixOS option is virtualisation.docker.enable —
  # "services.docker.enable" does not exist and fails module evaluation.
  virtualisation.docker.enable = true;

  # 3) Copy the Compose file from the flake into /etc on the VM:
  environment.etc."docker-compose-app/docker-compose.yml".text = builtins.readFile ./docker-compose.yml;

  # 4) Make sure that directory exists with the right permissions:
  systemd.tmpfiles.rules = [
    # NOTE(review): "D!" also wipes the directory's contents at boot; plain
    # "d" would only create it if missing — confirm the wipe is intended.
    "D! /etc/docker-compose-app 0755 root root - -"
  ];

  # 5) Define a systemd service to run `docker-compose up`:
  systemd.services.dockerComposeApp = {
    description = "docker-compose stack for my application";
    after = [ "network-online.target" "docker.service" ];
    wants = [ "network-online.target" "docker.service" ];
    serviceConfig = {
      # Run in foreground but let systemd restart if it crashes
      ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f ${composeDir}/docker-compose.yml up";
      ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f ${composeDir}/docker-compose.yml down";
      WorkingDirectory = composeDir;
      Restart = "always";
      RestartSec = 10;
    };
    # Make sure the directory exists before this service starts:
    preStart = ''
      mkdir -p ${composeDir}
      chown root:root ${composeDir}
    '';
    wantedBy = [ "multi-user.target" ];
  };

  # 6) (Optional) If any volumes need to exist, define them here, for example:
  # environment.etc."docker-compose-app/data".source = "/path/to/local/data";
}

View file

@ -1,9 +1,11 @@
{ config, pkgs, ... }:
let
promtail_port = 9080;
in
{ {
networking.firewall.allowedTCPPorts = [ promtail_port ]; config,
pkgs,
...
}: let
promtail_port = 9080;
in {
networking.firewall.allowedTCPPorts = [promtail_port];
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"d /var/lib/promtail 0755 promtail promtail -" "d /var/lib/promtail 0755 promtail promtail -"
@ -19,26 +21,60 @@ in
positions = { positions = {
filename = "/var/lib/promtail/positions.yaml"; filename = "/var/lib/promtail/positions.yaml";
}; };
clients = [{ clients = [
url = "http://monitor.lab:3100/loki/api/v1/push"; {
}]; url = "http://monitor.lab:3100/loki/api/v1/push";
scrape_configs = [{ }
job_name = "journal"; ];
journal = { scrape_configs = [
path = "/var/log/journal"; {
labels = { job_name = "journal";
job = "promtail"; journal = {
host = config.networking.hostName; path = "/var/log/journal";
env = "proxmox"; labels = {
instance = "${config.networking.hostName}.lab"; job = "promtail";
host = config.networking.hostName;
env = "proxmox";
instance = "${config.networking.hostName}.lab";
};
}; };
}; relabel_configs = [
relabel_configs = [{ {
source_labels = ["__journal__systemd_unit"]; source_labels = ["__journal__systemd_unit"];
target_label = "unit"; target_label = "unit";
}]; }
{
}]; source_labels = ["__journal__hostname"];
target_label = "host";
}
{
source_labels = ["__journal__systemd_user_unit"];
target_label = "user_unit";
}
{
source_labels = ["__journal__transport"];
target_label = "transport";
}
{
source_labels = ["__journal_priority_keyword"];
target_label = "severity";
}
];
}
# {
# job_name = "secure";
# static_configs = {
# targets = ["localhost"];
# labels = {
# job = "secure";
# host = config.networking.hostName;
# env = "proxmox";
# instance = "${config.networking.hostName}.lab";
# __path__ = "/var/log/secure";
# };
# };
# }
];
}; };
}; };
} }

View file

@ -23,11 +23,11 @@ secrets/
### ✅ Encrypt a **new secret file** ### ✅ Encrypt a **new secret file**
```bash ```bash
sops --age <YOUR-AGE-PUBKEY> -e > secrets/myservice/secrets.yaml sops --age <YOUR-AGE-PUBKEY> secrets/myservice/secrets.yml
```` ````
Example: Example:
```bash ```bash
sops --age $(cat ~/.config/sops/age/keys.txt | grep public) -e > secrets/forgejo/secrets.yaml sops --age $(cat ~/.config/sops/age/keys.txt | grep public) -e > secrets/forgejo/secrets.yml
``` ```
> Press `i` to enter edit mode if prompted, or fill it using YAML format: > Press `i` to enter edit mode if prompted, or fill it using YAML format:
```yaml ```yaml
@ -40,7 +40,7 @@ db-password: supersecret
### ✏️ Edit secrets in an existing file ### ✏️ Edit secrets in an existing file
```bash ```bash
sops secrets/forgejo/secrets.yaml sops secrets/forgejo/secrets.yml
``` ```
--- ---

View file

@ -0,0 +1,17 @@
influxdb-password: ENC[AES256_GCM,data:dpU2unbGc0wLOwaOEbi572+gazA=,iv:wHp62USQS64nMbJm4kmJzD3kqyZlyhHKLH7kj1HDFxI=,tag:v+EeKBeh5OlxYJSP9VFsEw==,type:str]
influxdb-token: ENC[AES256_GCM,data:72bONbuZOBjwMqdDRRBHD9aQZBs=,iv:tTOYz+8c5Dm6bU7ADcYNI1o2m+o5LoVHl+PymBItV2I=,tag:jOY9PkeXGdlUN1OVI64rhg==,type:str]
sops:
age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA2SkNlWmxCYzNmd3lJUE04
d1NDbEltSVdXWW1PWHROSVU1Y0JMaHZHOWhrCm5DS1hQNTFqMnBUTUtDQXhuclNN
eGFFcWRJYXkrRTk2S1h6SDFxdnVDOEUKLS0tIDQ3MWszUlVUT3FzZkFzNkoxenBJ
aEdYYW1GOW5XL2VERGRlU3F6dWlXWG8KBtb7wu2RF6LVfMHG194CvQGoA0+9D9q8
hWw0iZlsx7AiSxTSWsBDtYLnTO4lEveBmkXegVT+nqwVLUM/I0dWPw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-06-15T17:41:53Z"
mac: ENC[AES256_GCM,data:U+I/dbYz+TOQ7wVo5+IFOGW/20B2UoT5vp+Iq0QLzUjLLGoBgntCK90p7kZHz4tQRl2IRADZIx5cOu88dTdl8FPWmd80iowmzg+ISvaHqiXLanaUEFYSeqeRISUPtLJf37QJnsGmJbbcbbB9gn1TpAvdcnJZG+DVIOlDyId6tgc=,iv:IkMU2sqrMzIt7A05zx45LSFiRaT2hPJ1nY+uAJNifuE=,tag:JwRjNKiM1zz1xoEJ+GgdlA==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2

View file

@ -0,0 +1,17 @@
keycloak_psql_pass: ENC[AES256_GCM,data:5qdsIrT74eME6DS1EyWmq20LJcZBz+OK1/TeNwD0,iv:srgAANq6e97jN0bFRwtQIg+KDDBsydpiBogNT2Qi5p0=,tag:iighlJLzqPXeK7kBTdoBQA==,type:str]
keycloak_admin_pass: ENC[AES256_GCM,data:Yp5yRGg/gR+jnerA5bXIFCbJ+0jRUqevKGB4tTlA,iv:Oa8rLcszJ9ijoRo9PDMdf/X+uaPnqF/I4uSZlW/8OmY=,tag:BmVQGV9y3K38n1WB4zCV7w==,type:str]
sops:
age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAvQ1gzQXdlK09LWGMyYVRo
bkNQaGgxcU1tcHJ5WUsxbnFjMlRDbE9YcW1JCjV3ZjNOb2srNm9GeHdsSVhoZDhW
N2RFbVdsQ0E2a3d6WjdacXBjVEpxWmcKLS0tIG80TzYvZXBJUC9wM2s3K3Yvampy
aG9xNDF5MERzTE11cGxaQkZFL0N5eGcKXTptz4nnwCkcCDnsM+41vbF0NyKm+S1F
AESwGFiVC67sqxo+pfK/SxgeUg0tbuL0Lh4Sgp2X7kFOuFOLBzDxfA==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-06-11T16:23:10Z"
mac: ENC[AES256_GCM,data:B0U5kmRutk4xE6+hX6eLu8Q5PFcmPC9MhcRaWLFxVr0Ci4M+oZqdpdnfE/u70E/yRRaXzRgAcfQjaz2gSmr51pXv236+OU+cAB4MjDHODZOiAKqJL0cLzkebv+NiN24YA9McWjrgVgRf5fdoD+fyYNZd04N1vOl4IB4pWXcRKNs=,iv:CRrTVfGfuw+Un1K0NkpwOKsNORBulv8Xt/ZfY6BIlRg=,tag:otoIb0P4JVGZ6bDbDfRezA==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2

View file

@ -0,0 +1,16 @@
mailserver-admin-pass: ENC[AES256_GCM,data:scvGZ1qXHK12hTx9rndkBQxYMzWtYII1GKGEu28D05h6LJyuUmObWuj+7o6KItoY8VnwVH59dDcJt9qB,iv:ZzgQTIX5wBKwDUBfCMmg6vBJ9Bq17vha2YRPdBzAdbs=,tag:nduE5Q+21MXg3cylv9NyvA==,type:str]
sops:
age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA1azlSQVV1clVabVcwalVy
MmpKYW9NbVBkU01ITlhrOFRuckFudTQrM1ZNCnVLUmtscFh0MGpaZEIwTDdwNHdm
VXVCaXVaUk9jMU9HUU9GT3R0RnBrdlEKLS0tIGtOK2xjaUVibWwvWHBMckFrYkI3
S2hXYjRvVGFZMHpOSWtJUXZGV2lUWTgKFcrcHpfRy8uDiPHQpoyC8I2UF5fbIpQK
h3AHOtkEUJqWA+RfEXg7XRtcVkyqV+fLZjeRlAKnUFDK3/uIAsby+A==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-06-12T18:29:27Z"
mac: ENC[AES256_GCM,data:G+EWfClTj7J1IFyss0HWXkrdJZtmzgQqEBlrgM+1KF9GkdXXmTBDjunD6ePicdIrqNvZ+CAXw7a+UekrZtIxU5H24msoixo8nIkM7PA/dHo7a75ANdGvgY4k/Ow5HGnWy4jU5DtkrKCns/hZclLhsNHyrp1dwCECEyx9usJ0hMA=,iv:xcMn382q85IP85p8CEbWWFuKCwYGZZON31m8Eb7XV14=,tag:f/8FjxFggKQKskwDffoNGA==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2

View file

@ -0,0 +1,24 @@
bazarr-api-key: ENC[AES256_GCM,data:QTDhVRLtp4vkaffp0EoiauHX+1a5yb+0crHnkLobCPc=,iv:p/E631awnfUzm3vXeq7JN77uFSmgT2Vf/bhRmEB0Isw=,tag:wt8v1Q6kH+mIVPZwTd3aCw==,type:str]
jellyseerr-api-key: ENC[AES256_GCM,data:0aLv9mPkq/dTKfAjVF7CeYgtI4I9dLQ9hM0nGR2VD7zP12ClRE2XLCPBrCVf/zAxVeLhhr0thp1nE7gT2pK8VCIBVJk=,iv:uiu3UOmUtBEiA0RiphTJyozfWm3J97Mgha76gLaJBPk=,tag:Wl565BonzL97Y6ZE4tWHuA==,type:str]
lidarr-api-key: ENC[AES256_GCM,data:ZUApwd9hXZ65skUV/myiC06SYMVtPCj7QkOgAxmvwiw=,iv:RZEzs1gPswdCpZUigp3Z5DUv/5tgxxSdP2sECRbmqD4=,tag:k6WNyJMvHalEDdP2P2f7uA==,type:str]
prowlarr-api-key: ENC[AES256_GCM,data:N8dqfBCTBXs+nsuJIrYoizyNuoSTCLTl9mkHT9UE5Y0=,iv:r1HSqc2D8st1Txeqqy3n65SXBBE3tJPOWpg2egGsI3c=,tag:CAXYIHM60fSEYczccUyLaw==,type:str]
radarr-api-key: ENC[AES256_GCM,data:BBl0dr1+gUawzzrgBbSNNt2mGJ4/E4lVP+k1gJqJr/Y=,iv:wXcz/KFUyk3ygbIRMzkNWI3ZhUmKrk9SffIiN6Ryifo=,tag:1zp+kqAg0Voi2Vm/P6yXlw==,type:str]
readarr-api-key: ENC[AES256_GCM,data:JzgdgaS5hXpqcR6pnC9a/Xpe6l8mZ02SzORFcs2+ct0=,iv:K1tK8F78WMk9c9x4ItCqbe0SQFH4/KiIJYY5JqJfkL0=,tag:0weodhzGOZSbaJj20Zszxg==,type:str]
sonarr-api-key: ENC[AES256_GCM,data:xTcw8QiL95iTScPyQmIRa6QJx94PLiCoLX2muQQFG+o=,iv:L+WnSc8l7R6BCK4tEyYyEgcmVTlhRCUeyjvVV+Z5xQA=,tag:4paOxP0LkyPl4qfCQLsfOg==,type:str]
jellyfin-exporter-config: ENC[AES256_GCM,data:5uXoVKlLXgoky0cHt+JSo682TJ4vHOjkFx73uvsZamlxgbpe6nlbUDKBtILzYohLKWCHnBsL0AC7r7MhD+AK5HJnkhFsDYqqt5lGETqir88MUstIKxuv/mfvzoMla6SvSwgJZnrfGaBW/72PjJ46vgiEA0NtOK6dOdnjyYXD7GNlgl1fcUjtfKAqcZuf9qea0cK7O+PRrzRfnKn9O9XBe2Tq/cff+7u/jU0ZeGSG60wEV3FBAYrbjQ0Bl/PkV28naWHpt4ThXMf4GAnJgh51Sg+zOAUlmP+8FobCFUAJ9W64jojiN9yezX6rFU+diwRMHxVV2UGSKOgOM4glkFGUHfuQzZoZN4CdOnwpeB3FEqMHu3QLDJcRKuH/gm7zgga698oENCNw367YA22J2mu+gu6+aydB0PmUO2F/F+R2ioaXqYgybB2g/DPQwwtqds6p0mEem83RycLJrKKpowG5DUhUciu/DW/Vs7iTSAZnX2MYD/Govl16rodpDzZRbBdmzpp7EZOTG+EC3ISXdjwtxH7nHqBCno0lZJeENunlFHT4/4rg54e1zLcfAvL6pBTuqAQjf93ZqxC+ylj5eNtmOvzlTmMLqv1iUaEMCYJz3WtM0VQtZyGFNCpTsDOtJMbAgDEg7zSr/LiMtpJ7MyKkrVLv+iEcomhj/6jSyYpJt6jPDEQNs69iwXUmXI4f87iKXxf2ZWZ+4DNCdn3fzVHh18l/Vfp0TBcd/00zlnJ4QKXwMMcojQhAAy/djCGUrmKF7dLmU7Bg6lODsDRoLzE+wNmAEZEqVXHNdu8knq2PKkW1UQtS/1EMndS7B+zY0eV9ZV+sJ3+TjFZSmrx2Eb4iLvT3dTonmdzbfCsl4OuMn6n0N1uRP+tRJEWaHp8g5jqDZSbQMYLjVe3XAyGvlNXmQaVqyME8NhNfT60Af8jHir7PgThwZxxVB+K3uyYy2GY04o9o9WiREVZusau8cUUFRUC6xKsLogB4t1LPyMtfZS7xNsXDHFW2cvQhm5b8IqhfEoMLvwMt7FZm/oZ+HMkGFHMaCi3C1PWU3J6H9Z+fz0TTC2rg9JWr0j/Dk5gUHvTZplgiU4ynbSCATCDSYPrveipqgIcMlfCfs4ztPsObnzKEt+Be1SU9j+8Hq8IpEdhGXQEt7Y0tQcuYoM/YuRs7BCd+ib/KotP1ZWDmL92bqwY4NowH357qBMv28pP2H0ilssZCGeKffutOljzBJntysmmvDnOUj92QBRBopx2zq8avcFBIZqtxJq/1Yk6LAcDZYvkOVzT+shv//UJ8H8tzCr7XQZEbkdDdF00cpvS3tNdMFjho7JPsN49RLZh39/RXnwFkp3noJ2iHp/X1laafSx4UNflR7QqSo59yjdDh9Q66dZBc5JeOCVhKon5Nq2CVQDPT27dlP8lteOnRwuWPE85f1amAnLCNipgFIX5ImobejXSzXv41GzNdcC01xJeGDcOP8HlFCyXOF3UgtBEyq+9PPanhiKUALRlytBBzV6s7klRGJ+qKsJ9yO3Ef62X7AyT6iz4ElXp42esKrJXdRbwTPo9l0KfwxEDli0bYiQSmuqkedMiJtaQc2ISGP8+ZHVNO33h0gPr41dN0/6tiei6NTsOiOTMGxpgIchY4mX+RkwfDcV+OmIXPxflUGf6mb0bsCmzBJfNXWiMh1Gz+xlyxkdTdx33Jr26gFr2rsLzioAU89N7pkVZ3wFbuir80y15LWQ6/oOHz8q/cMBFz55HdgDeHflckg1sx+MF2ECl+spu6ZtaUJANldDEH5qUblDsXlL2JrRW0LebwoRKyJJl0oGqQR2ZtgFjcABjstkJgmdyBnIuyZczqp+YEbGl6uuqpIh+h+8Y9AzZUYSv7Tz7thH4oSkkQfjOruIljqb7xD2y6/g9bJKjnCFfzB5wZHQpHtqeNucgGNORzno6Wo+qDERr/veeIeZ
SvCw0JnrKw5wf+e7pxIpDkgBnDtXhtwvsqRjywEBYbj2K2ieirB7qcQTz4Q5ZWosDNIZKOBOTx9RKv,iv:bG+NpKvflSZUf02iZ6tIi6jHeFOO4a9YW/BlHfwBuk4=,tag:dVHnB7zy9HXiYlvba6/Fug==,type:str]
nixarr-vpn-conf: ENC[AES256_GCM,data:SGg0p4lEWrqUu3g1hIkYskk970mxx6W6d+tbFC6y4gsvg3O6dmApkm2jDU2BWppkbQ4FQvcq/xz8t6tJhFGUCTJ5OOGJJ6vm+1jLf7FVe+2MPIwLhvr6plfTfkFwOXBb/YUBoxNE5PaiKZG2BTckt/cFkM1QZjTt+IAP+1xHgWCUjk5BPEIvbZp0A95gDmExhPTunZPHH4eoG9aB4SMtmjbzs/F6LiFH/F7S74r6p0y/K5lXWvVrH5t0s+rcuwljoGq6WiiOn6AFLq5NUBoQ0zeW/Cr8pPwcDCeKeBaHiqlVFPCNdXTe/p2Z9g+ZO7qA9EaPS0dhD2ANso9mfiCXFfIFW4ir1+5kOs7ckSXGNSLeWw22CLoumyJ4wb/hOjNC2NrCDG2o986WODmkAjlO83FKlr2Tc45/VRPrslUb/JBzIQqvb6d61Bt6xc78Dh5he7NSsEkHrrheQAyp03+fCnvAIINAd5xtreI=,iv:ep9F4DMjOIxg8cEzehNEFZQy0ybVFkcOHzUcF79OrAE=,tag:vt1HDE7Kdb7QiiyvsY6BYQ==,type:str]
sops:
age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6empmamp2ckVjaVB1RDgx
RHJlWGNvYmg2dzBXVGc5K29OTWVGNk9oNEFVCnhMbk1ib3ZFMFJIN1ZJZUNsR3ky
ZXRrS1pYNzNvRUVzd0x3ZUJ2eis3VW8KLS0tIDlxa3cwMXI0blVFcFlZR0hDUklQ
cnBmWXlhYlNNQmMrQ3ZPelhsdlNnMEkKqCUzSjxZFLKy35B5CcFjkv0jW+EBKGSP
oleRKQSVsyraOVmnZ8UjBIRh8X5jCEWbMQ9VbH6aqsvo0ESk18XXbw==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-06-18T17:00:52Z"
mac: ENC[AES256_GCM,data:e/BhM6dLTjsmuXXpXGRHVVcKMkGKJeLD78MXFsJ9QcDMXwazImPCuwsIAHBoJU2Krwdp9HWX2wRpNy1r1WIUCCGsnPK5KACyfNCBUlaMQS0XxfwGYiebS36+Si/gQfJ6hW3ue+TSJDH4zJ2+rMptcjCmMtzNbNnmi+Fij6Q1Zk0=,iv:TSFlP7vuBX6ly1V2vsKEf9KQKfZsskPDCswalts1tFA=,tag:1i7DVs3T+8Xpg+cbtIwyzA==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2

View file

@ -10,7 +10,7 @@ sops:
UHl6emN0My8wcFZWYlZEaElrb2NidjgKlZols9SJQxgaoOdJJxghqlACBcwuFs94 UHl6emN0My8wcFZWYlZEaElrb2NidjgKlZols9SJQxgaoOdJJxghqlACBcwuFs94
IGAOoQVUSFhMCWzyXqAQ/1/VkbWqfiUmvqDa3ulEK2Ri+1F+u3mB1Q== IGAOoQVUSFhMCWzyXqAQ/1/VkbWqfiUmvqDa3ulEK2Ri+1F+u3mB1Q==
-----END AGE ENCRYPTED FILE----- -----END AGE ENCRYPTED FILE-----
lastmodified: "2025-06-06T21:21:32Z" lastmodified: "2025-06-17T21:24:16Z"
mac: ENC[AES256_GCM,data:YS7BLFXkQ/A5PVLVOyMaqRHGavY0YttFps3njzSiYgBUa4VfPHqMcl2fW5vMec5MwM3GKPFGtrSEZKK1NVqLxUWZrfIF6ugAZ4vhRCyWe1Kze2Zs2S0ia2C3mUdhQR2wb7M7YzohI/e7PDZo0UcrcG3YeEzS5NL7qb0hzFsrGLY=,iv:kqzD06q5X0ZkZ1sIoUQz05b6QRDWQVsPqQYxPP2OAl8=,tag:eexvJspUxpDpwJqU1zEMnA==,type:str] mac: ENC[AES256_GCM,data:t7sdBMtlRupTD2oF3kAlBiWuxuZNMEeLu/xLi5QUmVVdcnb0bednZrXUtRti0mTSqXhwsgEM6j67SVb+vRZh7l1ONixoWrQ+EUcKkCSPcw7fFOwBjCY2jy8ttMWNRHqimM93MmxhX0E5KkoS6ZaB9GDi1vDCI51gCR07y/CBCk8=,iv:A+dG00zh9UkIRxsGm6nYjsZAKfCY6VMTwOqsLoFJLcc=,tag:wB8e4/JfwysRjvRyPuP/Pw==,type:str]
unencrypted_suffix: _unencrypted unencrypted_suffix: _unencrypted
version: 3.10.2 version: 3.10.2

View file

@ -0,0 +1,17 @@
traefik-env: ENC[AES256_GCM,data:FH0VWUoGKtijZpU6tpOLkhc40/KPAbs4rTIBLchOWBF1cnWCMBoR8F1xn6AROUuW6OTtXjBTwvPzM+8qHYOR07kGawMATtRHAhy5Qd0AYObrjZRfw96atpEkC8lb6t3Fwwu+4MekX7IFcHU3hlYoS0RUWkx5FdZGiF4fHxz/0fRYOL8jfw==,iv:9SkU5yZNejjIMBf5KsycYek3z3X4KRN9fFor4oYdGZM=,tag:6U+cT8wxFaqrJbWNKWGcog==,type:str]
oauth2-proxy-env: ENC[AES256_GCM,data:SvjM/SOzuYUms98hBYWUivD87ERd26VQOowmQS1/SZfcwiziaxHx9bzj/i0a+ta5aJ7qLYj6a5+KZwo+W5HzgCNs4dIExJW/7N8c+MB6dk64ILqXwfPt7PuOXA+txJGj0dKHDz1nYJkE/E9U1CpDDr3bzRATSQJoujsTUISHMJUBAo0DUBeM6A==,iv:XRz6hgU67S1HfLWDjpc83nekm3bSQ1MEtnsvyXsONU4=,tag:Up54ZP6b61/js042C2py9w==,type:str]
sops:
age:
- recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPUkJJaXpFdmllZHFjRlgr
OS9XaStIWXhPb3J1bXU4QkQ4cWN4NnBGdVF3CnJkQk5TWHFtOGxnaXJzNlN1dWgv
YjNhQVhvY1l0SE43ZE9NSi9sSCttak0KLS0tIDdZTTIrcGhLMW1aY2tucWhmQkhI
OXhMbmNUbEQ5M251YjBQTmprekZ4U2cK7V4F30TK9fucNhlgyCjsU2mQUVtWWSvu
ZAssFXCXI9mABbyXzf/sDYwdBWuPWkoSdRnScnbKbzPzzniYINGqeg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-06-11T23:36:11Z"
mac: ENC[AES256_GCM,data:lpnAgSEzyPgdfR2IdVHwqK1S6rZ3Bcp5DZwixNdzzGCVUzhTblFIPczN+ogWTx5lXwZf8C7QmbQ9n5lMsh3xURkH7xgZO21BPfU/s9lKsHvE+QwPXrfg7MSe7wr6iuDouXImRsMXbpn36Lo6ocx8OW9vMGjBk2ZBfJa0UxCsl/U=,iv:3HJWy/yh+vUYYl1qEiuDymiFncyDWyD6JZJ44FhCrXA=,tag:XbWhDQZrIaVKlUoxk3lwXg==,type:str]
unencrypted_suffix: _unencrypted
version: 3.10.2

View file

@ -1,11 +1,16 @@
{ config, pkgs, modulesPath, lib, ... }:
{ {
config,
pkgs,
modulesPath,
lib,
...
}: {
# Pull in all the shared settings from configuration.nix # Pull in all the shared settings from configuration.nix
imports = [ imports = [
../configuration.nix ../configuration.nix
../modules/node-exporter.nix ../modules/node-exporter.nix
../modules/promtail.nix ../modules/promtail.nix
../users/plasmagoat.nix ../users/plasmagoat.nix
../secrets/shared-sops.nix
]; ];
} }

View file

@ -1,15 +0,0 @@
{ config, pkgs, modulesPath, lib, ... }:
{
# Pull in all the shared settings from configuration.nix
imports = [
./base.nix
];
config = {
environment.systemPackages = with pkgs; [
docker
docker-compose
];
};
}

7
proxmox-infra/.gitignore vendored Normal file
View file

@ -0,0 +1,7 @@
# proxmox-infra/.gitignore
.terraform/
*.tfstate
*.tfstate.*
crash.log
*.tfvars

24
proxmox-infra/.terraform.lock.hcl generated Normal file
View file

@ -0,0 +1,24 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/telmate/proxmox" {
version = "3.0.2-rc01"
constraints = "3.0.2-rc01"
hashes = [
"h1:571ROPuTMC0w5lr9hbUXi7NVLsG3SpmZxXXZx8cAT+Q=",
"zh:34d264243a4513f4e30c01fb37cc6a3e592d7823dfd182c5edfb170ac7b7de3a",
"zh:544428311ad20fbb3ad2cd854e893bbf036023cb57c3acc5093d141976dac670",
"zh:5c2396b328edee8de7ac144c15a6b7e668e81063699bc8c110d7c39fb8da70e9",
"zh:5ca8e33476ad06a0259071120a59477e8f107f30c1178ea7b9f6cafe1a461ade",
"zh:5ea56eb8275edc754a01a0180750e9c939cd997d3a50659617770211f4337da9",
"zh:9dd3482df6bbe00a4a6152be3567b6c08d35c3644a327a1f5ac30fd95ccd449f",
"zh:a76075fafadcc94a825151aff169bae4e0c05e3c7717e16dcdcf16ffa61a0780",
"zh:b1d95f97b22f671db762f7adf428b409e6736c078bcf267d8391985b8847d6e3",
"zh:cc94255cd1b18e6a341c15089015c457c8c639c25c426b07f278d5ea9850b3b5",
"zh:ce991103cb69b0b3e275127e3ab92c88bb3b6b0f4e5a2cb082aeaef70a7f7d61",
"zh:d24838bce87b38e12544a1329f5ad30e2be045968e639a3f4ddd5c84aa648e04",
"zh:e106ebd4eea8d62d62e62f261a262febc615e17466b54ac18f7e65c7e79e0008",
"zh:e254ca76c95e6e92da973b7bddc36bfa0a1e31d7c7e758ef4b01315db969388b",
"zh:f1d1d5f4c39267cacebe0ab7e9e06caf9692707f3b5369685541b65bc8b840ce",
]
}

52
proxmox-infra/main.tf Normal file
View file

@ -0,0 +1,52 @@
# # This calls the module to define a new VM (e.g., if you were creating one)
# resource "proxmox_vm_qemu" "sandbox" {
# name = "sandbox"
# desc = "OpenTofu testing"
# target_nodes = [var.proxmox_node]
# vmid= 100
# full_clone = true
# clone_id = 9100
# agent = 1
# scsihw = "virtio-scsi-single"
# ciuser = "root"
# ipconfig0 = "ip=dhcp"
# cpu {
# cores = 2
# }
# memory = 2048
# disks {
# virtio {
# virtio0 {
# disk {
# size = "9452M"
# storage = "local-lvm"
# }
# }
# }
# ide {
# ide2 {
# cloudinit {
# storage = "local-lvm"
# }
# }
# }
# }
# network {
# id = 0
# bridge = "vmbr0"
# model = "virtio"
# }
# serial {
# id = 0
# }
# }
# output "sandbox_vmid" {
# description = "sandbox VM ID"
# value = proxmox_vm_qemu.sandbox.id
# }
# output "sandbox_ipv4" {
# description = "sandbox public IPv4 address"
# value = proxmox_vm_qemu.sandbox.default_ipv4_address
# }

0
proxmox-infra/outputs.tf Normal file
View file

View file

@ -0,0 +1,9 @@
# Proxmox connection settings for the Telmate provider.
provider "proxmox" {
  # NOTE(review): TLS verification is disabled — acceptable only for a
  # self-signed homelab endpoint; prefer installing a trusted certificate.
  pm_tls_insecure = true
  pm_api_url = var.proxmox_api_url
  pm_user = var.proxmox_user
  # NOTE(review): the CI workflows export PROXMOX_PASSWORD, but Terraform only
  # auto-populates variables from TF_VAR_proxmox_password — verify how this
  # variable actually receives its value in CI.
  pm_password = var.proxmox_password
  # Or use API token for better security:
  # pm_api_token_id = var.proxmox_api_token_id
  # pm_api_token_secret = var.proxmox_api_token_secret
}

106
proxmox-infra/sandbox.tf Normal file
View file

@ -0,0 +1,106 @@
# proxmox_vm_qemu.sandbox:
# NOTE(review): this block appears to be pasted from `terraform show` output.
# Several fields below (id, current_node, linked_vmid, reboot_required,
# unused_disk, vm_state) are normally computed/read-only attributes of the
# Telmate provider — confirm this configuration actually plans cleanly
# rather than erroring on unsupported arguments.
resource "proxmox_vm_qemu" "sandbox" {
  agent = 1
  bios = "seabios"
  # NOTE(review): boot order is a single space — presumably copied from
  # imported state; verify this is intended.
  boot = " "
  ciuser = "root"
  # Legacy top-level cores/sockets are zeroed; the cpu {} block below holds
  # the effective CPU topology.
  cores = 0
  current_node = "proxmox-01"
  define_connection_info = false
  desc = " generated by NixOS"
  force_create = false
  full_clone = false
  hotplug = "network,disk,usb"
  id = "proxmox-01/qemu/100"
  ipconfig0 = "ip=dhcp"
  kvm = true
  linked_vmid = 0
  memory = 2048
  name = "sandbox"
  numa = false
  onboot = true
  protection = false
  qemu_os = "l26"
  reboot_required = false
  scsihw = "virtio-scsi-single"
  sockets = 0
  # Public key injected via cloud-init for root logins.
  sshkeys = <<-EOT
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCljEOf8Lv7Ptgsc1+CYzXpnrctPy7LFXXOyVZTI9uN7R4HY5aEdZTKEGSsU/+p+JtXWzzI65fnrZU8pTMG/wvCK+gYyNZcEM4g/TXMVa+CWZR3y13zGky88R7dKiBl5L00U4BePDD1ci3EU3/Mjr/GVTQHtkbJfLtvhR9zkCNZzxbu+rySWDroUPWPvE3y60/iLjBsh5ZmHo59CW67lh1jgbAlZjKWZzLWo0Bc5wgbxoQPWcO4BCh17N4g8llrRxGOwJzHeaipBnXn9J1AGIm9Zls6pxT9j6MKltcCOb7tQZwc3hlPOW2ku6f7OHTrziKw37drIDM0UDublAOcnIfBjE+XuWsp5t6ojdIzIDMrzaYW2MyMA3PHuf7VESUQdP4TZ1XUwtRRzOjn5AZJi9DPoowPaxKL92apRpFG+ovaFpWZsG7s8NWXHAC79IpgMUzscEmM15OMQ36RQ5xeytGDVCmVT8DbHGrMT9HUfR5fBSWD3aDQiOOiIIhrbY35m+U65Sz/GpZMk6HlaiV3tKNB0m+xE+84MUEmm4fFzt3B/0N4kscMArnLAm/OMUblihPwbKAUAUWErGRBfP+u+zjRCi1D9/pffpl2OQ2QIuVM82g6/EPa1ZsXZP+4iHooQoJbrqVGzkfiA1EKLfcdGfkP/O4nRl+D5UgkGdqqvm20NQ== root@proxmox-01
  EOT
  tablet = true
  target_nodes = [
    "proxmox-01",
  ]
  unused_disk = []
  vcpus = 0
  vm_state = "running"
  vmid = 100

  # Effective CPU topology: 2 cores on 1 socket, host passthrough type.
  cpu {
    cores = 2
    limit = 0
    numa = false
    sockets = 1
    type = "host"
    units = 0
    vcores = 0
  }

  disks {
    # Cloud-init drive on IDE bus slot 2.
    ide {
      ide2 {
        cloudinit {
          storage = "local-lvm"
        }
      }
    }
    # Root disk on virtio0, raw format on local-lvm.
    virtio {
      virtio0 {
        disk {
          backup = true
          discard = false
          format = "raw"
          id = 0
          iops_r_burst = 0
          iops_r_burst_length = 0
          iops_r_concurrent = 0
          iops_wr_burst = 0
          iops_wr_burst_length = 0
          iops_wr_concurrent = 0
          iothread = false
          linked_disk_id = -1
          mbps_r_burst = 0
          mbps_r_concurrent = 0
          mbps_wr_burst = 0
          mbps_wr_concurrent = 0
          readonly = false
          replicate = true
          size = "9452M"
          storage = "local-lvm"
        }
      }
    }
  }

  network {
    bridge = "vmbr0"
    firewall = true
    id = 0
    link_down = false
    # NOTE(review): hard-coded MAC from the existing VM; remove to let
    # Proxmox generate one for fresh clones.
    macaddr = "bc:24:11:a7:e8:2a"
    model = "virtio"
    mtu = 0
    queues = 0
    rate = 0
    tag = 0
  }

  # Serial console socket (required by many cloud images for console access).
  serial {
    id = 0
    type = "socket"
  }

  smbios {
    uuid = "37cd09d5-29a5-42e2-baba-f21b691130e8"
  }
}

View file

@ -0,0 +1 @@
{"version":4,"terraform_version":"1.9.1","serial":2,"lineage":"ecd6c5f8-5352-bf30-6117-d55763366399","outputs":{"sandbox_ipv4":{"value":"192.168.1.206","type":"string"},"sandbox_vmid":{"value":"proxmox-01/qemu/999","type":"string"}},"resources":[{"mode":"managed","type":"proxmox_vm_qemu","name":"sandbox","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" ","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"","ciupgrade":false,"ciuser":"root","clone":null,"clone_id":9100,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.206","default_ipv6_address":"2a05:f6c7:2030:0:be24:11ff:feb9:919f","define_connection_info":true,"desc":"OpenTofu testing","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[],"ide2":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"9452M","storage":"local-lvm","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","
hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/999","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":2048,"name":"sandbox2","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:b9:91:9f","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":false,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"51a93ec4-4afa-428b-911a-daab70390a8c","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.206","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":null,"startup":"","tablet":true,"tags":"v0.0.2","target_node":null,"target_nodes":["proxmox-01"],"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":999},"sensitive_attributes":[[{"type":"get_attr","value":"cipassword"}],[{"type":"get_attr","value":"ssh_private_key"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null}

View file

@ -0,0 +1,30 @@
# proxmox-infra/variables.tf
# Input variables for the Proxmox connection used by the Telmate/proxmox
# provider. Values come from a .tfvars file, TF_VAR_* environment variables,
# or an interactive prompt at plan/apply time.
variable "proxmox_api_url" {
  description = "The URL of the Proxmox API (e.g., https://192.168.1.10:8006/api2/json)"
  type = string
  # No default here, so OpenTofu will prompt or expect a .tfvars file/env var
}
# Identity used to authenticate against the Proxmox API; must include the
# realm suffix (@pam for Linux PAM users, @pve for Proxmox-native users).
variable "proxmox_user" {
  description = "Proxmox user (e.g., root@pam or user@pve)"
  type = string
  # NOTE(review): for CI use, an API token (user@realm!tokenid) is presumably
  # preferable to password auth — confirm the provider config in use supports it.
}
# Password for proxmox_user. Supplied out-of-band (the CI workflows in this
# repo pass it via a PROXMOX_PASSWORD secret).
variable "proxmox_password" {
  description = "Proxmox user password"
  type = string
  sensitive = true # Mark as sensitive to hide in logs
  # Caveat: `sensitive` only redacts the value from plan/apply output; the
  # value is still stored in plaintext in the state file.
}
# Cluster node that new VMs are scheduled onto (the elsewhere-visible state
# in this repo uses "proxmox-01").
variable "proxmox_node" {
  description = "The Proxmox node name where VMs will be deployed (e.g., 'pve')"
  type = string
}
# Example for templates - you might have different templates
# VMID of an existing cloud-init-enabled NixOS template to clone from
# (the committed state shows clone_id 9100 being used for this purpose).
variable "nixos_template_id" {
  description = "VMID of the nixos cloud-init template"
  type = number
  # Example: default = 100
  # Intentionally no default: forces the caller to pick the template explicitly.
}

View file

@ -0,0 +1,9 @@
# versions.tf
# Core and provider version constraints for this configuration.
terraform {
  # Fail fast with a clear error on old terraform/tofu binaries instead of a
  # cryptic provider-schema mismatch. The committed state was written by
  # 1.9.1, so ">= 1.0" is safely backward-compatible for current users.
  required_version = ">= 1.0"

  required_providers {
    proxmox = {
      source = "Telmate/proxmox"
      # NOTE: release-candidate pin; bump to the final 3.0.x release once
      # published upstream.
      version = "3.0.2-rc01"
    }
  }
}