cleanup
Some checks failed
Test / tests (push) Failing after 2m13s
/ OpenTofu (push) Successful in 13s

This commit is contained in:
plasmagoat 2025-07-28 12:08:32 +02:00
parent bcbcc8b17b
commit 3362c47211
29 changed files with 44 additions and 2138 deletions

41
hosts/monitor/default.nix Normal file
View file

@ -0,0 +1,41 @@
{
  config,
  name,
  ...
}: {
  # Restic repository password provisioned via sops-nix.
  sops.secrets."restic/default-password" = {};

  homelab = {
    enable = true;
    hostname = name;
    tags = [name];

    monitoring.enable = true;
    motd.enable = true;

    backups = {
      enable = true;
      backends.restic = {
        enable = true;
        repository = "/srv/restic-repo";
        passwordFile = config.sops.secrets."restic/default-password".path;
      };
    };

    # NOTE: these are homelab-scoped service toggles (homelab.services.*),
    # nested inside the `homelab` attrset — not the top-level NixOS
    # `services` namespace.
    services = {
      prometheus.enable = true;
      gatus = {
        enable = true;
        ui = {
          title = "Homelab Status Dashboard";
          header = "My Homelab Services";
        };
      };
    };
  };

  system.stateVersion = "25.05";
}

View file

@ -1,126 +0,0 @@
# modules/lib/helpers.nix
#
# Pure helper functions shared by the homelab modules. Takes only `lib`;
# produces an attrset of four helpers.
{lib, ...}: let
  inherit (lib) foldl' mkOption mkEnableOption mkIf types concatStringsSep mapAttrsToList groupBy head;

  # Concatenate one list-valued field across a list of configs.
  collect = getter: configs: foldl' (acc: c: acc ++ getter c) [] configs;
in {
  # Merge global configurations from multiple sources into one aggregated view.
  mergeGlobalConfigs = configs: {
    monitoring.endpoints = collect (c: c.monitoring.endpoints) configs;
    backups.jobs = collect (c: c.backups.jobs) configs;
    reverseProxy.entries = collect (c: c.reverseProxy.entries) configs;
  };

  # Build a NixOS module for a simple homelab service with the standard
  # backup / reverse-proxy / monitoring wiring pre-attached.
  createServiceModule = {
    name,
    port,
    hasMetrics ? true,
    hasWebUI ? true,
    dataDir ? "/var/lib/${name}",
  }: {
    config,
    lib,
    pkgs,
    ...
  }: let
    cfg = config.services.${name};
  in {
    options.services.${name} = {
      enable = mkEnableOption "${name} service";
      port = mkOption {
        type = types.port;
        # Defaults come from the template arguments above.
        default = port;
        description = "Port for ${name}";
      };
      dataDir = mkOption {
        type = types.str;
        default = dataDir;
        description = "Data directory for ${name}";
      };
      enableMetrics = mkOption {
        type = types.bool;
        default = hasMetrics;
        description = "Enable metrics endpoint";
      };
      exposeWeb = mkOption {
        type = types.bool;
        default = hasWebUI;
        description = "Expose web interface";
      };
    };
    config = mkIf cfg.enable {
      # Register the service's backup, proxy and monitoring entries with
      # the global homelab aggregation.
      homelab.global = {
        backups.jobs = [
          {
            name = "${name}-data";
            backend = "restic";
            paths = [cfg.dataDir];
            schedule = "daily";
          }
        ];
        reverseProxy.entries = mkIf cfg.exposeWeb [
          {
            subdomain = name;
            port = cfg.port;
          }
        ];
        monitoring.endpoints = mkIf cfg.enableMetrics [
          {
            name = name;
            port = cfg.port;
            path = "/metrics";
            jobName = name;
          }
        ];
      };
    };
  };

  # Turn a list of reverse-proxy entries into an nginx virtualHosts attrset.
  generateNginxConfig = proxyEntries: domain: let
    mkVHost = entry: {
      "${entry.subdomain}.${domain}" = {
        enableACME = entry.enableSSL;
        forceSSL = entry.enableSSL;
        locations."${entry.path}" = {
          # NOTE(review): relies on entry.targetHost / entry.websockets /
          # entry.customHeaders being present on each entry — confirm the
          # reverse-proxy entry type actually declares targetHost.
          proxyPass = "http://${entry.targetHost}:${toString entry.port}";
          proxyWebsockets = entry.websockets;
          extraConfig = ''
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            ${concatStringsSep "\n" (mapAttrsToList (header: value: "proxy_set_header ${header} ${value};") entry.customHeaders)}
          '';
        };
      };
    };
  in
    # Later entries win on subdomain collisions (left-fold with //).
    foldl' (acc: entry: acc // mkVHost entry) {} proxyEntries;

  # Group monitoring endpoints by job name and emit Prometheus scrape configs.
  generatePrometheusConfig = endpoints: let
    byJob = groupBy (e: e.jobName) endpoints;
    toJobConfig = jobName: members: {
      job_name = jobName;
      # Interval/path are taken from the first endpoint of the job; all
      # members of one job are assumed to share them.
      scrape_interval = (head members).scrapeInterval;
      metrics_path = (head members).path;
      static_configs = [
        {
          targets = map (e: "${e.targetHost}:${toString e.port}") members;
          labels = foldl' (acc: e: acc // e.labels) {} members;
        }
      ];
    };
  in
    mapAttrsToList toJobConfig byJob;
}

View file

@ -1,187 +0,0 @@
# modules/backup-manager.nix
#
# Declarative backup management: turns `homelab.global.backups.jobs` into
# systemd oneshot services plus matching timers, using restic or borg.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.backups;
  globalCfg = config.homelab.global;
  # Build the systemd service attrset for one backup job.
  # The generated script runs the optional pre/post hooks, performs the
  # backup, and applies the job's retention policy.
  createBackupService = job: let
    serviceName = "backup-${job.name}";
    allExcludes = globalCfg.backups.globalExcludes ++ job.excludePatterns;
    excludeArgs = map (pattern: "--exclude '${pattern}'") allExcludes;
    backupScript =
      if job.backend == "restic"
      then ''
        #!/bin/bash
        set -euo pipefail
        ${optionalString (job.preHook != null) job.preHook}
        # Restic backup
        ${pkgs.restic}/bin/restic backup \
          ${concatStringsSep " " (map (path: "'${path}'") job.paths)} \
          ${concatStringsSep " " excludeArgs} \
          --tag "host:${globalCfg.hostname}" \
          --tag "job:${job.name}" \
          --tag "env:${globalCfg.environment}"
        # Apply retention policy
        ${pkgs.restic}/bin/restic forget \
          --keep-daily ${job.retention.daily} \
          --keep-weekly ${job.retention.weekly} \
          --keep-monthly ${job.retention.monthly} \
          --keep-yearly ${job.retention.yearly} \
          --prune
        ${optionalString (job.postHook != null) job.postHook}
      ''
      else if job.backend == "borg"
      then ''
        #!/bin/bash
        set -euo pipefail
        ${optionalString (job.preHook != null) job.preHook}
        # Borg backup
        ${pkgs.borgbackup}/bin/borg create \
          --stats --progress \
          ${concatStringsSep " " excludeArgs} \
          "::${globalCfg.hostname}-${job.name}-{now}" \
          ${concatStringsSep " " (map (path: "'${path}'") job.paths)}
        # Apply retention policy
        ${pkgs.borgbackup}/bin/borg prune \
          --keep-daily ${job.retention.daily} \
          --keep-weekly ${job.retention.weekly} \
          --keep-monthly ${job.retention.monthly} \
          --keep-yearly ${job.retention.yearly}
        ${optionalString (job.postHook != null) job.postHook}
      ''
      else throw "Unsupported backup backend: ${job.backend}";
  in {
    ${serviceName} = {
      description = "Backup job: ${job.name}";
      after = ["network-online.target"];
      wants = ["network-online.target"];
      serviceConfig = {
        Type = "oneshot";
        User = "backup";
        Group = "backup";
        ExecStart = pkgs.writeScript "backup-${job.name}" backupScript;
        # Repository / credential env vars (RESTIC_*, BORG_*) are expected
        # here; see the environment.example template installed below.
        EnvironmentFile = "/etc/backup/environment";
      };
    };
  };
  # Build the systemd timer attrset for one backup job.
  # FIX: `systemd.timers.<name>` already generates "<name>.timer", so the
  # attribute name must be the bare service name. The previous
  # `"${serviceName}.timer"` key produced a unit called
  # "backup-<job>.timer.timer" that never triggered the service (and did
  # not match the "backup-<job>.timer" name that backup-status checks).
  createBackupTimer = job: let
    serviceName = "backup-${job.name}";
  in {
    ${serviceName} = {
      description = "Timer for backup job: ${job.name}";
      wantedBy = ["timers.target"];
      timerConfig = {
        OnCalendar =
          if job.schedule == "daily"
          then "daily"
          else if job.schedule == "weekly"
          then "weekly"
          else if job.schedule == "hourly"
          then "hourly"
          else job.schedule; # Assume it's a cron expression
        Persistent = true;
        RandomizedDelaySec = "15min";
      };
    };
  };
in {
  options.homelab.backups = {
    enable = mkEnableOption "Backup management";
    restic = {
      repository = mkOption {
        type = types.str;
        description = "Restic repository URL";
      };
      passwordFile = mkOption {
        type = types.str;
        default = "/etc/backup/restic-password";
        description = "Path to file containing restic password";
      };
    };
    borg = {
      repository = mkOption {
        type = types.str;
        description = "Borg repository path";
      };
      sshKey = mkOption {
        type = types.str;
        default = "/etc/backup/borg-ssh-key";
        description = "Path to SSH key for borg repository";
      };
    };
  };
  # Only materialize anything when backups are enabled AND at least one job
  # is declared in the global config.
  config = mkIf (cfg.enable && globalCfg.enable && (length globalCfg.backups.jobs) > 0) {
    # Dedicated unprivileged user that runs all backup jobs.
    users.users.backup = {
      isSystemUser = true;
      group = "backup";
      home = "/var/lib/backup";
      createHome = true;
    };
    users.groups.backup = {};
    # Backup tools plus a small status helper on the system path.
    environment.systemPackages = with pkgs; [
      restic
      borgbackup
      rclone
      (pkgs.writeScriptBin "backup-status" ''
        #!/bin/bash
        echo "=== Backup Status ==="
        echo
        ${concatStringsSep "\n" (map (job: ''
          echo "Job: ${job.name}"
          systemctl is-active backup-${job.name}.timer || echo "Timer inactive"
          systemctl status backup-${job.name}.timer --no-pager -l | grep -E "(Active|Trigger)" || true
          echo
        '')
        globalCfg.backups.jobs)}
      '')
    ];
    # One service + one timer per declared job.
    systemd.services = lib.foldl' (acc: job: acc // (createBackupService job)) {} globalCfg.backups.jobs;
    systemd.timers = lib.foldl' (acc: job: acc // (createBackupTimer job)) {} globalCfg.backups.jobs;
    # Template for the operator-managed /etc/backup/environment file.
    environment.etc."backup/environment.example".text = ''
      # Restic configuration
      RESTIC_REPOSITORY=${cfg.restic.repository}
      RESTIC_PASSWORD_FILE=${cfg.restic.passwordFile}
      # AWS S3 credentials (if using S3 backend)
      AWS_ACCESS_KEY_ID=your-access-key
      AWS_SECRET_ACCESS_KEY=your-secret-key
      # Borg configuration
      BORG_REPO=${cfg.borg.repository}
      BORG_RSH="ssh -i ${cfg.borg.sshKey}"
      # Notification settings
      NOTIFICATION_URL=your-webhook-url
    '';
  };
}

View file

@ -1,8 +1,3 @@
{
# Custom NixOS modules exposed by this repository.
ente = import ./ente.nix;
global-config = import ./global-config.nix;
backup-manager = import ./backup-manager.nix;
# Service modules
services = import ./services;
}

View file

@ -1,462 +0,0 @@
# modules/global-config.nix
#
# Central `homelab.global` option namespace: per-host metadata plus
# aggregated service / monitoring / backup / reverse-proxy declarations.
# The merged result is exported to /etc/homelab/config.json, which the
# MOTD script (modules/motd) reads at login time.
{
config,
lib,
outputs,
...
}:
# NOTE(review): `outputs` is accepted but never referenced in this module —
# confirm whether it can be dropped from the argument set.
with lib; let
cfg = config.homelab.global;
# Service type definition
serviceType = types.submodule {
options = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable this service";
};
description = mkOption {
type = types.str;
description = "Human-readable description of the service";
};
category = mkOption {
type = types.enum ["monitoring" "networking" "storage" "security" "media" "development" "backup" "other"];
default = "other";
description = "Service category for organization";
};
dependencies = mkOption {
type = types.listOf types.str;
default = [];
description = "List of other homelab services this depends on";
};
ports = mkOption {
type = types.listOf types.port;
default = [];
description = "Ports this service uses";
};
tags = mkOption {
type = types.listOf types.str;
default = [];
description = "Additional tags for this service";
};
priority = mkOption {
type = types.int;
default = 100;
description = "Service priority (lower numbers start first)";
};
};
};
# Type definitions
# One scrapeable metrics endpoint; aggregated into config.json below.
monitoringEndpointType = types.submodule {
options = {
name = mkOption {
type = types.str;
description = "Name of the monitoring endpoint";
};
port = mkOption {
type = types.port;
description = "Port number for the endpoint";
};
path = mkOption {
type = types.str;
default = "/metrics";
description = "Path for the metrics endpoint";
};
jobName = mkOption {
type = types.str;
description = "Prometheus job name";
};
scrapeInterval = mkOption {
type = types.str;
default = "30s";
description = "Prometheus scrape interval";
};
labels = mkOption {
type = types.attrsOf types.str;
default = {};
description = "Additional labels for this endpoint";
};
};
};
# One declarative backup job; consumed by modules/backup-manager.nix.
backupJobType = types.submodule {
options = {
name = mkOption {
type = types.str;
description = "Name of the backup job";
};
backend = mkOption {
type = types.enum ["restic" "borg" "rclone"];
description = "Backup backend to use";
};
paths = mkOption {
type = types.listOf types.str;
description = "List of paths to backup";
};
schedule = mkOption {
type = types.str;
default = "daily";
description = "Backup schedule (cron format or preset)";
};
retention = mkOption {
type = types.attrsOf types.str;
default = {
daily = "7";
weekly = "4";
monthly = "6";
yearly = "2";
};
description = "Retention policy";
};
excludePatterns = mkOption {
type = types.listOf types.str;
default = [];
description = "Patterns to exclude from backup";
};
preHook = mkOption {
type = types.nullOr types.str;
default = null;
description = "Script to run before backup";
};
postHook = mkOption {
type = types.nullOr types.str;
default = null;
description = "Script to run after backup";
};
};
};
# One reverse-proxy exposure (subdomain -> local port).
reverseProxyEntryType = types.submodule {
options = {
subdomain = mkOption {
type = types.str;
description = "Subdomain for the service";
};
port = mkOption {
type = types.port;
description = "Internal port to proxy to";
};
path = mkOption {
type = types.str;
default = "/";
description = "Path prefix for the service";
};
enableAuth = mkOption {
type = types.bool;
default = false;
description = "Enable authentication for this service";
};
enableSSL = mkOption {
type = types.bool;
default = true;
description = "Enable SSL for this service";
};
customHeaders = mkOption {
type = types.attrsOf types.str;
default = {};
description = "Custom headers to add";
};
websockets = mkOption {
type = types.bool;
default = false;
description = "Enable websocket support";
};
};
};
# Helper functions for services
enabledServices = filterAttrs (name: service: service.enable) cfg.services;
servicesByCategory = category: filterAttrs (name: service: service.enable && service.category == category) cfg.services;
in {
imports = [
./motd
];
options.homelab.global = {
enable = mkEnableOption "Global homelab configuration";
hostname = mkOption {
type = types.str;
description = "Hostname for this system";
};
domain = mkOption {
type = types.str;
default = "procopius.dk";
description = "Base domain for the homelab";
};
environment = mkOption {
type = types.enum ["production" "staging" "development"];
default = "production";
description = "Environment type";
};
location = mkOption {
type = types.str;
default = "homelab";
description = "Physical location identifier";
};
tags = mkOption {
type = types.listOf types.str;
default = [];
description = "Tags for this system";
};
services = mkOption {
type = types.attrsOf serviceType;
default = {};
description = "Homelab services configuration";
example = literalExpression ''
{
prometheus = {
enable = true;
description = "Metrics collection and monitoring";
category = "monitoring";
ports = [ 9090 ];
tags = [ "metrics" "alerting" ];
};
traefik = {
enable = true;
description = "Reverse proxy and load balancer";
category = "networking";
ports = [ 80 443 8080 ];
tags = [ "proxy" "loadbalancer" ];
priority = 10;
};
}
'';
};
monitoring = {
endpoints = mkOption {
type = types.listOf monitoringEndpointType;
default = [];
description = "Monitoring endpoints exposed by this system";
};
nodeExporter = {
enable = mkOption {
type = types.bool;
default = true;
description = "Enable node exporter";
};
port = mkOption {
type = types.port;
default = 9100;
description = "Node exporter port";
};
};
};
backups = {
jobs = mkOption {
type = types.listOf backupJobType;
default = [];
description = "Backup jobs for this system";
};
globalExcludes = mkOption {
type = types.listOf types.str;
default = [
"*.tmp"
"*.cache"
"*/.git"
"*/node_modules"
"*/target"
];
description = "Global exclude patterns for all backup jobs";
};
};
reverseProxy = {
entries = mkOption {
type = types.listOf reverseProxyEntryType;
default = [];
description = "Reverse proxy entries for this system";
};
};
# NOTE(review): the three function-valued "helper" options below are
# unusual — they store functions as option values, and nothing in this
# file ever calls them. Confirm they have consumers before keeping them.
# Helper function to add monitoring endpoint
addMonitoringEndpoint = mkOption {
type = types.functionTo (types.functionTo types.anything);
default = name: endpoint: {
homelab.global.monitoring.endpoints = [
(endpoint // {inherit name;})
];
};
description = "Helper function to add monitoring endpoints";
};
# Helper function to add backup job
addBackupJob = mkOption {
type = types.functionTo (types.functionTo types.anything);
default = name: job: {
homelab.global.backups.jobs = [
(job // {inherit name;})
];
};
description = "Helper function to add backup jobs";
};
# Helper function to add reverse proxy entry
addReverseProxyEntry = mkOption {
type = types.functionTo (types.functionTo types.anything);
default = subdomain: entry: {
homelab.global.reverseProxy.entries = [
(entry // {inherit subdomain;})
];
};
description = "Helper function to add reverse proxy entries";
};
# Helper functions
# Read-only derived views over cfg.services, exposed as options.
enabledServicesList = mkOption {
type = types.listOf types.str;
default = attrNames enabledServices;
description = "List of enabled service names";
readOnly = true;
};
servicesByPriority = mkOption {
type = types.listOf types.str;
default =
map (x: x.name) (sort (a: b: a.priority < b.priority)
(mapAttrsToList (name: service: service // {inherit name;}) enabledServices));
description = "Services sorted by priority";
readOnly = true;
};
};
config = mkIf cfg.enable {
# Set hostname
networking.hostName = cfg.hostname;
# Configure node exporter if enabled
services.prometheus.exporters.node = mkIf cfg.monitoring.nodeExporter.enable {
enable = true;
port = cfg.monitoring.nodeExporter.port;
enabledCollectors = [
"systemd"
"textfile"
"filesystem"
"loadavg"
"meminfo"
"netdev"
"stat"
];
};
# Automatically add node exporter to monitoring endpoints
homelab.global.monitoring.endpoints = mkIf cfg.monitoring.nodeExporter.enable [
{
name = "node-exporter";
port = cfg.monitoring.nodeExporter.port;
path = "/metrics";
jobName = "node";
labels = {
instance = cfg.hostname;
environment = cfg.environment;
location = cfg.location;
};
}
];
# Export configuration for external consumption
# (read by the homelab-motd script at /etc/homelab/config.json).
environment.etc."homelab/config.json".text = builtins.toJSON {
inherit (cfg) hostname domain environment location tags;
services =
mapAttrs (name: service: {
inherit (service) enable description category dependencies ports tags priority;
})
cfg.services;
enabledServices = enabledServices;
servicesByCategory = {
monitoring = servicesByCategory "monitoring";
networking = servicesByCategory "networking";
storage = servicesByCategory "storage";
security = servicesByCategory "security";
media = servicesByCategory "media";
development = servicesByCategory "development";
backup = servicesByCategory "backup";
other = servicesByCategory "other";
};
monitoring = {
endpoints =
map (endpoint: {
name = endpoint.name;
url = "http://${cfg.hostname}:${toString endpoint.port}${endpoint.path}";
port = endpoint.port;
path = endpoint.path;
jobName = endpoint.jobName;
scrapeInterval = endpoint.scrapeInterval;
labels =
endpoint.labels
// {
hostname = cfg.hostname;
environment = cfg.environment;
};
})
cfg.monitoring.endpoints;
};
backups = {
jobs = cfg.backups.jobs;
};
reverseProxy = {
entries =
map (entry: {
subdomain = entry.subdomain;
url = "http://${cfg.hostname}:${toString entry.port}";
port = entry.port;
path = entry.path;
domain = "${entry.subdomain}.${cfg.domain}";
enableAuth = entry.enableAuth;
enableSSL = entry.enableSSL;
customHeaders = entry.customHeaders;
websockets = entry.websockets;
})
cfg.reverseProxy.entries;
};
};
# Create a status command that shows service information
# NOTE(review): the script below is commented out, so this list is
# currently empty — either finish it or remove the dead code.
environment.systemPackages = [
# (pkgs.writeScriptBin "homelab-services" ''
# #!/bin/bash
# echo "🏠 Homelab Services Status"
# echo "=========================="
# echo
# ${concatStringsSep "\n" (mapAttrsToList (name: service: ''
# echo "${name}: ${service.description}"
# echo " Category: ${service.category}"
# echo " Status: $(systemctl is-active ${name} 2>/dev/null || echo "not found")"
# ${optionalString (service.ports != []) ''
# echo " Ports: ${concatStringsSep ", " (map toString service.ports)}"
# ''}
# ${optionalString (service.tags != []) ''
# echo " Tags: ${concatStringsSep ", " service.tags}"
# ''}
# echo
# '')
# enabledServices)}
# '')
];
};
}

View file

@ -1,304 +0,0 @@
# modules/motd/default.nix
#
# Dynamic MOTD for homelab hosts: a generated bash script that renders
# system info plus service/monitoring/backup/proxy status sections from
# /etc/homelab/config.json (written by modules/global-config.nix).
{
config,
lib,
pkgs,
...
}:
with lib; let
cfg = config.homelab.motd;
globalCfg = config.homelab.global;
# Services flagged enable = true in homelab.global.services; these are
# auto-registered into cfg.services in the config section below.
enabledServices = filterAttrs (name: service: service.enable) globalCfg.services;
# The MOTD script itself. Section rendering is gated at build time by the
# cfg.show* options via optionalString; ''${...} is the Nix escape for a
# literal bash ${...}.
homelab-motd = pkgs.writeShellScriptBin "homelab-motd" ''
#! /usr/bin/env bash
source /etc/os-release
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
WHITE='\033[1;37m'
NC='\033[0m' # No Color
BOLD='\033[1m'
# Helper functions
print_header() {
echo -e "''${BOLD}''${BLUE}''${NC}"
echo -e "''${BOLD}''${BLUE}''${NC}''${WHITE} 🏠 HOMELAB STATUS ''${NC}''${BOLD}''${BLUE}''${NC}"
echo -e "''${BOLD}''${BLUE}''${NC}"
}
print_section() {
echo -e "\n''${BOLD}''${CYAN} $1''${NC}"
echo -e "''${CYAN}''${NC}"
}
get_service_status() {
local service="$1"
if ${pkgs.systemd}/bin/systemctl is-active --quiet "$service" 2>/dev/null; then
echo -e "''${GREEN}''${NC} Active"
elif ${pkgs.systemd}/bin/systemctl is-enabled --quiet "$service" 2>/dev/null; then
echo -e "''${YELLOW}''${NC} Inactive"
else
echo -e "''${RED}''${NC} Disabled"
fi
}
get_timer_status() {
local timer="$1"
if ${pkgs.systemd}/bin/systemctl is-active --quiet "$timer" 2>/dev/null; then
local next_run=$(${pkgs.systemd}/bin/systemctl show "$timer" --property=NextElapseUSecRealtime --value 2>/dev/null || echo "0")
if [[ "$next_run" != "0" && "$next_run" != "n/a" ]]; then
local next_readable=$(${pkgs.systemd}/bin/systemctl list-timers --no-pager "$timer" 2>/dev/null | tail -n +2 | head -n 1 | awk '{print $1, $2}' || echo "Unknown")
echo -e "''${GREEN}''${NC} Next: ''${next_readable}"
else
echo -e "''${GREEN}''${NC} Active"
fi
else
echo -e "''${RED}''${NC} Inactive"
fi
}
# Main script
${optionalString cfg.clearScreen "clear"}
print_header
# Check if global config exists
CONFIG_FILE="/etc/homelab/config.json"
if [[ ! -f "$CONFIG_FILE" ]]; then
echo -e "''${RED} Global homelab configuration not found at $CONFIG_FILE''${NC}"
exit 1
fi
# Parse global configuration
HOSTNAME=$(${pkgs.jq}/bin/jq -r '.hostname' "$CONFIG_FILE" 2>/dev/null || hostname)
DOMAIN=$(${pkgs.jq}/bin/jq -r '.domain' "$CONFIG_FILE" 2>/dev/null || echo "unknown")
ENVIRONMENT=$(${pkgs.jq}/bin/jq -r '.environment' "$CONFIG_FILE" 2>/dev/null || echo "unknown")
LOCATION=$(${pkgs.jq}/bin/jq -r '.location' "$CONFIG_FILE" 2>/dev/null || echo "unknown")
TAGS=$(${pkgs.jq}/bin/jq -r '.tags[]?' "$CONFIG_FILE" 2>/dev/null | tr '\n' ' ' || echo "none")
print_section "SYSTEM INFO"
echo -e " ''${BOLD}Hostname:''${NC} $HOSTNAME"
echo -e " ''${BOLD}Domain:''${NC} $DOMAIN"
echo -e " ''${BOLD}Environment:''${NC} $ENVIRONMENT"
echo -e " ''${BOLD}Location:''${NC} $LOCATION"
echo -e " ''${BOLD}Tags:''${NC} ''${TAGS:-none}"
echo -e " ''${BOLD}Uptime:''${NC} $(${pkgs.procps}/bin/uptime -p)"
echo -e " ''${BOLD}Load:''${NC} $(${pkgs.procps}/bin/uptime | awk -F'load average:' '{print $2}' | xargs)"
${optionalString cfg.showServices ''
# Enabled services from homelab config
print_section "HOMELAB SERVICES"
${concatStringsSep "\n" (mapAttrsToList (name: service: ''
status=$(get_service_status "${service.systemdService}")
printf " %-25s %s\n" "${name}" "$status"
'')
cfg.services)}
''}
${optionalString cfg.showMonitoring ''
# Monitoring endpoints
print_section "MONITORING ENDPOINTS"
ENDPOINTS=$(${pkgs.jq}/bin/jq -c '.monitoring.endpoints[]?' "$CONFIG_FILE" 2>/dev/null || echo "")
if [[ -n "$ENDPOINTS" ]]; then
while IFS= read -r endpoint; do
name=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.name')
port=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.port')
path=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.path')
job=$(echo "$endpoint" | ${pkgs.jq}/bin/jq -r '.jobName')
# Check if port is accessible
if ${pkgs.netcat}/bin/nc -z localhost "$port" 2>/dev/null; then
status="''${GREEN}''${NC}"
else
status="''${RED}''${NC}"
fi
printf " %-20s %s %s:%s%s (job: %s)\n" "$name" "$status" "$HOSTNAME" "$port" "$path" "$job"
done <<< "$ENDPOINTS"
else
echo -e " ''${YELLOW}No monitoring endpoints configured''${NC}"
fi
''}
${optionalString cfg.showBackups ''
# Backup jobs status
print_section "BACKUP JOBS"
BACKUP_JOBS=$(${pkgs.jq}/bin/jq -c '.backups.jobs[]?' "$CONFIG_FILE" 2>/dev/null || echo "")
if [[ -n "$BACKUP_JOBS" ]]; then
while IFS= read -r job; do
name=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.name')
backend=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.backend')
schedule=$(echo "$job" | ${pkgs.jq}/bin/jq -r '.schedule')
service_name="backup-''${name}"
timer_name="''${service_name}.timer"
timer_status=$(get_timer_status "$timer_name")
# Get last backup info
last_run="Unknown"
if ${pkgs.systemd}/bin/systemctl show "$service_name" --property=ExecMainStartTimestamp --value 2>/dev/null | grep -q "^[^n]"; then
last_run=$(${pkgs.systemd}/bin/systemctl show "$service_name" --property=ExecMainStartTimestamp --value 2>/dev/null | head -1)
if [[ "$last_run" != "n/a" && -n "$last_run" ]]; then
last_run=$(${pkgs.coreutils}/bin/date -d "$last_run" "+%Y-%m-%d %H:%M" 2>/dev/null || echo "Unknown")
fi
fi
printf " %-20s %s (%s, %s) Last: %s\n" "$name" "$timer_status" "$backend" "$schedule" "$last_run"
done <<< "$BACKUP_JOBS"
# Show backup-status command output if available
if command -v backup-status >/dev/null 2>&1; then
echo -e "\n ''${BOLD}Quick Status:''${NC}"
backup-status 2>/dev/null | tail -n +3 | head -10 | sed 's/^/ /'
fi
else
echo -e " ''${YELLOW}No backup jobs configured''${NC}"
fi
''}
${optionalString cfg.showReverseProxy ''
# Reverse proxy entries
print_section "REVERSE PROXY ENTRIES"
PROXY_ENTRIES=$(${pkgs.jq}/bin/jq -c '.reverseProxy.entries[]?' "$CONFIG_FILE" 2>/dev/null || echo "")
if [[ -n "$PROXY_ENTRIES" ]]; then
while IFS= read -r entry; do
subdomain=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.subdomain')
port=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.port')
domain=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.domain')
auth=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.enableAuth')
ssl=$(echo "$entry" | ${pkgs.jq}/bin/jq -r '.enableSSL')
# Check if service is running on the port
if ${pkgs.netcat}/bin/nc -z localhost "$port" 2>/dev/null; then
status="''${GREEN}''${NC}"
else
status="''${RED}''${NC}"
fi
auth_indicator=""
[[ "$auth" == "true" ]] && auth_indicator=" 🔐"
ssl_indicator=""
[[ "$ssl" == "true" ]] && ssl_indicator=" 🔒"
printf " %-25s %s :%s %s%s%s\n" "''${domain}" "$status" "$port" "$domain" "$auth_indicator" "$ssl_indicator"
done <<< "$PROXY_ENTRIES"
else
echo -e " ''${YELLOW}No reverse proxy entries configured''${NC}"
fi
''}
${optionalString cfg.showResources ''
# Resource usage
print_section "RESOURCE USAGE"
echo -e " ''${BOLD}Memory:''${NC} $(${pkgs.procps}/bin/free -h | awk '/^Mem:/ {printf "%s/%s (%.1f%%)", $3, $2, ($3/$2)*100}')"
echo -e " ''${BOLD}Disk (root):''${NC} $(${pkgs.coreutils}/bin/df -h / | awk 'NR==2 {printf "%s/%s (%s)", $3, $2, $5}')"
echo -e " ''${BOLD}CPU Usage:''${NC} $(${pkgs.procps}/bin/top -bn1 | grep "Cpu(s)" | awk '{printf "%.1f%%", $2+$4}' | sed 's/%us,//')%"
''}
${optionalString cfg.showRecentIssues ''
# Recent logs (errors only)
print_section "RECENT ISSUES"
error_count=$(${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q | wc -l)
if [[ "$error_count" -gt 0 ]]; then
echo -e " ''${RED} $error_count errors in last 24h''${NC}"
${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q | tail -3 | sed 's/^/ /'
else
echo -e " ''${GREEN} No critical errors in last 24h''${NC}"
fi
''}
echo -e "\n''${BOLD}''${BLUE}''${NC}"
echo -e "''${BOLD}''${BLUE}''${NC} ''${WHITE}Run 'backup-status' for detailed backup info ''${NC}''${BOLD}''${BLUE}''${NC}"
echo -e "''${BOLD}''${BLUE}''${NC} ''${WHITE}Config: /etc/homelab/config.json ''${NC}''${BOLD}''${BLUE}''${NC}"
echo -e "''${BOLD}''${BLUE}''${NC}"
echo
'';
in {
options.homelab.motd = {
enable = mkEnableOption "Dynamic homelab MOTD";
clearScreen = mkOption {
type = types.bool;
default = true;
description = "Clear screen before showing MOTD";
};
showServices = mkOption {
type = types.bool;
default = true;
description = "Show enabled homelab services";
};
showMonitoring = mkOption {
type = types.bool;
default = true;
description = "Show monitoring endpoints";
};
showBackups = mkOption {
type = types.bool;
default = true;
description = "Show backup jobs status";
};
showReverseProxy = mkOption {
type = types.bool;
default = true;
description = "Show reverse proxy entries";
};
showResources = mkOption {
type = types.bool;
default = true;
description = "Show system resource usage";
};
showRecentIssues = mkOption {
type = types.bool;
default = true;
description = "Show recent system issues";
};
services = mkOption {
type = types.attrsOf (types.submodule {
options = {
systemdService = mkOption {
type = types.str;
description = "Name of the systemd service to monitor";
};
description = mkOption {
type = types.str;
default = "";
description = "Human-readable description of the service";
};
};
});
default = {};
description = "Homelab services to monitor in MOTD";
};
};
config = mkIf (cfg.enable && globalCfg.enable) {
# Register services with MOTD
# (every enabled homelab.global service is monitored under its own name;
# assumes the systemd unit is named the same as the homelab service —
# TODO confirm for services whose unit name differs).
homelab.motd.services =
mapAttrs (name: service: {
systemdService = name;
description = service.description;
})
enabledServices;
# Create a command to manually run the MOTD
# (jq and netcat are runtime dependencies of the script above).
environment.systemPackages = with pkgs; [
jq
netcat
homelab-motd
];
};
}

View file

@ -1,4 +0,0 @@
{
jellyfin = import ./jellyfin.nix;
grafana = import ./grafana.nix;
}

View file

@ -1 +0,0 @@

View file

@ -1,72 +0,0 @@
# modules/services/grafana.nix
#
# Homelab wrapper around the upstream NixOS Grafana module: enables the
# service and registers backup / reverse-proxy / monitoring metadata with
# homelab.global.
#
# FIX: options previously lived under `services.grafana`, which (a) re-declared
# options the upstream NixOS grafana module already declares (duplicate option
# declaration error) and (b) made
# `config = mkIf cfg.enable { services.grafana.enable = true; … }` depend on
# its own condition (infinite recursion). Options now live under
# `homelab.services.grafana`, matching modules/services/prometheus.nix.
# The unused `helpers = import ../lib/helpers.nix` binding was dropped.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.grafana;
in {
  options.homelab.services.grafana = {
    enable = mkEnableOption "Grafana monitoring dashboard";
    # Port the Grafana HTTP server listens on.
    port = mkOption {
      type = types.port;
      default = 3000;
      description = "Grafana web interface port";
    };
    # NOTE(review): a plain-string password ends up in the world-readable Nix
    # store; consider pointing settings at a secret file instead.
    adminPassword = mkOption {
      type = types.str;
      description = "Admin password for Grafana";
    };
  };
  config = mkIf cfg.enable {
    # Configure the upstream NixOS Grafana service.
    services.grafana = {
      enable = true;
      settings = {
        server = {
          http_port = cfg.port;
          domain = "${config.homelab.global.hostname}.${config.homelab.global.domain}";
        };
        security = {
          admin_password = cfg.adminPassword;
        };
      };
    };
    # Register backup / proxy / monitoring entries with the global
    # homelab aggregation.
    homelab.global = {
      backups.jobs = [
        {
          name = "grafana-data";
          backend = "restic";
          paths = ["/var/lib/grafana"];
          schedule = "daily";
          excludePatterns = ["*/plugins/*" "*/png/*"];
        }
      ];
      reverseProxy.entries = [
        {
          subdomain = "grafana";
          port = cfg.port;
          enableAuth = false; # Grafana handles its own auth
        }
      ];
      monitoring.endpoints = [
        {
          name = "grafana";
          port = cfg.port;
          path = "/metrics";
          jobName = "grafana";
          labels = {
            service = "grafana";
            type = "monitoring";
          };
        }
      ];
    };
  };
}

View file

@ -1,125 +0,0 @@
# modules/services/jellyfin.nix
#
# Homelab wrapper around the upstream NixOS Jellyfin module: enables the
# service, opens its firewall port, and registers backup / reverse-proxy /
# monitoring metadata with homelab.global.
#
# FIX: options previously lived under `services.jellyfin`, which re-declared
# options the upstream NixOS jellyfin module already declares, and made
# `config = mkIf cfg.enable { services.jellyfin.enable = true; … }` depend on
# its own condition (infinite recursion). Options now live under
# `homelab.services.jellyfin`, matching modules/services/prometheus.nix.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.jellyfin;
in {
  options.homelab.services.jellyfin = {
    enable = mkEnableOption "Jellyfin media server";
    port = mkOption {
      type = types.port;
      default = 8096;
      description = "Port for Jellyfin web interface";
    };
    dataDir = mkOption {
      type = types.str;
      default = "/var/lib/jellyfin";
      description = "Directory to store Jellyfin data";
    };
    mediaDir = mkOption {
      type = types.str;
      default = "/media";
      description = "Directory containing media files";
    };
    enableMetrics = mkOption {
      type = types.bool;
      default = true;
      description = "Enable Prometheus metrics";
    };
    exposeWeb = mkOption {
      type = types.bool;
      default = true;
      description = "Expose web interface through reverse proxy";
    };
  };
  config = mkIf cfg.enable {
    # Enable the upstream NixOS service.
    services.jellyfin = {
      enable = true;
      dataDir = cfg.dataDir;
    };
    # Configure global settings
    homelab.global = {
      # Add backup job for Jellyfin data
      backups.jobs = [
        {
          name = "jellyfin-config";
          backend = "restic";
          paths = ["${cfg.dataDir}/config" "${cfg.dataDir}/data"];
          schedule = "0 2 * * *"; # Daily at 2 AM
          excludePatterns = [
            "*/cache/*"
            "*/transcodes/*"
            "*/logs/*"
          ];
          preHook = ''
            # Stop jellyfin for consistent backup
            systemctl stop jellyfin
          '';
          postHook = ''
            # Restart jellyfin after backup
            systemctl start jellyfin
          '';
        }
        {
          name = "jellyfin-media";
          backend = "restic";
          paths = [cfg.mediaDir];
          schedule = "0 3 * * 0"; # Weekly on Sunday at 3 AM
          excludePatterns = [
            "*.tmp"
            "*/.@__thumb/*" # Synology thumbnails
          ];
        }
      ];
      # Add reverse proxy entry if enabled
      reverseProxy.entries = mkIf cfg.exposeWeb [
        {
          subdomain = "jellyfin";
          port = cfg.port;
          enableAuth = false; # Jellyfin has its own auth
          websockets = true;
          customHeaders = {
            "X-Forwarded-Proto" = "$scheme";
            "X-Forwarded-Host" = "$host";
          };
        }
      ];
      # Add monitoring endpoint if metrics enabled
      monitoring.endpoints = mkIf cfg.enableMetrics [
        {
          name = "jellyfin";
          port = cfg.port;
          path = "/metrics"; # Assuming you have a metrics plugin
          jobName = "jellyfin";
          scrapeInterval = "60s";
          labels = {
            service = "jellyfin";
            type = "media-server";
          };
        }
      ];
    };
    # Open firewall
    networking.firewall.allowedTCPPorts = [cfg.port];
    # Create media directory
    systemd.tmpfiles.rules = [
      "d ${cfg.mediaDir} 0755 jellyfin jellyfin -"
    ];
  };
}

View file

@ -1,208 +0,0 @@
# modules/services/prometheus.nix
#
# Prometheus server for the homelab. Besides configuring the upstream
# NixOS services.prometheus module, this registers the service in the
# shared homelab.global registry (service catalog, monitoring endpoints,
# reverse proxy, backup jobs) so other homelab modules can discover it.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.homelab.services.prometheus;
  globalCfg = config.homelab.global;
in {
  options.homelab.services.prometheus = {
    enable = mkEnableOption "Prometheus monitoring server";
    port = mkOption {
      type = types.port;
      default = 9090;
      description = "Prometheus server port";
    };
    webExternalUrl = mkOption {
      type = types.str;
      default = "http://${globalCfg.hostname}:${toString cfg.port}";
      description = "External URL for Prometheus";
    };
    retention = mkOption {
      type = types.str;
      default = "30d";
      description = "Data retention period";
    };
    scrapeConfigs = mkOption {
      type = types.listOf types.attrs;
      default = [];
      description = "Additional scrape configurations";
    };
    alertmanager = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = "Enable Alertmanager integration";
      };
      url = mkOption {
        type = types.str;
        default = "http://localhost:9093";
        description = "Alertmanager URL";
      };
    };
  };
  config = mkIf cfg.enable {
    # Register service with the global homelab service catalog.
    homelab.global.services.prometheus = {
      enable = true;
      description = "Metrics collection and monitoring server";
      category = "monitoring";
      ports = [cfg.port];
      tags = ["metrics" "monitoring" "alerting"];
      priority = 20;
      dependencies = ["node-exporter"];
    };
    # Configure the actual (upstream NixOS) Prometheus service.
    services.prometheus = {
      enable = true;
      port = cfg.port;
      webExternalUrl = cfg.webExternalUrl;
      retentionTime = cfg.retention;
      scrapeConfigs =
        [
          # Auto-discover monitoring endpoints from the global config.
          # NOTE(review): every endpoint is scraped via globalCfg.hostname,
          # which assumes all registered endpoints run on this host — confirm.
          {
            job_name = "homelab-auto";
            static_configs = [
              {
                targets =
                  map (
                    endpoint: "${globalCfg.hostname}:${toString endpoint.port}"
                  )
                  globalCfg.monitoring.endpoints;
              }
            ];
            scrape_interval = "30s";
            metrics_path = "/metrics";
          }
        ]
        ++ cfg.scrapeConfigs;
      # Alertmanager configuration
      alertmanagers = mkIf cfg.alertmanager.enable [
        {
          static_configs = [
            {
              targets = [cfg.alertmanager.url];
            }
          ];
        }
      ];
      # Basic homelab alerting rules.
      #
      # Fix: services.prometheus.rules takes the *contents* of rule files as
      # strings. The previous version wrapped the YAML in pkgs.writeText,
      # which yields a derivation, not rule text — the module would either
      # fail its type check or end up with a /nix/store path where YAML was
      # expected. Pass the rule text inline instead.
      rules = [
        ''
          groups:
            - name: homelab
              rules:
                - alert: ServiceDown
                  expr: up == 0
                  for: 5m
                  labels:
                    severity: critical
                  annotations:
                    summary: "Service {{ $labels.instance }} is down"
                    description: "{{ $labels.job }} on {{ $labels.instance }} has been down for more than 5 minutes."
                - alert: HighMemoryUsage
                  expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9
                  for: 10m
                  labels:
                    severity: warning
                  annotations:
                    summary: "High memory usage on {{ $labels.instance }}"
                    description: "Memory usage is above 90% on {{ $labels.instance }}"
                - alert: HighDiskUsage
                  expr: (node_filesystem_size_bytes - node_filesystem_free_bytes) / node_filesystem_size_bytes > 0.85
                  for: 5m
                  labels:
                    severity: warning
                  annotations:
                    summary: "High disk usage on {{ $labels.instance }}"
                    description: "Disk usage is above 85% on {{ $labels.instance }} for filesystem {{ $labels.mountpoint }}"
        ''
      ];
    };
    # Register our own metrics endpoint so the homelab-auto job scrapes us too.
    homelab.global.monitoring.endpoints = [
      {
        name = "prometheus";
        port = cfg.port;
        path = "/metrics";
        jobName = "prometheus";
        scrapeInterval = "30s";
        labels = {
          service = "prometheus";
          role = "monitoring";
        };
      }
    ];
    # Expose the web UI behind the reverse proxy when a domain is configured.
    homelab.global.reverseProxy.entries = mkIf (globalCfg.domain != null) [
      {
        subdomain = "prometheus";
        port = cfg.port;
        path = "/";
        enableAuth = true;
        enableSSL = true;
        customHeaders = {
          "X-Frame-Options" = "DENY";
          "X-Content-Type-Options" = "nosniff";
        };
      }
    ];
    # Back up the TSDB. Prometheus is stopped around the backup for a
    # consistent snapshot; this implies nightly monitoring downtime.
    homelab.global.backups.jobs = [
      {
        name = "prometheus-data";
        backend = "restic";
        paths = ["/var/lib/prometheus2"];
        schedule = "daily";
        retention = {
          daily = "7";
          weekly = "4";
          monthly = "3";
          yearly = "1";
        };
        excludePatterns = [
          "*.tmp"
          "*/wal/*"
        ];
        preHook = ''
          # Stop prometheus temporarily for consistent backup
          systemctl stop prometheus
        '';
        postHook = ''
          # Restart prometheus after backup
          systemctl start prometheus
        '';
      }
    ];
    # Open firewall port for the web UI / API.
    networking.firewall.allowedTCPPorts = [cfg.port];
    # Ensure data and config directories exist with sane ownership.
    systemd.tmpfiles.rules = [
      "d /var/lib/prometheus2 0755 prometheus prometheus -"
      "d /etc/prometheus 0755 root root -"
    ];
  };
}

View file

@ -1,4 +0,0 @@
{
  # Placeholder module: backrest (a web UI for restic) is not implemented yet.
  # TODO: implement, using this module as a reference:
  # https://github.com/L-Trump/nixos-configs/blob/ab3fb16e330b8a2904b9967e46af8c061b56266e/modules/nixos/server/backrest.nix#L7
}

View file

@ -1,95 +0,0 @@
# backups-option.nix
#
# Defines the `system.backups.backups` option: an attrset of backup job
# definitions. The `backend` of each job selects one of the backend
# functions registered in cfg.backends (see root.nix), which lazily
# supplies the schema for that job's `backendOptions` submodule.
#
# `cfg` is the (lazily evaluated) value of config.system.backups; cfg.lib
# carries the nixpkgs lib through an internal option so this file needs no
# module arguments of its own.
cfg: let
  # NOTE(review): mkEnableOption is inherited but currently unused below.
  inherit (cfg.lib) mkOption types mkEnableOption attrNames;
in
  mkOption {
    type = types.attrsOf (
      types.submodule (
        {
          name,
          config,
          ...
        } @ args: {
          options = {
            backend = mkOption {
              # Only backends that have actually registered themselves are valid.
              type = types.enum (attrNames cfg.backends);
              description = "The backup backend to use";
            };
            paths = mkOption {
              type = types.listOf types.str;
              default = [];
              description = "Paths to backup";
            };
            enable = mkOption {
              type = types.bool;
              default = true;
              description = "Whether to enable this backup job";
            };
            timerConfig = mkOption {
              type = with types; nullOr attrs;
              default = null;
              example = {
                OnCalendar = "00:05";
                Persistent = true;
                RandomizedDelaySec = "5h";
              };
              description = ''
                When to run the backup. If null, inherits from backend's default timerConfig.
                Set to null to disable automatic scheduling.
              '';
            };
            backendOptions = mkOption {
              # The submodule type is produced by the selected backend's
              # registration function. The job's own config and name are
              # passed through so the backend can reference them (e.g. for
              # tagging). This is a deliberate lazy fixpoint: it only works
              # because the module system evaluates `args.config.backend`
              # before needing the submodule's option schema.
              type = let
                backupConfig = config;
                backupName = name;
              in
                types.submodule (
                  {config, ...} @ args'':
                    cfg.backends.${args.config.backend} (args'' // {inherit backupConfig backupName;})
                );
              default = {};
              description = "Backend-specific options";
            };
            preBackupScript = mkOption {
              type = types.lines;
              default = "";
              description = "Script to run before backing up";
            };
            postBackupScript = mkOption {
              type = types.lines;
              default = "";
              description = ''
                Script to run after backing up. Runs even if the backup fails.
              '';
            };
            # Notification toggles only; wiring them to actual notifier
            # services is left to the backend implementations.
            notifications = {
              failure = {
                enable = mkOption {
                  type = types.bool;
                  default = true;
                  description = "Enable failure notifications";
                };
              };
              success = {
                enable = mkOption {
                  type = types.bool;
                  default = false;
                  description = "Enable success notifications";
                };
              };
            };
          };
        }
      )
    );
    default = {};
    description = "Backup job definitions";
  }

View file

@ -1,6 +0,0 @@
{
  # Backup subsystem entry point: root.nix defines the generic job/backend
  # framework, restic.nix implements the restic backend on top of it.
  imports = [
    ./root.nix
    ./restic.nix
  ];
}

View file

@ -1,234 +0,0 @@
# restic.nix - Restic backend implementation
#
# Implements the "restic" backend for the generic system.backups framework
# (root.nix / backups-option.nix). Enabled jobs with backend == "restic"
# become services.restic.backups entries; an optional maintenance service
# runs forget/prune/check on the repository after the nightly backups.
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.system.backups;
  resticCfg = cfg.restic;
  # Only jobs that selected the restic backend and are enabled.
  resticBackups = filterAttrs (_: backup: backup.backend == "restic" && backup.enable) cfg.backups;
  # Build services.restic.backups entries from the job definitions.
  createResticServices =
    mapAttrs (
      name: backup: let
        # Merge global defaults with backup-specific options.
        # Precedence note: function application binds tighter than `//`, so
        # this is (recursiveUpdate defaults backendOptions) // { paths; timerConfig; }.
        serviceConfig =
          recursiveUpdate resticCfg.defaultBackendOptions backup.backendOptions
          // {
            inherit (backup) paths;
            # Use backup-specific timer or fall back to global default
            timerConfig =
              if backup.timerConfig != null
              then backup.timerConfig
              else resticCfg.timerConfig;
          };
      in
        serviceConfig
    )
    resticBackups;
in {
  options.system.backups.restic = {
    enable = mkEnableOption "restic backup backend";
    timerConfig = mkOption {
      type = types.attrs;
      default = {
        OnCalendar = "*-*-* 05:00:00";
        Persistent = true;
      };
      description = "Default systemd timer configuration for restic backups";
    };
    defaultBackendOptions = mkOption {
      type = types.attrs;
      default = {};
      example = {
        repository = "/backup/restic";
        passwordFile = "/etc/nixos/secrets/restic-password";
        initialize = true;
        pruneOpts = [
          "--keep-daily 7"
          "--keep-weekly 5"
          "--keep-monthly 12"
          "--keep-yearly 75"
        ];
      };
      description = "Default backend options applied to all restic backup jobs";
    };
    # Advanced options
    runMaintenance = mkOption {
      type = types.bool;
      default = true;
      description = "Whether to run repository maintenance after backups";
    };
    maintenanceTimer = mkOption {
      type = types.attrs;
      default = {
        OnCalendar = "*-*-* 06:00:00";
        Persistent = true;
      };
      description = "Timer configuration for maintenance tasks";
    };
    pruneOpts = mkOption {
      type = types.listOf types.str;
      default = [
        "--keep-daily 7"
        "--keep-weekly 4"
        "--keep-monthly 6"
        "--keep-yearly 3"
      ];
      description = "Default pruning options for maintenance";
    };
  };
  config = mkIf resticCfg.enable {
    # Register the restic backend with the framework. The function returns
    # the option schema (and defaults) for a job's backendOptions submodule.
    system.backups.backends.restic = {
      backupConfig,
      backupName,
      ...
    }: {
      # Schema for restic-specific backendOptions.
      options = {
        repository = mkOption {
          type = types.str;
          description = "Restic repository path or URL";
        };
        passwordFile = mkOption {
          type = types.str;
          description = "Path to file containing the repository password";
        };
        initialize = mkOption {
          type = types.bool;
          default = true;
          description = "Whether to initialize the repository if it doesn't exist";
        };
        exclude = mkOption {
          type = types.listOf types.str;
          default = [];
          description = "Patterns to exclude from backup";
        };
        extraBackupArgs = mkOption {
          type = types.listOf types.str;
          default = [];
          description = "Additional arguments passed to restic backup command";
        };
        user = mkOption {
          type = types.str;
          default = "root";
          description = "User to run the backup as";
        };
        pruneOpts = mkOption {
          type = types.listOf types.str;
          default = resticCfg.pruneOpts;
          description = "Pruning options for this backup";
        };
      };
      # Defaults: tag every snapshot with the job name so snapshots can be
      # grouped/pruned per job, and merge any globally-configured extra args.
      config = {
        extraBackupArgs =
          [
            "--tag ${backupName}"
            "--verbose"
          ]
          ++ (resticCfg.defaultBackendOptions.extraBackupArgs or []);
      };
    };
    # Create actual restic backup services
    services.restic.backups = createResticServices;
    # Add restic package
    environment.systemPackages = [pkgs.restic];
    # Systemd service customizations for the generated restic units.
    systemd.services =
      (mapAttrs' (
        name: backup:
          nameValuePair "restic-backups-${name}" {
            # Job-level hooks; postStop runs even if the backup fails.
            preStart = mkBefore backup.preBackupScript;
            postStop = mkAfter backup.postBackupScript;
            serviceConfig = {
              # Retry failed backups with growing back-off.
              # NOTE(review): RestartMaxDelaySec/RestartSteps require
              # systemd >= 254 — confirm the deployed systemd version.
              Restart = "on-failure";
              RestartSec = "5m";
              RestartMaxDelaySec = "30m";
              RestartSteps = 3;
              # Rate limiting.
              # NOTE(review): StartLimitBurst/StartLimitIntervalSec are
              # [Unit] settings in current systemd; they are still honored
              # here for compatibility — consider moving to unitConfig.
              StartLimitBurst = 4;
              StartLimitIntervalSec = "2h";
            };
            # Failure handling could be extended here for notifications
            # onFailure = optional backup.notifications.failure.enable "restic-backup-${name}-failure-notify.service";
          }
      )
      resticBackups)
      // optionalAttrs resticCfg.runMaintenance {
        # Repository maintenance service (forget/prune + integrity check).
        restic-maintenance = {
          description = "Restic repository maintenance";
          after = map (name: "restic-backups-${name}.service") (attrNames resticBackups);
          # Fix: restic reads its configuration from RESTIC_* environment
          # variables. The previous version exported defaultBackendOptions
          # verbatim (keys like `repository`/`passwordFile`, plus non-string
          # values such as `initialize`), which restic ignores and systemd
          # may reject — so maintenance could never find the repository.
          # Map the options to the variables restic actually understands.
          environment = {
            RESTIC_REPOSITORY = resticCfg.defaultBackendOptions.repository or "";
            RESTIC_PASSWORD_FILE = resticCfg.defaultBackendOptions.passwordFile or "";
            RESTIC_CACHE_DIR = "/var/cache/restic-maintenance";
          };
          serviceConfig = {
            Type = "oneshot";
            # Multiple ExecStart entries run sequentially for Type=oneshot.
            ExecStart = [
              "${pkgs.restic}/bin/restic forget --prune ${concatStringsSep " " resticCfg.pruneOpts}"
              "${pkgs.restic}/bin/restic check --read-data-subset=500M"
            ];
            User = "root";
            CacheDirectory = "restic-maintenance";
            CacheDirectoryMode = "0700";
          };
        };
      };
    # Maintenance timer
    systemd.timers = mkIf resticCfg.runMaintenance {
      restic-maintenance = {
        description = "Timer for restic repository maintenance";
        wantedBy = ["timers.target"];
        timerConfig = resticCfg.maintenanceTimer;
      };
    };
    # Helpful shell aliases, including one manual trigger per backup job.
    programs.zsh.shellAliases =
      {
        restic-snapshots = "restic snapshots --compact --group-by tags";
        restic-repo-size = "restic stats --mode raw-data";
      }
      // (mapAttrs' (
        name: _:
          nameValuePair "backup-${name}" "systemctl start restic-backups-${name}"
      )
      resticBackups);
  };
}

View file

@ -1,66 +0,0 @@
# root.nix - Main backup system module
#
# Declares the backend-agnostic backup framework: backends register a
# schema function under system.backups.backends, and jobs are declared
# under system.backups.backups (schema imported from backups-option.nix).
{
  config,
  lib,
  pkgs,
  ...
}:
with lib; let
  cfg = config.system.backups;
  # Enabled jobs for a given backend name.
  getBackupsByBackend = backend:
    filterAttrs (_: backup: backup.backend == backend && backup.enable) cfg.backups;
in {
  options.system.backups = {
    # Backend registration system - backends register themselves here
    backends = mkOption {
      type = with types; attrsOf (functionTo attrs);
      internal = true;
      default = {};
      description = ''
        Attribute set of backends where the value is a function that accepts
        backend-specific arguments and returns an attribute set for the backend's options.
      '';
    };
    # Import the backups option from separate file, passing cfg for backend inference.
    # This is a lazy fixpoint: cfg is this module's own config value, which the
    # imported option definition only inspects (e.g. attrNames cfg.backends)
    # after backends have registered themselves.
    backups = import ./backups-option.nix cfg;
    # Expose lib through an internal option so backups-option.nix can use
    # mkOption/types without taking module arguments of its own.
    lib = mkOption {
      type = types.attrs;
      internal = true;
      default = lib;
    };
  };
  config = {
    # Re-export backups at root level for convenience
    # backups = cfg.backups;
    # Common backup packages
    environment.systemPackages = with pkgs; [
      # Add common backup utilities here
    ];
    # Common systemd service modifications for all backup services.
    # NOTE(review): this assumes every backend names its units
    # "<backend>-backups-<job>" (restic.nix follows this convention) —
    # confirm for any future backend.
    systemd.services = let
      allBackupServices = flatten (
        mapAttrsToList (
          backendName: backups:
            mapAttrsToList (name: backup: "${backendName}-backups-${name}") backups
        ) (genAttrs (attrNames cfg.backends) (backend: getBackupsByBackend backend))
      );
    in
      genAttrs allBackupServices (serviceName: {
        serviceConfig = {
          # Common hardening for all backup services
          ProtectSystem = "strict";
          ProtectHome = "read-only";
          PrivateTmp = true;
          NoNewPrivileges = true;
        };
      });
  };
}

View file

@ -1,7 +0,0 @@
# proxmox-infra/.gitignore
.terraform/
*.tfstate
*.tfstate.*
crash.log
*.tfvars

View file

@ -1,24 +0,0 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.
provider "registry.opentofu.org/telmate/proxmox" {
version = "3.0.2-rc01"
constraints = "3.0.2-rc01"
hashes = [
"h1:571ROPuTMC0w5lr9hbUXi7NVLsG3SpmZxXXZx8cAT+Q=",
"zh:34d264243a4513f4e30c01fb37cc6a3e592d7823dfd182c5edfb170ac7b7de3a",
"zh:544428311ad20fbb3ad2cd854e893bbf036023cb57c3acc5093d141976dac670",
"zh:5c2396b328edee8de7ac144c15a6b7e668e81063699bc8c110d7c39fb8da70e9",
"zh:5ca8e33476ad06a0259071120a59477e8f107f30c1178ea7b9f6cafe1a461ade",
"zh:5ea56eb8275edc754a01a0180750e9c939cd997d3a50659617770211f4337da9",
"zh:9dd3482df6bbe00a4a6152be3567b6c08d35c3644a327a1f5ac30fd95ccd449f",
"zh:a76075fafadcc94a825151aff169bae4e0c05e3c7717e16dcdcf16ffa61a0780",
"zh:b1d95f97b22f671db762f7adf428b409e6736c078bcf267d8391985b8847d6e3",
"zh:cc94255cd1b18e6a341c15089015c457c8c639c25c426b07f278d5ea9850b3b5",
"zh:ce991103cb69b0b3e275127e3ab92c88bb3b6b0f4e5a2cb082aeaef70a7f7d61",
"zh:d24838bce87b38e12544a1329f5ad30e2be045968e639a3f4ddd5c84aa648e04",
"zh:e106ebd4eea8d62d62e62f261a262febc615e17466b54ac18f7e65c7e79e0008",
"zh:e254ca76c95e6e92da973b7bddc36bfa0a1e31d7c7e758ef4b01315db969388b",
"zh:f1d1d5f4c39267cacebe0ab7e9e06caf9692707f3b5369685541b65bc8b840ce",
]
}

View file

@ -1,52 +0,0 @@
# # This calls the module to define a new VM (e.g., if you were creating one)
# resource "proxmox_vm_qemu" "sandbox" {
# name = "sandbox"
# desc = "OpenTofu testing"
# target_nodes = [var.proxmox_node]
# vmid= 100
# full_clone = true
# clone_id = 9100
# agent = 1
# scsihw = "virtio-scsi-single"
# ciuser = "root"
# ipconfig0 = "ip=dhcp"
# cpu {
# cores = 2
# }
# memory = 2048
# disks {
# virtio {
# virtio0 {
# disk {
# size = "9452M"
# storage = "local-lvm"
# }
# }
# }
# ide {
# ide2 {
# cloudinit {
# storage = "local-lvm"
# }
# }
# }
# }
# network {
# id = 0
# bridge = "vmbr0"
# model = "virtio"
# }
# serial {
# id = 0
# }
# }
# output "sandbox_vmid" {
# description = "sandbox VM ID"
# value = proxmox_vm_qemu.sandbox.id
# }
# output "sandbox_ipv4" {
# description = "sandbox public IPv4 address"
# value = proxmox_vm_qemu.sandbox.default_ipv4_address
# }

View file

@ -1,9 +0,0 @@
# Proxmox provider connection settings; credentials come from variables.tf
# (supply them via a .tfvars file or environment, never commit them).
provider "proxmox" {
  # NOTE(review): TLS verification is disabled — tolerable for a homelab
  # with a self-signed certificate, but prefer trusting the node's CA.
  pm_tls_insecure = true
  pm_api_url = var.proxmox_api_url
  pm_user = var.proxmox_user
  pm_password = var.proxmox_password
  # Or use API token for better security:
  # pm_api_token_id = var.proxmox_api_token_id
  # pm_api_token_secret = var.proxmox_api_token_secret
}

View file

@ -1,106 +0,0 @@
# proxmox_vm_qemu.sandbox:
resource "proxmox_vm_qemu" "sandbox" {
agent = 1
bios = "seabios"
boot = " "
ciuser = "root"
cores = 0
current_node = "proxmox-01"
define_connection_info = false
desc = " generated by NixOS"
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "proxmox-01/qemu/100"
ipconfig0 = "ip=dhcp"
kvm = true
linked_vmid = 0
memory = 2048
name = "sandbox"
numa = false
onboot = true
protection = false
qemu_os = "l26"
reboot_required = false
scsihw = "virtio-scsi-single"
sockets = 0
sshkeys = <<-EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCljEOf8Lv7Ptgsc1+CYzXpnrctPy7LFXXOyVZTI9uN7R4HY5aEdZTKEGSsU/+p+JtXWzzI65fnrZU8pTMG/wvCK+gYyNZcEM4g/TXMVa+CWZR3y13zGky88R7dKiBl5L00U4BePDD1ci3EU3/Mjr/GVTQHtkbJfLtvhR9zkCNZzxbu+rySWDroUPWPvE3y60/iLjBsh5ZmHo59CW67lh1jgbAlZjKWZzLWo0Bc5wgbxoQPWcO4BCh17N4g8llrRxGOwJzHeaipBnXn9J1AGIm9Zls6pxT9j6MKltcCOb7tQZwc3hlPOW2ku6f7OHTrziKw37drIDM0UDublAOcnIfBjE+XuWsp5t6ojdIzIDMrzaYW2MyMA3PHuf7VESUQdP4TZ1XUwtRRzOjn5AZJi9DPoowPaxKL92apRpFG+ovaFpWZsG7s8NWXHAC79IpgMUzscEmM15OMQ36RQ5xeytGDVCmVT8DbHGrMT9HUfR5fBSWD3aDQiOOiIIhrbY35m+U65Sz/GpZMk6HlaiV3tKNB0m+xE+84MUEmm4fFzt3B/0N4kscMArnLAm/OMUblihPwbKAUAUWErGRBfP+u+zjRCi1D9/pffpl2OQ2QIuVM82g6/EPa1ZsXZP+4iHooQoJbrqVGzkfiA1EKLfcdGfkP/O4nRl+D5UgkGdqqvm20NQ== root@proxmox-01
EOT
tablet = true
target_nodes = [
"proxmox-01",
]
unused_disk = []
vcpus = 0
vm_state = "running"
vmid = 100
cpu {
cores = 2
limit = 0
numa = false
sockets = 1
type = "host"
units = 0
vcores = 0
}
disks {
ide {
ide2 {
cloudinit {
storage = "local-lvm"
}
}
}
virtio {
virtio0 {
disk {
backup = true
discard = false
format = "raw"
id = 0
iops_r_burst = 0
iops_r_burst_length = 0
iops_r_concurrent = 0
iops_wr_burst = 0
iops_wr_burst_length = 0
iops_wr_concurrent = 0
iothread = false
linked_disk_id = -1
mbps_r_burst = 0
mbps_r_concurrent = 0
mbps_wr_burst = 0
mbps_wr_concurrent = 0
readonly = false
replicate = true
size = "9452M"
storage = "local-lvm"
}
}
}
}
network {
bridge = "vmbr0"
firewall = true
id = 0
link_down = false
macaddr = "bc:24:11:a7:e8:2a"
model = "virtio"
mtu = 0
queues = 0
rate = 0
tag = 0
}
serial {
id = 0
type = "socket"
}
smbios {
uuid = "37cd09d5-29a5-42e2-baba-f21b691130e8"
}
}

View file

@ -1 +0,0 @@
{"version":4,"terraform_version":"1.9.1","serial":2,"lineage":"ecd6c5f8-5352-bf30-6117-d55763366399","outputs":{"sandbox_ipv4":{"value":"192.168.1.206","type":"string"},"sandbox_vmid":{"value":"proxmox-01/qemu/999","type":"string"}},"resources":[{"mode":"managed","type":"proxmox_vm_qemu","name":"sandbox","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" ","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"","ciupgrade":false,"ciuser":"root","clone":null,"clone_id":9100,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.206","default_ipv6_address":"2a05:f6c7:2030:0:be24:11ff:feb9:919f","define_connection_info":true,"desc":"OpenTofu testing","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[],"ide2":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"9452M","storage":"local-lvm","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","
hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/999","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":2048,"name":"sandbox2","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:b9:91:9f","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":false,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"51a93ec4-4afa-428b-911a-daab70390a8c","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.206","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":null,"startup":"","tablet":true,"tags":"v0.0.2","target_node":null,"target_nodes":["proxmox-01"],"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":999},"sensitive_attributes":[[{"type":"get_attr","value":"cipassword"}],[{"type":"get_attr","value":"ssh_private_key"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null}

View file

@ -1,30 +0,0 @@
# proxmox-infra/variables.tf
# Input variables for the Proxmox infrastructure. No defaults are set for
# connection details, so values must come from a .tfvars file, environment
# variables (TF_VAR_*), or interactive prompts.
variable "proxmox_api_url" {
  description = "The URL of the Proxmox API (e.g., https://192.168.1.10:8006/api2/json)"
  type        = string
  # No default here, so OpenTofu will prompt or expect a .tfvars file/env var
}
variable "proxmox_user" {
  description = "Proxmox user (e.g., root@pam or user@pve)"
  type        = string
}
variable "proxmox_password" {
  description = "Proxmox user password"
  type        = string
  sensitive   = true # Mark as sensitive to hide in logs
}
variable "proxmox_node" {
  description = "The Proxmox node name where VMs will be deployed (e.g., 'pve')"
  type        = string
}
# Example for templates - you might have different templates
variable "nixos_template_id" {
  description = "VMID of the nixos cloud-init template"
  type        = number
  # Example: default = 100
}

View file

@ -1,9 +0,0 @@
# versions.tf
# Provider requirements; the exact version is also recorded in
# .terraform.lock.hcl by `tofu init`.
terraform {
  required_providers {
    proxmox = {
      source = "Telmate/proxmox"
      version = "3.0.2-rc01"
    }
  }
}

3
users/default.nix Normal file
View file

@ -0,0 +1,3 @@
{
  # Default user profile; host configurations can import this attrset
  # instead of referencing ./plasmagoat.nix directly.
  defaultUser = import ./plasmagoat.nix;
}

View file

@ -1,4 +1,3 @@
# users/plasmagoat.nix - Your user configuration
{
config,
lib,