diff --git a/.forgejo/workflows/colmena-apply.yml b/.forgejo/workflows/colmena-apply.yml index dbfd93d..6882556 100644 --- a/.forgejo/workflows/colmena-apply.yml +++ b/.forgejo/workflows/colmena-apply.yml @@ -41,6 +41,11 @@ jobs: ssh-keyscan -H "$NIXOS_BUILER_HOST" >> ~/.ssh/known_hosts chmod 600 ~/.ssh/known_hosts + - name: Test SSH connection to NixOS Builder + run: | + echo "Testing SSH connection to $NIXOS_BUILER_HOST..." + ssh -o StrictHostKeyChecking=yes "$NIXOS_BUILER_USER"@"$NIXOS_BUILER_HOST" "echo 'SSH success. Hostname:' && hostname" + - name: Apply Colmena id: apply run: colmena apply diff --git a/colmena.nix b/colmena.nix deleted file mode 100644 index bb1d099..0000000 --- a/colmena.nix +++ /dev/null @@ -1,59 +0,0 @@ -# colmena.nix - Separate file to keep flake.nix clean -{ - inputs, - outputs, -}: let - inherit (inputs.nixpkgs) lib; - - # Helper to create a host configuration - mkHost = { - hostname, - profile ? "proxmox-vm", - modules ? [], - specialArgs ? {}, - }: { - imports = - [ - # Base profile (determines hardware/platform specifics) - (./. + "/profiles/${profile}.nix") - # Host-specific configuration - (./. + "/hosts/${hostname}") - # Additional modules - ] - ++ modules; - - # Pass through special args and our outputs - _module.args = - specialArgs - // { - inherit inputs outputs; - }; - }; -in { - meta = { - nixpkgs = import inputs.nixpkgs { - system = "x86_64-linux"; - overlays = [ - outputs.overlays.additions - outputs.overlays.modifications - outputs.overlays.unstable-packages - inputs.colmena.overlays.default - ]; - }; - - specialArgs = {inherit inputs outputs;}; - }; - - defaults = import ./hosts/default.nix; - - # Define your hosts - sandbox = mkHost { - hostname = "sandbox"; - profile = "proxmox-vm"; - }; - - photos = mkHost { - hostname = "photos"; - profile = "proxmox-vm"; - }; -} diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 1572f61..0000000 --- a/docs/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Homelab Documentation - -> Auto-generated documentation for the homelab deployment -> -> Generated on: Wed, 30 Jul 2025 02:30:55 +0200 -> Source: /home/plasmagoat/homelab - -## 📚 Documentation Files - -This documentation is automatically generated from your colmena flake configuration. 
- -### 📊 Overview Documents -- **[Fleet Overview](fleet-overview.md)** - High-level fleet statistics and service distribution -- **[Current Deployment](current-deployment.md)** - Current deployment state and node status - -### 📖 Detailed Configuration -- **[Node Configurations](nodes.md)** - Per-node detailed configuration and services -- **[Service Configurations](services.md)** - Service configurations across the fleet - -## 🚀 Quick Actions - -### View Current Status -```bash -# Service status across fleet (if homelab CLI is available) -homelab services --global - -# Backup status -homelab backups --global - -# Overall status -homelab status -``` - -### Update Documentation -```bash -# Regenerate all documentation -homelab-generate-docs ./docs - -# Generate in different directory -homelab-generate-docs /path/to/output -``` - -## 📋 Quick Stats - -- **Total Nodes**: 2 -- **Homelab-Enabled Nodes**: 2 -- **Generated**: Wed, 30 Jul 2025 02:30:59 +0200 - -## 🛠️ Management Tools - -### Documentation Commands -- `homelab-generate-docs` - Regenerate this documentation -- `homelab-docs-fleet` - Generate fleet overview only -- `homelab-docs-nodes` - Generate node configurations only -- `homelab-docs-services` - Generate service configurations only -- `homelab-docs-deployment` - Generate deployment state only - -### Colmena Commands -- `colmena eval` - Evaluate flake expressions -- `colmena apply` - Deploy configuration changes -- `colmena build` - Build configurations without deploying - -## 🎯 Integration with CI/CD - -### GitHub Actions Example - -```yaml -name: Generate Documentation -on: - push: - branches: [ main ] - -jobs: - docs: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: cachix/install-nix-action@v24 - - name: Generate docs - run: nix develop --command homelab-generate-docs ./docs - - name: Commit docs - run: | - git add docs/ - git commit -m "docs: update homelab documentation" || exit 0 - git push -``` - -### Manual Generation - -```bash -# From your homelab directory -nix develop -homelab-generate-docs ./docs -git add docs/ && git commit -m "Update docs" -``` - ---- - -*This documentation reflects the live state of your homelab deployment as evaluated by colmena.* diff --git a/docs/current-deployment.md b/docs/current-deployment.md deleted file mode 100644 index 03b9e71..0000000 --- a/docs/current-deployment.md +++ /dev/null @@ -1,26 +0,0 @@ -# Current Deployment State - -> Current homelab deployment configuration -> -> Generated on: Wed, 30 Jul 2025 02:30:45 +0200 -> Working directory: /home/plasmagoat/homelab - -## Deployment Summary - -| Metric | Count | -|--------|-------| -| Total Nodes | 2 | -| Homelab-Enabled Nodes | 2 | -| Unique Services | 4 | -| Service Instances | 4 | - -## Node Status - -| Node | Homelab | Environment | Services | Monitoring | Backups | Proxy | -|------|---------|-------------|----------|------------|---------|-------| -| `photos` | ✅ | production | 1 | ✅ | ❌ | ❌ | -| `sandbox` | ✅ | production | 3 | ✅ | ✅ | ❌ | - ---- - -*Deployment state extracted from live colmena configuration* diff --git a/docs/fleet-overview.md b/docs/fleet-overview.md deleted file mode 100644 index 186fc31..0000000 --- a/docs/fleet-overview.md +++ /dev/null @@ -1,33 +0,0 @@ -# Homelab Fleet Overview - -> Auto-generated fleet overview -> -> Generated on: Wed, 30 Jul 2025 02:30:24 +0200 -> Source: /home/plasmagoat/homelab - -## Fleet Statistics - -### Basic Information - -| Metric | Value | -|--------|-------| -| Total Nodes | 2 | -| Node Names | photos 
sandbox | - -### Homelab Configuration - -| Node | Homelab Enabled | Hostname | Environment | -|------|----------------|----------|-------------| -| `photos` | ✅ | photos | production | -| `sandbox` | ✅ | sandbox | production | - -### Service Distribution - -| Node | Service Count | Services | -|------|---------------|----------| -| `photos` | 1 | minio | -| `sandbox` | 3 | gatus, grafana, prometheus | - ---- - -*Fleet overview generated from colmena evaluation* diff --git a/docs/nodes.md b/docs/nodes.md deleted file mode 100644 index f4efc67..0000000 --- a/docs/nodes.md +++ /dev/null @@ -1,72 +0,0 @@ -# Node Configurations - -> Detailed per-node configuration -> -> Generated on: Wed, 30 Jul 2025 02:30:33 +0200 - -## Node: photos - -### System Information - -| Property | Value | -|----------|-------| -| NixOS Version | `25.11pre-git` | -| Hostname | `photos` | -| System | `x86_64-linux` | - -### Homelab Configuration - -| Property | Value | -|----------|-------| -| Homelab Hostname | `photos` | -| Domain | `lab` | -| External Domain | `procopius.dk` | -| Environment | `production` | -| Location | `proxmox-cluster` | -| Tags | photos | - -### Services - -| Service | Enabled | Port | Description | Tags | -|---------|---------|------|-------------|------| -| `example` | ❌ | 1234 | Example Homelab Service | | -| `gatus` | ❌ | 8080 | Gatus Status Page | | -| `grafana` | ❌ | 3000 | Grafana Metrics Dashboard | | -| `minio` | ✅ | 9000 | minio | | -| `prometheus` | ❌ | 9090 | Prometheus Monitoring Server | | - ---- - -## Node: sandbox - -### System Information - -| Property | Value | -|----------|-------| -| NixOS Version | `25.11pre-git` | -| Hostname | `sandbox` | -| System | `x86_64-linux` | - -### Homelab Configuration - -| Property | Value | -|----------|-------| -| Homelab Hostname | `sandbox` | -| Domain | `lab` | -| External Domain | `procopius.dk` | -| Environment | `production` | -| Location | `proxmox-cluster` | -| Tags | sandbox | - -### Services - -| Service | Enabled | Port | Description | Tags | -|---------|---------|------|-------------|------| -| `example` | ❌ | 1234 | Example Homelab Service | | -| `gatus` | ✅ | 8080 | Gatus Status Page | | -| `grafana` | ✅ | 3000 | Grafana Metrics Dashboard | | -| `minio` | ❌ | 9000 | minio | | -| `prometheus` | ✅ | 9090 | Prometheus Monitoring Server | | - ---- - diff --git a/docs/services.md b/docs/services.md deleted file mode 100644 index 04cf779..0000000 --- a/docs/services.md +++ /dev/null @@ -1,506 +0,0 @@ -# Service Catalog - -> Complete service documentation with core options, feature integrations, and smart defaults -> -> Generated on: Wed, 30 Jul 2025 02:30:36 +0200 - -This document provides comprehensive documentation for homelab services, organized by: -- **Core Service Options**: The main service configuration -- **Feature Integrations**: Available monitoring, logging, and proxy features -- **Service Defaults**: What this service configures by default for each feature - -## Overview - -**Total Available Services:** 5 - -## Service Integration Matrix - -| Service | Core Options | Monitoring | Logging | Proxy | Deployments | -|---------|--------------|------------|---------|-------|-------------| -| `example` | 5 | 📊 | 📝 | 🔀 | 0 | -| `gatus` | 11 | 📊 | 📝 | 🔀 | 1 | -| `grafana` | 3 | 📊 | 📝 | 🔀 | 1 | -| `minio` | 4 | ❌ | ❌ | ❌ | 1 | -| `prometheus` | 12 | 📊 | 📝 | 🔀 | 1 | - -**Legend:** 📊📝🔀 = Feature available, ❌ = Feature not available - -## Service Documentation - -### example - -**Deployment Status:** 0/2 nodes have this service 
enabled - -#### Core Service Options - -The main configuration options for example: - -```nix -homelab.services.example = { - description = Example Homelab Service; # No description - enable = false; # Whether to enable Example Homelab Service. - openFirewall = true; # Whether to open the ports specified in `port` and `webPort` in the firewall. - port = 1234; # No description - systemdServices = [ - "example.service", - "example" -]; # Systemd services to monitor -}; -``` - -#### Feature Integrations - -##### 📊 Monitoring Integration - -Available monitoring options: - -```nix -homelab.services.example = { - # ... core options above ... - - monitoring.enable = true; # Enable monitoring for example - monitoring.extraLabels = {}; # No description - monitoring.healthCheck.conditions = [ - "[STATUS] == 200" -]; # Health check conditions. Setting conditions enables health checks. - monitoring.healthCheck.enable = true; # No description - monitoring.healthCheck.extraChecks = []; # Additional health checks. Adding checks enables health monitoring. - # monitoring.healthCheck.path = ; # Health check endpoint path. Setting this enables health checks. - monitoring.metrics.enable = false; # No description - monitoring.metrics.extraEndpoints = []; # Additional metrics endpoints. Adding endpoints enables metrics collection. - # monitoring.metrics.path = ; # Metrics endpoint path. Setting this enables metrics collection. -}; -``` - -**example sets these monitoring defaults:** -```nix - enable = true; - extraLabels = {}; - healthCheck = {"conditions":["[STATUS] == 200"],"enable":true,"extraChecks":[],"path":null}; - metrics = {"enable":false,"extraEndpoints":[],"path":null}; -``` - -##### 📝 Logging Integration - -Available logging options: - -```nix -homelab.services.example = { - # ... core options above ... - - logging.enable = false; # Enable logging for example - logging.extraLabels = {}; # No description - logging.extraSources = []; # No description - logging.files = []; # No description - # logging.multiline = ; # No description - logging.parsing.extractFields = []; # No description - # logging.parsing.regex = ; # No description -}; -``` - -**example sets these logging defaults:** -```nix - enable = false; - extraLabels = {}; - extraSources = []; - files = []; - multiline = null; - parsing = {"extractFields":[],"regex":null}; -``` - -##### 🔀 Proxy Integration - -Available proxy options: - -```nix -homelab.services.example = { - # ... core options above ... - - proxy.additionalSubdomains = []; # No description - proxy.enable = true; # Enable reverse proxy for example - proxy.enableAuth = false; # No description - proxy.subdomain = example; # No description -}; -``` - -**example sets these proxy defaults:** -```nix - additionalSubdomains = []; - enable = true; - enableAuth = false; - subdomain = example; -``` - ---- - -### gatus - -**Deployment Status:** 1/2 nodes have this service enabled - -#### Core Service Options - -The main configuration options for gatus: - -```nix -homelab.services.gatus = { - alerting = {}; # Gatus alerting configuration - description = Gatus Status Page; # No description - enable = false; # Whether to enable Gatus Status Page. 
- extraConfig = {}; # Additional Gatus configuration options - port = 8080; # No description - storage = { - "type": "memory" -}; # Gatus storage configuration - ui.buttons = [ - { - "link": "https://grafana.procopius.dk", - "name": "Grafana" - }, - { - "link": "https://prometheus.procopius.dk", - "name": "Prometheus" - } -]; # Navigation buttons in the Gatus interface - ui.header = Homelab Services Status; # Header text for the Gatus interface - ui.link = https://status.procopius.dk; # Link in the Gatus header - ui.title = Homelab Status; # Title for the Gatus web interface - web.address = 0.0.0.0; # Web interface bind address -}; -``` - -#### Feature Integrations - -##### 📊 Monitoring Integration - -Available monitoring options: - -```nix -homelab.services.gatus = { - # ... core options above ... - - monitoring.enable = true; # Enable monitoring for gatus - monitoring.extraLabels = {}; # No description - monitoring.healthCheck.conditions = [ - "[STATUS] == 200" -]; # Health check conditions. Setting conditions enables health checks. - monitoring.healthCheck.enable = true; # No description - monitoring.healthCheck.extraChecks = []; # Additional health checks. Adding checks enables health monitoring. - # monitoring.healthCheck.path = ; # Health check endpoint path. Setting this enables health checks. - monitoring.metrics.enable = false; # No description - monitoring.metrics.extraEndpoints = []; # Additional metrics endpoints. Adding endpoints enables metrics collection. - # monitoring.metrics.path = ; # Metrics endpoint path. Setting this enables metrics collection. -}; -``` - -**gatus sets these monitoring defaults:** -```nix - enable = true; - extraLabels = {}; - healthCheck = {"conditions":["[STATUS] == 200"],"enable":true,"extraChecks":[],"path":null}; - metrics = {"enable":false,"extraEndpoints":[],"path":null}; -``` - -##### 📝 Logging Integration - -Available logging options: - -```nix -homelab.services.gatus = { - # ... core options above ... - - logging.enable = false; # Enable logging for gatus - logging.extraLabels = {}; # No description - logging.extraSources = []; # No description - logging.files = []; # No description - # logging.multiline = ; # No description - logging.parsing.extractFields = []; # No description - # logging.parsing.regex = ; # No description -}; -``` - -**gatus sets these logging defaults:** -```nix - enable = false; - extraLabels = {}; - extraSources = []; - files = []; - multiline = null; - parsing = {"extractFields":[],"regex":null}; -``` - -##### 🔀 Proxy Integration - -Available proxy options: - -```nix -homelab.services.gatus = { - # ... core options above ... - - proxy.additionalSubdomains = []; # No description - proxy.enable = true; # Enable reverse proxy for gatus - proxy.enableAuth = false; # No description - proxy.subdomain = gatus; # No description -}; -``` - -**gatus sets these proxy defaults:** -```nix - additionalSubdomains = []; - enable = true; - enableAuth = false; - subdomain = gatus; -``` - ---- - -### grafana - -**Deployment Status:** 1/2 nodes have this service enabled - -#### Core Service Options - -The main configuration options for grafana: - -```nix -homelab.services.grafana = { - description = Grafana Metrics Dashboard; # No description - enable = false; # Whether to enable Grafana Dashboard. - port = 3000; # No description -}; -``` - -#### Feature Integrations - -##### 📊 Monitoring Integration - -Available monitoring options: - -```nix -homelab.services.grafana = { - # ... core options above ... 
- - monitoring.enable = true; # Enable monitoring for grafana - monitoring.extraLabels = {}; # No description - monitoring.healthCheck.conditions = [ - "[STATUS] == 200" -]; # Health check conditions. Setting conditions enables health checks. - monitoring.healthCheck.enable = true; # No description - monitoring.healthCheck.extraChecks = []; # Additional health checks. Adding checks enables health monitoring. - # monitoring.healthCheck.path = ; # Health check endpoint path. Setting this enables health checks. - monitoring.metrics.enable = false; # No description - monitoring.metrics.extraEndpoints = []; # Additional metrics endpoints. Adding endpoints enables metrics collection. - # monitoring.metrics.path = ; # Metrics endpoint path. Setting this enables metrics collection. -}; -``` - -**grafana sets these monitoring defaults:** -```nix - enable = true; - extraLabels = {}; - healthCheck = {"conditions":["[STATUS] == 200"],"enable":true,"extraChecks":[],"path":null}; - metrics = {"enable":false,"extraEndpoints":[],"path":null}; -``` - -##### 📝 Logging Integration - -Available logging options: - -```nix -homelab.services.grafana = { - # ... core options above ... - - logging.enable = false; # Enable logging for grafana - logging.extraLabels = {}; # No description - logging.extraSources = []; # No description - logging.files = []; # No description - # logging.multiline = ; # No description - logging.parsing.extractFields = []; # No description - # logging.parsing.regex = ; # No description -}; -``` - -**grafana sets these logging defaults:** -```nix - enable = false; - extraLabels = {}; - extraSources = []; - files = []; - multiline = null; - parsing = {"extractFields":[],"regex":null}; -``` - -##### 🔀 Proxy Integration - -Available proxy options: - -```nix -homelab.services.grafana = { - # ... core options above ... - - proxy.additionalSubdomains = []; # No description - proxy.enable = true; # Enable reverse proxy for grafana - proxy.enableAuth = false; # No description - proxy.subdomain = grafana; # No description -}; -``` - -**grafana sets these proxy defaults:** -```nix - additionalSubdomains = []; - enable = true; - enableAuth = false; - subdomain = grafana; -``` - ---- - -### minio - -**Deployment Status:** 1/2 nodes have this service enabled - -#### Core Service Options - -The main configuration options for minio: - -```nix -homelab.services.minio = { - enable = false; # Whether to enable Minio Object Storage. - openFirewall = true; # Whether to open the ports specified in `port` and `webPort` in the firewall. - port = 9000; # Port of the server. - webPort = 9001; # Port of the web UI (console). -}; -``` - ---- - -### prometheus - -**Deployment Status:** 1/2 nodes have this service enabled - -#### Core Service Options - -The main configuration options for prometheus: - -```nix -homelab.services.prometheus = { - alertmanager.enable = true; # Enable integration with Alertmanager - alertmanager.url = alertmanager.lab:9093; # Alertmanager URL - description = Prometheus Monitoring Server; # No description - enable = false; # Whether to enable Prometheus Monitoring Server. 
- extraAlertingRules = []; # Additional alerting rules - extraFlags = []; # Extra command line flags - extraScrapeConfigs = []; # Additional scrape configurations - globalConfig = { - "evaluation_interval": "15s", - "scrape_interval": "15s" -}; # Global Prometheus configuration - port = 9090; # No description - retention = 15d; # How long to retain metrics data - ruleFiles = []; # Additional rule files to load - systemdServices = [ - "prometheus.service", - "prometheus" -]; # Systemd services to monitor -}; -``` - -#### Feature Integrations - -##### 📊 Monitoring Integration - -Available monitoring options: - -```nix -homelab.services.prometheus = { - # ... core options above ... - - monitoring.enable = true; # Enable monitoring for prometheus - monitoring.extraLabels = {}; # No description - monitoring.healthCheck.conditions = [ - "[STATUS] == 200" -]; # Health check conditions. Setting conditions enables health checks. - monitoring.healthCheck.enable = true; # No description - monitoring.healthCheck.extraChecks = []; # Additional health checks. Adding checks enables health monitoring. - # monitoring.healthCheck.path = ; # Health check endpoint path. Setting this enables health checks. - monitoring.metrics.enable = false; # No description - monitoring.metrics.extraEndpoints = []; # Additional metrics endpoints. Adding endpoints enables metrics collection. - # monitoring.metrics.path = ; # Metrics endpoint path. Setting this enables metrics collection. -}; -``` - -**prometheus sets these monitoring defaults:** -```nix - enable = true; - extraLabels = {}; - healthCheck = {"conditions":["[STATUS] == 200"],"enable":true,"extraChecks":[],"path":null}; - metrics = {"enable":false,"extraEndpoints":[],"path":null}; -``` - -##### 📝 Logging Integration - -Available logging options: - -```nix -homelab.services.prometheus = { - # ... core options above ... - - logging.enable = false; # Enable logging for prometheus - logging.extraLabels = {}; # No description - logging.extraSources = []; # No description - logging.files = []; # No description - # logging.multiline = ; # No description - logging.parsing.extractFields = []; # No description - # logging.parsing.regex = ; # No description -}; -``` - -**prometheus sets these logging defaults:** -```nix - enable = false; - extraLabels = {}; - extraSources = []; - files = []; - multiline = null; - parsing = {"extractFields":[],"regex":null}; -``` - -##### 🔀 Proxy Integration - -Available proxy options: - -```nix -homelab.services.prometheus = { - # ... core options above ... - - proxy.additionalSubdomains = []; # No description - proxy.enable = true; # Enable reverse proxy for prometheus - proxy.enableAuth = false; # No description - proxy.subdomain = prometheus; # No description -}; -``` - -**prometheus sets these proxy defaults:** -```nix - additionalSubdomains = []; - enable = true; - enableAuth = false; - subdomain = prometheus; -``` - ---- - -## Feature Reference - -### Integration Features - -Homelab services can integrate with three main features: - -- **📊 Monitoring**: Prometheus metrics and health checks -- **📝 Logging**: Centralized log collection with Promtail/Loki -- **🔀 Proxy**: Reverse proxy with SSL and authentication - -Each service can import these features and set service-specific defaults. 
- ---- - -*This documentation is generated from actual NixOS module evaluations.* diff --git a/flake.lock b/flake.lock index e46dbf1..39e8693 100644 --- a/flake.lock +++ b/flake.lock @@ -25,11 +25,11 @@ "stable": "stable" }, "locked": { - "lastModified": 1753701727, - "narHash": "sha256-tgiPAFXoSGIm3wUAuKwjk2fgTgZ0rpT90RNfhU5QKJA=", + "lastModified": 1752287590, + "narHash": "sha256-U1IqFnxlgCRrPaeT5IGCdH0j9CNLPFcI/fRAidi0aDQ=", "owner": "zhaofengli", "repo": "colmena", - "rev": "342054695f53c4a27c8dce0a8c9f35ade6d963d6", + "rev": "d2beb694d54db653399b8597c0f6e15e20b26405", "type": "github" }, "original": { @@ -156,11 +156,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1753694789, - "narHash": "sha256-cKgvtz6fKuK1Xr5LQW/zOUiAC0oSQoA9nOISB0pJZqM=", + "lastModified": 1750134718, + "narHash": "sha256-v263g4GbxXv87hMXMCpjkIxd/viIF7p3JpJrwgKdNiI=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "dc9637876d0dcc8c9e5e22986b857632effeb727", + "rev": "9e83b64f727c88a7711a2c463a7b16eedb69a84c", "type": "github" }, "original": { @@ -188,11 +188,11 @@ }, "nixpkgs-unstable": { "locked": { - "lastModified": 1753694789, - "narHash": "sha256-cKgvtz6fKuK1Xr5LQW/zOUiAC0oSQoA9nOISB0pJZqM=", + "lastModified": 1752480373, + "narHash": "sha256-JHQbm+OcGp32wAsXTE/FLYGNpb+4GLi5oTvCxwSoBOA=", "owner": "nixos", "repo": "nixpkgs", - "rev": "dc9637876d0dcc8c9e5e22986b857632effeb727", + "rev": "62e0f05ede1da0d54515d4ea8ce9c733f12d9f08", "type": "github" }, "original": { @@ -204,11 +204,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1753795159, - "narHash": "sha256-0fOuNh5MefjES+ie0zV3mVMSs1RwXhVIxcNQuu+Q4g4=", + "lastModified": 1752624097, + "narHash": "sha256-mQCof2VccFzF7cmXy43n3GCwSN2+m8TVhZpGLx9sxVc=", "owner": "nixos", "repo": "nixpkgs", - "rev": "5a012ffbe2494cb777ec3dbace5811f927bddc72", + "rev": "d7c8095791ce3aafe97d9c16c1dc2f4e3d69a3ba", "type": "github" }, "original": { @@ -237,11 +237,11 @@ "nixpkgs-25_05": "nixpkgs-25_05" }, "locked": { - "lastModified": 1753285640, - "narHash": "sha256-ofa021NeHDXAxg5J8mSnn8rHa393PAlD85ZCetP4Qa0=", + "lastModified": 1752060039, + "narHash": "sha256-MqcbN/PgfXOv8S4q6GcmlORd6kJZ3UlFNhzCvLOEe4I=", "owner": "simple-nixos-mailserver", "repo": "nixos-mailserver", - "rev": "ce87c8a9771d1a20c3fa3b60113b9b0821627dcb", + "rev": "80d21ed7a1ab8007597f7cd9adc26ebc98b9611f", "type": "gitlab" }, "original": { diff --git a/flake.nix b/flake.nix index 961a270..8918622 100644 --- a/flake.nix +++ b/flake.nix @@ -25,21 +25,20 @@ self, nixpkgs, # systems, - colmena, sops-nix, # home-manager, + colmena, simple-nixos-mailserver, ... } @ inputs: let inherit (self) outputs; - lib = nixpkgs.lib; # Supported systems for your flake packages, shell, etc. 
systems = [ "x86_64-linux" ]; # This is a function that generates an attribute by calling a function you # pass to it, with each system as an argument - forAllSystems = lib.genAttrs systems; + forAllSystems = nixpkgs.lib.genAttrs systems; in { # Custom packages # Accessible through 'nix build', 'nix shell', etc @@ -56,34 +55,32 @@ nixosModules = import ./modules/nixos; colmenaHive = colmena.lib.makeHive self.outputs.colmena; - colmena = import ./colmena.nix {inherit inputs outputs;}; + colmena = { + meta = { + nixpkgs = import nixpkgs { + system = "x86_64-linux"; + overlays = [ + outputs.overlays.additions + outputs.overlays.modifications + outputs.overlays.unstable-packages - # Development shells - devShells = forAllSystems ( - system: let - pkgs = nixpkgs.legacyPackages.${system}; - in { - default = pkgs.mkShell { - packages = with pkgs; [ - self.packages.${system}.homelab-docs - colmena.packages.${system}.colmena - sops - age - nix-output-monitor - jq - ssh-to-age # For converting SSH keys to age keys + colmena.overlays.default ]; - - shellHook = '' - echo "🏠 Homelab Development Environment" - echo "Available commands:" - echo " colmena apply - Deploy all hosts" - echo " colmena apply --on @tag - Deploy specific tagged hosts" - echo " sops secrets/secrets.yaml - Edit secrets" - echo "" - ''; + config.allowUnfree = true; }; - } - ); + + specialArgs = { + inherit inputs outputs; + }; + }; + + defaults = import ./machines/_default/configuration.nix; + + sandbox = import ./machines/sandbox/configuration.nix; + auth = import ./machines/auth/configuration.nix; + mail = import ./machines/mail/configuration.nix; + monitor = import ./machines/monitor/configuration.nix; + photos = import ./machines/photos/configuration.nix; + }; }; } diff --git a/hive.nix b/hive.nix new file mode 100644 index 0000000..e0fc390 --- /dev/null +++ b/hive.nix @@ -0,0 +1,37 @@ +inputs @ { + self, + nixpkgs, + sops-nix, + simple-nixos-mailserver, + # home-manager, + outputs, + ... +}: { + sandbox = {name, ...}: { + imports = [./machines/${name}/definition.nix]; + deployment.tags = ["sandbox"]; + }; + + monitor = {name, ...}: { + imports = [./machines/${name}/definition.nix]; + deployment.tags = ["grafana" "prometheus"]; + }; + + auth = {name, ...}: { + imports = [./machines/${name}/definition.nix]; + deployment.tags = ["zitadel" "sso" "ldap"]; + }; + + mail = {name, ...}: { + imports = [ + ./machines/${name}/definition.nix + simple-nixos-mailserver.nixosModule + ]; + deployment.tags = ["mail"]; + }; + + photos = {name, ...}: { + imports = [./machines/${name}/definition.nix]; + deployment.tags = ["ente"]; + }; +} diff --git a/hosts/default.nix b/hosts/default.nix deleted file mode 100644 index 57da4f5..0000000 --- a/hosts/default.nix +++ /dev/null @@ -1,106 +0,0 @@ -{ - config, - lib, - pkgs, - inputs, - outputs, - ... 
-}: { - imports = [ - # Essential modules for all systems - inputs.sops-nix.nixosModules.sops - ../modules/homelab - # User configurations - ../users/plasmagoat.nix - - # Secrets management - ../secrets - ]; - - # Colmena deployment defaults - deployment = { - targetHost = lib.mkDefault "${config.homelab.hostname}.${config.homelab.domain}"; - tags = [config.nixpkgs.system config.networking.hostName]; - replaceUnknownProfiles = lib.mkDefault true; - buildOnTarget = lib.mkDefault false; - }; - - # Basic system configuration that applies to ALL systems - nix = { - settings = { - experimental-features = ["nix-command" "flakes"]; - auto-optimise-store = true; - allowed-users = ["@wheel"]; - trusted-users = ["root" "@wheel"]; - }; - - gc = { - automatic = true; - options = "--delete-older-than 15d"; - dates = "daily"; - }; - - optimise.automatic = true; - - extraOptions = '' - keep-outputs = true - keep-derivations = true - ''; - }; - - # Basic security - security.sudo.wheelNeedsPassword = false; - - # SSH configuration - services.openssh = { - enable = true; - openFirewall = true; - settings = { - PasswordAuthentication = false; - PermitRootLogin = "prohibit-password"; - KbdInteractiveAuthentication = false; - }; - }; - - services.sshguard.enable = true; - programs.ssh.startAgent = true; - - # Basic packages for all systems - environment.systemPackages = with pkgs; [ - dig - nmap - traceroute - vim - git - curl - python3 - htop - tree - ]; - - # Timezone and locale - time.timeZone = lib.mkDefault "Europe/Copenhagen"; - console.keyMap = lib.mkDefault "dk-latin1"; - i18n.defaultLocale = lib.mkDefault "en_US.UTF-8"; - - # System backup job (applies to all systems) - # homelab.global.backups.jobs = [ - # { - # name = "system-config"; - # backend = "restic"; - # paths = [ - # "/etc/nixos" - # "/etc/sops" - # "/var/lib/nixos" - # ]; - # schedule = "daily"; - # excludePatterns = [ - # "*/cache/*" - # "*/tmp/*" - # ]; - # } - # ]; - - # Default state version - system.stateVersion = lib.mkDefault "25.05"; -} diff --git a/hosts/monitor/default.nix b/hosts/monitor/default.nix deleted file mode 100644 index bd72a58..0000000 --- a/hosts/monitor/default.nix +++ /dev/null @@ -1,41 +0,0 @@ -{ - config, - name, - ... -}: { - sops.secrets."restic/default-password" = {}; - - homelab = { - enable = true; - hostname = name; - tags = [name]; - - monitoring.enable = true; - motd.enable = true; - - backups = { - enable = true; - backends = { - restic = { - enable = true; - repository = "/srv/restic-repo"; - passwordFile = config.sops.secrets."restic/default-password".path; - }; - }; - }; - - services.prometheus = { - enable = true; - }; - - services.gatus = { - enable = true; - ui = { - title = "Homelab Status Dashboard"; - header = "My Homelab Services"; - }; - }; - }; - - system.stateVersion = "25.05"; -} diff --git a/hosts/photos/default.nix b/hosts/photos/default.nix deleted file mode 100644 index d3b3038..0000000 --- a/hosts/photos/default.nix +++ /dev/null @@ -1,28 +0,0 @@ -{ - outputs, - name, - ... -}: let -in { - imports = [ - outputs.nixosModules.ente - ./ente.nix - # ./minio.nix - ]; - - homelab = { - enable = true; - hostname = name; - tags = [name]; - - monitoring.enable = true; - motd.enable = true; - services = { - minio.enable = true; - }; - }; - - deployment.tags = ["ente"]; - - system.stateVersion = "25.05"; -} diff --git a/hosts/photos/ente.nix b/hosts/photos/ente.nix deleted file mode 100644 index e1e9a0c..0000000 --- a/hosts/photos/ente.nix +++ /dev/null @@ -1,73 +0,0 @@ -{ - config, - pkgs, - ... 
-}: { - sops.secrets."ente/minio/root_password".owner = "ente"; - sops.secrets."ente/minio/root_user".owner = "ente"; - sops.secrets."service_accounts/ente/password".owner = "ente"; - - environment.systemPackages = with pkgs; [ - ente-cli - ]; - - services.ente.api = { - enable = true; - enableLocalDB = true; - - domain = "ente-museum.procopius.dk"; - settings = { - # apps = { - # accounts = "https://accounts.procopius.dk"; - # cast = "https://cast.procopius.dk"; - # public-albums = "https://albums.procopius.dk"; - # }; - - smtp = { - host = "mail.procopius.dk"; - port = "465"; - username = "ente@procopius.dk"; - password._secret = config.sops.secrets."service_accounts/ente/password".path; - # The email address from which to send the email. Set this to an email - # address whose credentials you're providing. - email = "ente@procopius.dk"; - # Optional override for the sender name in the emails. If specified, it will - # be used for all emails sent by the instance (default is email specific). - sender-name = "ente"; - }; - internal.admins = [ - 1580559962386438 - ]; - s3 = { - use_path_style_urls = true; - b2-eu-cen = { - endpoint = "https://ente-minio-api.procopius.dk"; - region = "us-east-1"; - bucket = "ente"; - key._secret = config.sops.secrets."ente/minio/root_user".path; - secret._secret = config.sops.secrets."ente/minio/root_password".path; - }; - }; - }; - }; - services.ente.web = { - enable = true; - domains = { - api = "ente-museum.procopius.dk"; - accounts = "ente-accounts.procopius.dk"; - albums = "ente-albums.procopius.dk"; - cast = "ente-cast.procopius.dk"; - photos = "ente-photos.procopius.dk"; - auth = "ente-auth.procopius.dk"; - }; - }; - - networking.firewall.allowedTCPPorts = [ - 3000 - 3001 - 3002 - 3003 - 3004 - 8080 - ]; -} diff --git a/hosts/photos/minio.nix b/hosts/photos/minio.nix deleted file mode 100644 index 1326b15..0000000 --- a/hosts/photos/minio.nix +++ /dev/null @@ -1,35 +0,0 @@ -{ - config, - pkgs, - lib, - ... -}: { - sops.secrets."ente/minio/root_user" = {}; - sops.secrets."ente/minio/root_password" = {}; - - sops.templates."minio-root-credentials".content = '' - MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"} - MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"} - ''; - - services.minio = { - enable = true; - rootCredentialsFile = config.sops.templates."minio-root-credentials".path; - }; - - systemd.services.minio = { - environment.MINIO_SERVER_URL = "https://ente-minio-api.procopius.dk"; - postStart = '' - # Wait until minio is up - ${lib.getExe pkgs.curl} --retry 5 --retry-connrefused --fail --no-progress-meter -o /dev/null "http://localhost:9000/minio/health/live" - - # Make sure bucket exists - mkdir -p ${lib.escapeShellArg config.services.minio.dataDir}/ente - ''; - }; - - networking.firewall.allowedTCPPorts = [ - 9000 - 9001 - ]; -} diff --git a/hosts/sandbox/default.nix b/hosts/sandbox/default.nix deleted file mode 100644 index efad52b..0000000 --- a/hosts/sandbox/default.nix +++ /dev/null @@ -1,53 +0,0 @@ -{ - config, - name, - ... 
-}: { - sops.secrets."restic/default-password" = {}; - - homelab = { - enable = true; - hostname = name; - tags = [name]; - - monitoring.enable = true; - logging.enable = true; - motd.enable = true; - - backups = { - enable = true; - backends = { - restic = { - enable = true; - repository = "/srv/restic-repo"; - passwordFile = config.sops.secrets."restic/default-password".path; - }; - }; - jobs = [ - { - name = "sandbox-home"; - backend = "restic"; - backendOptions = { - paths = ["/home/plasmagoat"]; - repository = "/srv/restic-repo"; - pruneOpts = [ - "--keep-daily 7" - "--keep-weekly 4" - "--keep-monthly 6" - "--keep-yearly 3" - ]; - }; - } - ]; - }; - - # services.loki.enable = true; - # services.prometheus.enable = true; - # services.grafana.enable = true; - # services.gatus.enable = true; - services.vaultwarden.enable = true; - services.caddy.enable = true; - }; - - system.stateVersion = "25.05"; -} diff --git a/infrastructure/nixos-cloud-init/README.md b/infrastructure/nixos-cloud-init/README.md index f733ea9..f2b880b 100644 --- a/infrastructure/nixos-cloud-init/README.md +++ b/infrastructure/nixos-cloud-init/README.md @@ -5,7 +5,7 @@ nix run github:nix-community/nixos-generators -- -f proxmox -c configuration.nix ``` -## Upload to proxmox +## Update to proxmox ``` scp /nix/store/jvwxp7agny9979fglf76s0ca9m2h6950-proxmox-nixos-cloud-init/vzdump-qemu-nixos-cloud-init.vma.zst root@192.168.1.206:/var/lib/vz/dump ``` @@ -16,6 +16,3 @@ qmrestore /var/lib/vz/dump/vzdump-qemu-nixos-cloud-init.vma.zst 9000 --unique tr qm template 9000 ``` - -## Future -Maybe look into nixos-everywhere like done here https://github.com/solomon-b/nixos-config diff --git a/infrastructure/proxmox/main.tf b/infrastructure/proxmox/main.tf index 62ac638..ca08aca 100644 --- a/infrastructure/proxmox/main.tf +++ b/infrastructure/proxmox/main.tf @@ -1,13 +1,13 @@ module "sandbox_vm" { source = "./modules/nixos-vm" - vmid = 123 - name = "sandbox" - target_node = var.pm_node - sshkeys = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air" - cipassword = "$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/" + vmid = 123 + name = "sandbox" + target_node = var.pm_node + sshkeys = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air" + cipassword = "$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/" # You can override any default variable here: # cpu_cores = 4 # memory = 2048 - disk_size = "10G" + # disk_size = "10G" } diff --git a/infrastructure/proxmox/providers.tf b/infrastructure/proxmox/providers.tf index 2d5b30c..07ff31b 100644 --- a/infrastructure/proxmox/providers.tf +++ b/infrastructure/proxmox/providers.tf @@ -1,6 +1,6 @@ provider "proxmox" { - pm_tls_insecure = true - pm_api_url = var.pm_api_url - pm_api_token_id = var.pm_api_token_id + pm_tls_insecure = true + pm_api_url = var.pm_api_url + pm_api_token_id = var.pm_api_token_id pm_api_token_secret = var.pm_api_token_secret } diff --git a/infrastructure/proxmox/terraform.tfstate b/infrastructure/proxmox/terraform.tfstate index 59a4f97..9b4fec3 100644 --- a/infrastructure/proxmox/terraform.tfstate +++ b/infrastructure/proxmox/terraform.tfstate @@ -1 +1 @@ 
-{"version":4,"terraform_version":"1.9.1","serial":17,"lineage":"c76b2921-285f-1904-f2ab-e6a410d16442","outputs":{},"resources":[{"module":"module.sandbox_vm","mode":"managed","type":"proxmox_vm_qemu","name":"nixos-vm","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" ","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/","ciupgrade":true,"ciuser":"root","clone":null,"clone_id":9000,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.228","default_ipv6_address":"2a05:f6c7:2030:0:be24:11ff:fe46:6c00","define_connection_info":true,"desc":"Managed by Terraform.","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide2":[],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"10G","storage":"pv1","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/123","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":1024,"name":"sandbox","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:46:6c:00","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":true,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"5ae92cdd-a036-4602-af8c-358197f958d9","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.228","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X 
plasmagoat@macbook-air\n","startup":"","tablet":true,"tags":"","target_node":"proxmox-01","target_nodes":null,"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":123},"sensitive_attributes":[[{"type":"get_attr","value":"cipassword"}],[{"type":"get_attr","value":"ssh_private_key"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null} +{"version":4,"terraform_version":"1.9.1","serial":16,"lineage":"c76b2921-285f-1904-f2ab-e6a410d16442","outputs":{},"resources":[{"module":"module.sandbox_vm","mode":"managed","type":"proxmox_vm_qemu","name":"nixos-vm","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" ","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/","ciupgrade":true,"ciuser":"root","clone":null,"clone_id":9000,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.228","default_ipv6_address":"","define_connection_info":true,"desc":"Managed by Terraform.","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide2":[],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"5G","storage":"pv1","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/123","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":1024,"name":"sandbox","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:46:6c:00","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":true,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"5ae92c
dd-a036-4602-af8c-358197f958d9","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.228","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air\n","startup":"","tablet":true,"tags":"","target_node":"proxmox-01","target_nodes":null,"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":123},"sensitive_attributes":[[{"type":"get_attr","value":"ssh_private_key"}],[{"type":"get_attr","value":"cipassword"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null} diff --git a/infrastructure/proxmox/terraform.tfstate.backup b/infrastructure/proxmox/terraform.tfstate.backup index 9b4fec3..848b2ff 100644 --- a/infrastructure/proxmox/terraform.tfstate.backup +++ b/infrastructure/proxmox/terraform.tfstate.backup @@ -1 +1 @@ -{"version":4,"terraform_version":"1.9.1","serial":16,"lineage":"c76b2921-285f-1904-f2ab-e6a410d16442","outputs":{},"resources":[{"module":"module.sandbox_vm","mode":"managed","type":"proxmox_vm_qemu","name":"nixos-vm","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" ","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"$6$rounds=4096$h9zcOYHvB.sy0Ff/$M4cbXjzqmJZ7xRTl3ILWXrg9PePqNzpv.L7MnvMrhcGieK3hrPniU5YEY2Z5/NC1n4QM7VLRSwyP9g9zdjp67/","ciupgrade":true,"ciuser":"root","clone":null,"clone_id":9000,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.228","default_ipv6_address":"","define_connection_info":true,"desc":"Managed by 
Terraform.","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide2":[],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"5G","storage":"pv1","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/123","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":1024,"name":"sandbox","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:46:6c:00","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":true,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"5ae92cdd-a036-4602-af8c-358197f958d9","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.228","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air\n","startup":"","tablet":true,"tags":"","target_node":"proxmox-01","target_nodes":null,"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":123},"sensitive_attributes":[[{"type":"get_attr","value":"ssh_private_key"}],[{"type":"get_attr","value":"cipassword"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null} +{"version":4,"terraform_version":"1.9.1","serial":15,"lineage":"c76b2921-285f-1904-f2ab-e6a410d16442","outputs":{},"resources":[{"module":"module.sandbox_vm","mode":"managed","type":"proxmox_vm_qemu","name":"nixos-vm","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" 
","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"","ciupgrade":true,"ciuser":"root","clone":null,"clone_id":9000,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.228","default_ipv6_address":"2a05:f6c7:2030:0:be24:11ff:fe46:6c00","define_connection_info":true,"desc":"Managed by Terraform.","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide2":[],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"5G","storage":"pv1","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/123","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":1024,"name":"sandbox","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:46:6c:00","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":true,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"5ae92cdd-a036-4602-af8c-358197f958d9","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.228","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air\n","startup":"","tablet":true,"tags":"","target_node":"proxmox-01","target_nodes":null,"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":123},"sensitive_attributes":[[{"type":"get_attr","value":"ssh_private_key"}],[{"type":"get_attr","value":"cipassword"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null} diff --git a/infrastructure/proxmox/terraform.tfvars b/infrastructure/proxmox/terraform.tfvars index 2460b7a..32006a4 100644 --- a/infrastructure/proxmox/terraform.tfvars +++ b/infrastructure/proxmox/terraform.tfvars @@ -1,6 +1,3 @@ -pm_api_url = 
"https://192.168.1.205:8006/api2/json" -pm_api_token_id = "root@pam!opentofu" -pm_api_token_secret = "7660e962-9240-44ea-b1dc-e5176caba450" pm_node = "proxmox-01" # nixos_template_id = 9100 diff --git a/infrastructure/proxmox/versions.tf b/infrastructure/proxmox/versions.tf index 3330c3b..a2341f9 100644 --- a/infrastructure/proxmox/versions.tf +++ b/infrastructure/proxmox/versions.tf @@ -1,7 +1,7 @@ terraform { required_providers { proxmox = { - source = "Telmate/proxmox" + source = "Telmate/proxmox" version = "3.0.2-rc01" } } diff --git a/machines/_default/configuration.nix b/machines/_default/configuration.nix index 6489a29..3d38005 100644 --- a/machines/_default/configuration.nix +++ b/machines/_default/configuration.nix @@ -18,17 +18,11 @@ replaceUnknownProfiles = lib.mkDefault true; buildOnTarget = lib.mkDefault false; targetHost = lib.mkDefault "${name}.lab"; - tags = [config.nixpkgs.system name "homelab"]; - keys = { - "age.key" = { - destDir = "/run/keys"; - keyFile = "/home/plasmagoat/.config/age/age.key"; - }; - }; + tags = lib.mkDefault [config.nixpkgs.system name "homelab"]; }; sops = { - age.keyFile = "/run/keys/age.key"; + age.keyFile = "/etc/sops/age.key"; defaultSopsFile = ../../secrets/secrets.yaml; }; diff --git a/machines/auth/authelia.nix b/machines/auth/authelia.nix index 01a5a04..dc57c96 100644 --- a/machines/auth/authelia.nix +++ b/machines/auth/authelia.nix @@ -9,164 +9,166 @@ in { 9091 ]; - services.authelia.instances.procopius = { - enable = true; - settings = { - theme = "auto"; - server = { - buffers = { - read = 16384; - write = 16384; + services = { + authelia.instances.procopius = { + enable = true; + settings = { + theme = "auto"; + server = { + buffers = { + read = 16384; + write = 16384; + }; }; - }; - authentication_backend.ldap = { - implementation = "lldap"; - address = "ldap://localhost:3890"; - base_dn = "dc=procopius,dc=dk"; - user = "uid=authelia,ou=people,dc=procopius,dc=dk"; - }; - definitions = { - network = { - internal = [ - "192.168.1.0/24" + authentication_backend.ldap = { + implementation = "lldap"; + address = "ldap://localhost:3890"; + base_dn = "dc=procopius,dc=dk"; + user = "uid=authelia,ou=people,dc=procopius,dc=dk"; + }; + definitions = { + network = { + internal = [ + "192.168.1.0/24" + ]; + }; + }; + access_control = { + default_policy = "deny"; + # We want this rule to be low priority so it doesn't override the others + rules = lib.mkAfter [ + { + domain = [ + "proxmox.procopius.dk" + "traefik.procopius.dk" + "prometheus.procopius.dk" + "alertmanager.procopius.dk" + ]; + policy = "one_factor"; + subject = [ + ["group:server-admin"] + ]; + } + # bypass /api and /ping + { + domain = ["*.procopius.dk"]; + policy = "bypass"; + resources = [ + "^/api$" + "^/api/" + "^/ping$" + ]; + } + # media + { + domain = [ + "sonarr.procopius.dk" + "radarr.procopius.dk" + "readarr.procopius.dk" + "lidarr.procopius.dk" + "bazarr.procopius.dk" + "prowlarr.procopius.dk" + ]; + policy = "one_factor"; + subject = [ + ["group:media-admin"] + ]; + } + # authenticated + { + domain = [ + "gatus.procopius.dk" + ]; + policy = "one_factor"; + } + # bypass auth internally + # { + # domain = [ + # "gatus.procopius.dk" + # "prometheus.procopius.dk" + # "alertmanager.procopius.dk" + # "sonarr.procopius.dk" + # "radarr.procopius.dk" + # "readarr.procopius.dk" + # "lidarr.procopius.dk" + # "bazarr.procopius.dk" + # "prowlarr.procopius.dk" + # ]; + # policy = "bypass"; + # networks = [ + # "internal" + # ]; + # } ]; }; + storage.postgres = { + address = 
"unix:///run/postgresql"; + database = authelia; + username = authelia; + # I'm using peer authentication, so this doesn't actually matter, but Authelia + # complains if I don't have it. + # https://github.com/authelia/authelia/discussions/7646 + password = authelia; + }; + session = { + redis.host = "/var/run/redis-procopius/redis.sock"; + cookies = [ + { + domain = "procopius.dk"; + authelia_url = "https://authelia.procopius.dk"; + # The period of time the user can be inactive for before the session is destroyed + inactivity = "1M"; + # The period of time before the cookie expires and the session is destroyed + expiration = "3M"; + # The period of time before the cookie expires and the session is destroyed + # when the remember me box is checked + remember_me = "1y"; + } + ]; + }; + notifier.smtp = { + address = "smtp://mail.procopius.dk"; + username = "authelia@procopius.dk"; + sender = "authelia@procopius.dk"; + }; + log.level = "info"; + # identity_providers.oidc = { + # # https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter + # claims_policies = { + # # karakeep.id_token = ["email"]; + # }; + # cors = { + # endpoints = ["token"]; + # allowed_origins_from_client_redirect_uris = true; + # }; + # authorization_policies.default = { + # default_policy = "one_factor"; + # rules = [ + # { + # policy = "deny"; + # subject = "group:lldap_strict_readonly"; + # } + # ]; + # }; + # }; + # Necessary for Traefik integration + # See https://www.authelia.com/integration/proxies/traefik/#implementation + server.endpoints.authz.forward-auth.implementation = "ForwardAuth"; }; - access_control = { - default_policy = "deny"; - # We want this rule to be low priority so it doesn't override the others - rules = lib.mkAfter [ - { - domain = [ - "proxmox.procopius.dk" - "traefik.procopius.dk" - "prometheus.procopius.dk" - "alertmanager.procopius.dk" - ]; - policy = "one_factor"; - subject = [ - ["group:server-admin"] - ]; - } - # bypass /api and /ping - { - domain = ["*.procopius.dk"]; - policy = "bypass"; - resources = [ - "^/api$" - "^/api/" - "^/ping$" - ]; - } - # media - { - domain = [ - "sonarr.procopius.dk" - "radarr.procopius.dk" - "readarr.procopius.dk" - "lidarr.procopius.dk" - "bazarr.procopius.dk" - "prowlarr.procopius.dk" - ]; - policy = "one_factor"; - subject = [ - ["group:media-admin"] - ]; - } - # authenticated - { - domain = [ - "gatus.procopius.dk" - ]; - policy = "one_factor"; - } - # bypass auth internally - # { - # domain = [ - # "gatus.procopius.dk" - # "prometheus.procopius.dk" - # "alertmanager.procopius.dk" - # "sonarr.procopius.dk" - # "radarr.procopius.dk" - # "readarr.procopius.dk" - # "lidarr.procopius.dk" - # "bazarr.procopius.dk" - # "prowlarr.procopius.dk" - # ]; - # policy = "bypass"; - # networks = [ - # "internal" - # ]; - # } - ]; + # Templates don't work correctly when parsed from Nix, so our OIDC clients are defined here + # settingsFiles = [./oidc_clients.yaml]; + secrets = with config.sops; { + jwtSecretFile = secrets."authelia/jwt_secret".path; + # oidcIssuerPrivateKeyFile = secrets."authelia/jwks".path; + # oidcHmacSecretFile = secrets."authelia/hmac_secret".path; + sessionSecretFile = secrets."authelia/session_secret".path; + storageEncryptionKeyFile = secrets."authelia/storage_encryption_key".path; }; - storage.postgres = { - address = "unix:///run/postgresql"; - database = authelia; - username = authelia; - # I'm using peer authentication, so this doesn't actually matter, but Authelia - # 
complains if I don't have it. - # https://github.com/authelia/authelia/discussions/7646 - password = authelia; + environmentVariables = with config.sops; { + AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = + secrets."authelia/lldap_authelia_password".path; + AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = secrets.smtp-password_authelia.path; }; - session = { - redis.host = "/var/run/redis-procopius/redis.sock"; - cookies = [ - { - domain = "procopius.dk"; - authelia_url = "https://authelia.procopius.dk"; - # The period of time the user can be inactive for before the session is destroyed - inactivity = "1M"; - # The period of time before the cookie expires and the session is destroyed - expiration = "3M"; - # The period of time before the cookie expires and the session is destroyed - # when the remember me box is checked - remember_me = "1y"; - } - ]; - }; - notifier.smtp = { - address = "smtp://mail.procopius.dk"; - username = "authelia@procopius.dk"; - sender = "authelia@procopius.dk"; - }; - log.level = "info"; - # identity_providers.oidc = { - # # https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter - # claims_policies = { - # # karakeep.id_token = ["email"]; - # }; - # cors = { - # endpoints = ["token"]; - # allowed_origins_from_client_redirect_uris = true; - # }; - # authorization_policies.default = { - # default_policy = "one_factor"; - # rules = [ - # { - # policy = "deny"; - # subject = "group:lldap_strict_readonly"; - # } - # ]; - # }; - # }; - # Necessary for Traefik integration - # See https://www.authelia.com/integration/proxies/traefik/#implementation - server.endpoints.authz.forward-auth.implementation = "ForwardAuth"; - }; - # Templates don't work correctly when parsed from Nix, so our OIDC clients are defined here - # settingsFiles = [./oidc_clients.yaml]; - secrets = with config.sops; { - jwtSecretFile = secrets."authelia/jwt_secret".path; - # oidcIssuerPrivateKeyFile = secrets."authelia/jwks".path; - # oidcHmacSecretFile = secrets."authelia/hmac_secret".path; - sessionSecretFile = secrets."authelia/session_secret".path; - storageEncryptionKeyFile = secrets."authelia/storage_encryption_key".path; - }; - environmentVariables = with config.sops; { - AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE = - secrets."authelia/lldap_authelia_password".path; - AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE = secrets.smtp-password_authelia.path; }; }; diff --git a/machines/auth/bootstrap/service-accounts.nix b/machines/auth/bootstrap/service-accounts.nix index e4a295e..dc37282 100644 --- a/machines/auth/bootstrap/service-accounts.nix +++ b/machines/auth/bootstrap/service-accounts.nix @@ -3,7 +3,6 @@ sops.secrets."service_accounts/forgejo/password" = {}; sops.secrets."service_accounts/jellyfin/password" = {}; sops.secrets."service_accounts/mail/password" = {}; - sops.secrets."service_accounts/ente/password" = {}; sops.templates."service-accounts.json" = { content = '' { @@ -45,16 +44,6 @@ "mail" ] } - { - "id": "ente", - "email": "ente@procopius.dk", - "password": "${config.sops.placeholder."service_accounts/ente/password"}", - "displayName": "ente", - "groups": [ - "lldap_password_manager", - "mail" - ] - } ''; path = "/bootstrap/user-configs/service-accounts.json"; owner = "lldap"; diff --git a/machines/auth/configuration.nix b/machines/auth/configuration.nix index 37900b9..9f51678 100644 --- a/machines/auth/configuration.nix +++ b/machines/auth/configuration.nix @@ -4,7 +4,6 @@ ./authelia.nix ./postgres.nix ./redis.nix - 
../modules/pgbackrest.nix ]; deployment.tags = ["authelia" "sso" "ldap" "lldap"]; diff --git a/machines/auth/postgres.nix b/machines/auth/postgres.nix index 5b28cbe..f73a57a 100644 --- a/machines/auth/postgres.nix +++ b/machines/auth/postgres.nix @@ -18,7 +18,6 @@ authentication = lib.mkForce '' # TYPE DATABASE USER ADDRESS METHOD local all all trust - host all all 127.0.0.1/32 trust ''; }; } diff --git a/machines/mail/configuration.nix b/machines/mail/configuration.nix index 9582a67..15700a3 100644 --- a/machines/mail/configuration.nix +++ b/machines/mail/configuration.nix @@ -2,7 +2,6 @@ imports = [ ./mailserver.nix ./networking.nix - ./roundcube.nix inputs.simple-nixos-mailserver.nixosModule ]; diff --git a/machines/mail/mailserver.nix b/machines/mail/mailserver.nix index 6cb0872..5b0563a 100644 --- a/machines/mail/mailserver.nix +++ b/machines/mail/mailserver.nix @@ -1,14 +1,10 @@ {config, ...}: { sops.secrets."service_accounts/mail/password" = {}; - sops.secrets."cloudflare/dns-api-token" = {}; - sops.secrets."cloudflare/zone-api-token" = {}; - mailserver = { enable = true; stateVersion = 3; fqdn = "mail.procopius.dk"; domains = ["procopius.dk"]; - dmarcReporting.enable = true; localDnsResolver = false; ldap = { enable = true; @@ -32,17 +28,10 @@ searchBase = "ou=people,dc=procopius,dc=dk"; }; - certificateScheme = "acme"; - acmeCertificateName = "mail.procopius.dk"; + # Use Let's Encrypt certificates. Note that this needs to set up a stripped + # down nginx and opens port 80. + certificateScheme = "acme-nginx"; }; security.acme.acceptTerms = true; security.acme.defaults.email = "david.mikael@proton.me"; - security.acme.defaults = { - dnsProvider = "cloudflare"; - dnsResolver = "1.1.1.1:53"; - credentialFiles = { - "CF_DNS_API_TOKEN_FILE" = config.sops.secrets."cloudflare/dns-api-token".path; - "CF_ZONE_API_TOKEN_FILE" = config.sops.secrets."cloudflare/zone-api-token".path; - }; - }; } diff --git a/machines/mail/roundcube.nix b/machines/mail/roundcube.nix deleted file mode 100644 index 447f8b0..0000000 --- a/machines/mail/roundcube.nix +++ /dev/null @@ -1,22 +0,0 @@ -{ - lib, - config, - ... -}: { - services.roundcube = { - enable = true; - hostName = "roundcube.procopius.dk"; - extraConfig = '' - # starttls needed for authentication, so the fqdn required to match - # the certificate - $config['smtp_host'] = "tls://${config.mailserver.fqdn}"; - $config['smtp_user'] = "%u"; - $config['smtp_pass'] = "%p"; - ''; - }; - - services.nginx.virtualHosts."roundcube.procopius.dk" = { - forceSSL = lib.mkForce false; - enableACME = lib.mkForce false; - }; -} diff --git a/machines/modules/README.md b/machines/modules/README.md deleted file mode 100644 index b775dd2..0000000 --- a/machines/modules/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Homelab nixos global config - -A global module config for my homelab, where we gather: -* Monitoring endpoints (/metrics + port + host) -* Promtail log files -* Reverse proxy configuration -* Postgres backups (pgbackrest) -* Restic backups -* ...? -* LDAP config -* OIDC configs diff --git a/machines/modules/pgbackrest.nix b/machines/modules/pgbackrest.nix deleted file mode 100644 index 4c4cf12..0000000 --- a/machines/modules/pgbackrest.nix +++ /dev/null @@ -1,43 +0,0 @@ -{ - lib, - config, - name, - # meta, - ... 
-}: { - fileSystems."/mnt/pgdumps" = { - device = "192.168.1.226:/volume1/database_backups/${name}"; - fsType = "nfs4"; - options = ["x-systemd.automount" "noatime" "_netdev"]; - }; - services.postgresqlBackup = { - enable = true; - # We trigger this through restic - startAt = []; - # startAt = "*-*-* 01:15:00"; - compression = "zstd"; - databases = [ - "authelia-procopius" - "lldap" - ]; - }; - - # services.restic.backups.b2 = { - # environmentFile = config.sops.templates.restic_floofs_env.path; - # repositoryFile = config.sops.secrets.b2_floofs_server_repository.path; - # passwordFile = config.sops.secrets.b2_floofs_server_password.path; - - # paths = ["/var/backup/postgresql"]; - # initialize = true; - # pruneOpts = [ - # "--keep-daily 7" - # "--keep-weekly 3" - # "--keep-monthly 3" - # ]; - # timerConfig = { - # OnCalendar = "04:45"; - # Persistent = true; - # }; - # }; - # systemd.services.restic-backups-b2.wants = ["postgresqlBackup.service"]; -} diff --git a/machines/monitor/loki.nix b/machines/monitor/loki.nix index 63be626..8cd9cc7 100644 --- a/machines/monitor/loki.nix +++ b/machines/monitor/loki.nix @@ -1,5 +1,5 @@ { - networking.firewall.allowedTCPPorts = [3100]; + networking.firewall.allowedTCPPorts = [ 3100 ]; services.loki = { enable = true; diff --git a/machines/photos/configuration.nix b/machines/photos/configuration.nix index 3369b76..db54baf 100644 --- a/machines/photos/configuration.nix +++ b/machines/photos/configuration.nix @@ -2,7 +2,6 @@ imports = [ outputs.nixosModules.ente ./ente.nix - ./minio.nix ]; deployment.tags = ["ente"]; diff --git a/machines/photos/ente.nix b/machines/photos/ente.nix index e1e9a0c..abca1d9 100644 --- a/machines/photos/ente.nix +++ b/machines/photos/ente.nix @@ -1,73 +1,25 @@ { - config, - pkgs, - ... -}: { - sops.secrets."ente/minio/root_password".owner = "ente"; - sops.secrets."ente/minio/root_user".owner = "ente"; - sops.secrets."service_accounts/ente/password".owner = "ente"; - - environment.systemPackages = with pkgs; [ - ente-cli - ]; - services.ente.api = { enable = true; enableLocalDB = true; - domain = "ente-museum.procopius.dk"; + domain = "ente-v2.procopius.dk"; settings = { # apps = { # accounts = "https://accounts.procopius.dk"; # cast = "https://cast.procopius.dk"; # public-albums = "https://albums.procopius.dk"; # }; - - smtp = { - host = "mail.procopius.dk"; - port = "465"; - username = "ente@procopius.dk"; - password._secret = config.sops.secrets."service_accounts/ente/password".path; - # The email address from which to send the email. Set this to an email - # address whose credentials you're providing. - email = "ente@procopius.dk"; - # Optional override for the sender name in the emails. If specified, it will - # be used for all emails sent by the instance (default is email specific). 
- sender-name = "ente"; - }; - internal.admins = [ - 1580559962386438 - ]; - s3 = { - use_path_style_urls = true; - b2-eu-cen = { - endpoint = "https://ente-minio-api.procopius.dk"; - region = "us-east-1"; - bucket = "ente"; - key._secret = config.sops.secrets."ente/minio/root_user".path; - secret._secret = config.sops.secrets."ente/minio/root_password".path; - }; - }; }; }; services.ente.web = { enable = true; domains = { - api = "ente-museum.procopius.dk"; - accounts = "ente-accounts.procopius.dk"; - albums = "ente-albums.procopius.dk"; - cast = "ente-cast.procopius.dk"; - photos = "ente-photos.procopius.dk"; - auth = "ente-auth.procopius.dk"; + api = "ente-v2.procopius.dk"; + accounts = "accounts.procopius.dk"; + albums = "albums.procopius.dk"; + cast = "cast.procopius.dk"; + photos = "photos.procopius.dk"; }; }; - - networking.firewall.allowedTCPPorts = [ - 3000 - 3001 - 3002 - 3003 - 3004 - 8080 - ]; } diff --git a/machines/photos/minio.nix b/machines/photos/minio.nix index 1326b15..c3d6ee2 100644 --- a/machines/photos/minio.nix +++ b/machines/photos/minio.nix @@ -1,35 +1,6 @@ { - config, - pkgs, - lib, - ... -}: { - sops.secrets."ente/minio/root_user" = {}; - sops.secrets."ente/minio/root_password" = {}; - - sops.templates."minio-root-credentials".content = '' - MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"} - MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"} - ''; - services.minio = { enable = true; - rootCredentialsFile = config.sops.templates."minio-root-credentials".path; + rootCredentialsFile = "/etc/nixos/minio-root-credentials"; }; - - systemd.services.minio = { - environment.MINIO_SERVER_URL = "https://ente-minio-api.procopius.dk"; - postStart = '' - # Wait until minio is up - ${lib.getExe pkgs.curl} --retry 5 --retry-connrefused --fail --no-progress-meter -o /dev/null "http://localhost:9000/minio/health/live" - - # Make sure bucket exists - mkdir -p ${lib.escapeShellArg config.services.minio.dataDir}/ente - ''; - }; - - networking.firewall.allowedTCPPorts = [ - 9000 - 9001 - ]; } diff --git a/machines/sandbox/configuration.nix b/machines/sandbox/configuration.nix index 62d1715..6c1ca72 100644 --- a/machines/sandbox/configuration.nix +++ b/machines/sandbox/configuration.nix @@ -1,18 +1,5 @@ -{outputs, ...}: { +{ deployment.tags = ["sandbox"]; - imports = [ - outputs.nixosModules.global-config - ]; - - homelab.global = { - enable = true; - hostname = "sandbox"; - domain = "sandbox.local"; - environment = "production"; - location = "proxmox"; - tags = ["sandbox"]; - }; - system.stateVersion = "25.05"; } diff --git a/modules/homelab/README.md b/modules/homelab/README.md deleted file mode 100644 index 6fd7d72..0000000 --- a/modules/homelab/README.md +++ /dev/null @@ -1,577 +0,0 @@ -# Homelab Configuration Documentation - -## Overview - -This homelab configuration system provides a unified way to manage services across multiple nodes with automatic aggregation of monitoring, logging, backup, and reverse proxy configurations. The system is built on NixOS and follows a modular architecture with both local and global configuration scopes. 
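
As a quick orientation before the option reference below, a node opts into the system by importing the homelab module and setting the basic identity options. This is only a sketch: the `outputs.nixosModules.global-config` import path is borrowed from the (removed) sandbox machine configuration elsewhere in this patch and may not match the final module name, and `web-01` is a placeholder hostname.

```nix
# Hypothetical minimal node configuration; option names follow the reference below.
{outputs, ...}: {
  imports = [
    # Assumed module entry point, mirroring the removed sandbox configuration
    outputs.nixosModules.global-config
  ];

  homelab = {
    enable = true;
    hostname = "web-01";
    environment = "production";
    tags = ["web"];
  };
}
```

The remaining behaviour (node exporter, log shipping, proxy entries) is then driven by the per-section and per-service options documented in the rest of this file.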
- -## Core Homelab Options - -### Basic Configuration (`homelab.*`) - -```nix -homelab = { - enable = true; # Enable homelab fleet configuration - hostname = "node-01"; # Hostname for this system - domain = "lab"; # Base domain for the homelab (default: "lab") - externalDomain = "procopius.dk"; # External domain to the homelab - environment = "production"; # Environment type: "production" | "staging" | "development" - location = "homelab"; # Physical location identifier - tags = ["web" "database"]; # Tags for this system -}; -``` - -### Monitoring System (`homelab.monitoring.*`) - -```nix -homelab.monitoring = { - enable = true; # Enable monitoring system - - # Node exporter (automatically enabled) - nodeExporter = { - enable = true; # Enable node exporter (default: true) - port = 9100; # Node exporter port (default: 9100) - }; - - # Manual metrics endpoints - metrics = [ - { - name = "custom-app"; # Metric endpoint name - host = "localhost"; # Host (default: homelab.hostname) - port = 8080; # Port for metrics endpoint - path = "/metrics"; # Metrics path (default: "/metrics") - jobName = "custom"; # Prometheus job name - scrapeInterval = "30s"; # Scrape interval (default: "30s") - labels = { # Additional labels - component = "web"; - }; - } - ]; - - # Manual health checks - healthChecks = [ - { - name = "web-service"; # Health check name - host = "localhost"; # Host (default: homelab.hostname) - port = 80; # Port (nullable) - path = "/health"; # Health check path (default: "/") - protocol = "http"; # Protocol: "http" | "https" | "tcp" | "icmp" - method = "GET"; # HTTP method (default: "GET") - interval = "30s"; # Check interval (default: "30s") - timeout = "10s"; # Timeout (default: "10s") - conditions = [ # Check conditions - "[STATUS] == 200" - ]; - group = "web"; # Group name (default: "manual") - labels = {}; # Additional labels - enabled = true; # Enable check (default: true) - } - ]; - - # Read-only aggregated data (automatically populated) - allMetrics = [...]; # All metrics from this node - allHealthChecks = [...]; # All health checks from this node - global = { # Global aggregation from all nodes - allMetrics = [...]; # All metrics from entire fleet - allHealthChecks = [...]; # All health checks from entire fleet - metricsByJobName = {...}; # Grouped by job name - healthChecksByGroup = {...}; # Grouped by group - summary = { - totalMetrics = 42; - totalHealthChecks = 15; - nodesCovered = ["node-01" "node-02"]; - }; - }; -}; -``` - -### Logging System (`homelab.logging.*`) - -```nix -homelab.logging = { - enable = true; # Enable logging system - - # Promtail configuration - promtail = { - enable = true; # Enable Promtail (default: true) - port = 9080; # Promtail port (default: 9080) - clients = [ # Loki clients - { - url = "http://monitor.lab:3100/loki/api/v1/push"; - tenant_id = null; # Optional tenant ID - } - ]; - }; - - # Log sources - sources = [ - { - name = "app-logs"; # Source name - type = "file"; # Type: "journal" | "file" | "syslog" | "docker" - files = { - paths = ["/var/log/app.log"]; # File paths - multiline = { # Optional multiline config - firstLineRegex = "^\\d{4}-\\d{2}-\\d{2}"; - maxWaitTime = "3s"; - }; - }; - journal = { # Journal config (for type="journal") - path = "/var/log/journal"; - }; - labels = { # Additional labels - application = "myapp"; - }; - pipelineStages = []; # Promtail pipeline stages - enabled = true; # Enable source (default: true) - } - ]; - - defaultLabels = { # Default labels for all sources - hostname = "node-01"; - environment = 
"production"; - location = "homelab"; - }; - - # Read-only aggregated data - allSources = [...]; # All sources from this node - global = { # Global aggregation - allSources = [...]; # All sources from entire fleet - sourcesByType = {...}; # Grouped by type - summary = { - total = 25; - byType = {...}; - byNode = {...}; - }; - }; -}; -``` - -### Backup System (`homelab.backups.*`) - -```nix -homelab.backups = { - enable = true; # Enable backup system - - # Backup jobs - jobs = [ - { - name = "database-backup"; # Job name - backend = "restic-s3"; # Backend name (must exist in backends) - backendOptions = { # Backend-specific overrides - repository = "custom-repo"; - }; - labels = { # Additional labels - type = "database"; - }; - } - ]; - - # Backend configurations (defined by imported modules) - backends = { - restic-s3 = {...}; # Defined in restic.nix - }; - - defaultLabels = { # Default labels for all jobs - hostname = "node-01"; - environment = "production"; - location = "homelab"; - }; - - monitoring = true; # Enable backup monitoring (default: true) - - # Read-only aggregated data - allJobs = [...]; # All jobs from this node - allBackends = [...]; # All backend names from this node - global = { # Global aggregation - allJobs = [...]; # All jobs from entire fleet - allBackends = [...]; # All backends from entire fleet - jobsByBackend = {...}; # Grouped by backend - summary = { - total = 15; - byBackend = {...}; - byNode = {...}; - uniqueBackends = ["restic-s3" "borgbackup"]; - }; - }; -}; -``` - -### Reverse Proxy System (`homelab.reverseProxy.*`) - -```nix -homelab.reverseProxy = { - enable = true; # Enable reverse proxy system - - # Proxy entries - entries = [ - { - subdomain = "app"; # Subdomain - host = "localhost"; # Backend host (default: homelab.hostname) - port = 8080; # Backend port - path = "/"; # Backend path (default: "/") - enableAuth = false; # Enable authentication (default: false) - enableSSL = true; # Enable SSL (default: true) - } - ]; - - # Read-only aggregated data - allEntries = [...]; # All entries from this node - global = { # Global aggregation - allEntries = [...]; # All entries from entire fleet - entriesBySubdomain = {...}; # Grouped by subdomain - entriesWithAuth = [...]; # Entries with authentication - entriesWithoutAuth = [...]; # Entries without authentication - summary = { - total = 12; - byNode = {...}; - withAuth = 5; - withoutAuth = 7; - }; - }; -}; -``` - -## Service Configuration Pattern - -All services follow a consistent pattern with automatic monitoring, logging, and proxy integration. 
- -### Generic Service Structure (`homelab.services.${serviceName}.*`) - -```nix -homelab.services.myservice = { - enable = true; # Enable the service - port = 8080; # Main service port - description = "My Service"; # Service description - - # Monitoring integration (automatic when enabled) - monitoring = { - enable = true; # Enable monitoring (default: true when service enabled) - - metrics = { - enable = true; # Enable metrics endpoint (default: true) - path = "/metrics"; # Metrics path (default: "/metrics") - extraEndpoints = [ # Additional metric endpoints - { - name = "admin-metrics"; - port = 8081; - path = "/admin/metrics"; - jobName = "myservice-admin"; - } - ]; - }; - - healthCheck = { - enable = true; # Enable health check (default: true) - path = "/health"; # Health check path (default: "/health") - conditions = [ # Check conditions - "[STATUS] == 200" - ]; - extraChecks = [ # Additional health checks - { - name = "myservice-api"; - port = 8080; - path = "/api/health"; - conditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 500"]; - } - ]; - }; - - extraLabels = { # Additional labels for all monitoring - tier = "application"; - }; - }; - - # Logging integration (automatic when enabled) - logging = { - enable = true; # Enable logging - files = [ # Log files to collect - "/var/log/myservice/app.log" - "/var/log/myservice/error.log" - ]; - - parsing = { - regex = "^(?P\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}) (?P\\w+) (?P.*)"; - extractFields = ["level"]; # Fields to extract as labels - }; - - multiline = { # Multiline log handling - firstLineRegex = "^\\d{4}-\\d{2}-\\d{2}"; - maxWaitTime = "3s"; - }; - - extraLabels = { # Additional labels - application = "myservice"; - }; - - extraSources = [ # Additional log sources - { - name = "myservice-access"; - type = "file"; - files.paths = ["/var/log/myservice/access.log"]; - } - ]; - }; - - # Reverse proxy integration (automatic when enabled) - proxy = { - enable = true; # Enable reverse proxy - subdomain = "myservice"; # Subdomain (default: service name) - enableAuth = false; # Enable authentication (default: false) - - additionalSubdomains = [ # Additional proxy entries - { - subdomain = "myservice-api"; - port = 8081; - path = "/api"; - enableAuth = true; - } - ]; - }; - - # Service-specific options - customOption = "value"; # Service-specific configuration -}; -``` - -## Example Service Implementations - -### Prometheus Service - -```nix -homelab.services.prometheus = { - enable = true; - port = 9090; - - # Prometheus-specific options - retention = "15d"; # Data retention period - alertmanager = { - enable = true; - url = "alertmanager.lab:9093"; - }; - extraScrapeConfigs = []; # Additional scrape configs - extraAlertingRules = []; # Additional alerting rules - globalConfig = { # Prometheus global config - scrape_interval = "15s"; - evaluation_interval = "15s"; - }; - extraFlags = []; # Additional command line flags - ruleFiles = []; # Additional rule files - - # Automatic integrations - monitoring.enable = true; # Self-monitoring - logging.enable = true; # Log collection - proxy = { - enable = true; - subdomain = "prometheus"; - enableAuth = true; # Admin interface needs protection - }; -}; -``` - -### Gatus Service - -```nix -homelab.services.gatus = { - enable = true; - port = 8080; - - # Gatus-specific options - ui = { - title = "Homelab Status"; - header = "Homelab Services Status"; - link = "https://status.procopius.dk"; - buttons = [ - { name = "Grafana"; link = "https://grafana.procopius.dk"; } - { name = "Prometheus"; link = 
"https://prometheus.procopius.dk"; } - ]; - }; - - alerting = { # Discord/Slack/etc notifications - discord = { - webhook-url = "https://discord.com/api/webhooks/..."; - default-alert = { - enabled = true; - failure-threshold = 3; - success-threshold = 2; - }; - }; - }; - - storage = { # Storage backend - type = "memory"; # or "postgres", "sqlite" - }; - - web.address = "0.0.0.0"; - extraConfig = {}; # Additional Gatus configuration - - # Automatic integrations - monitoring.enable = true; - logging.enable = true; - proxy = { - enable = true; - subdomain = "status"; - enableAuth = false; # Status page should be public - }; -}; -``` - -## Global Aggregation System - -The homelab system automatically aggregates configuration from all nodes in your fleet, making it easy to have centralized monitoring and management. - -### How Global Aggregation Works - -1. **Local Configuration**: Each node defines its own services and configurations -2. **Automatic Collection**: The system automatically collects data from all nodes using the `base.nix` aggregator -3. **Enhancement**: Each collected item is enhanced with node context (`_nodeName`, `_nodeConfig`, etc.) -4. **Global Exposure**: Aggregated data is exposed in `*.global.*` options - -### Global Data Structure - -```nix -# Available on every node with global data from entire fleet -homelab.monitoring.global = { - allMetrics = [ # All metrics from all nodes - { - name = "prometheus-main"; - host = "monitor"; - port = 9090; - # ... other fields - _nodeName = "monitor"; # Source node name - _nodeConfig = {...}; # Source node config - _fullAddress = "monitor:9090"; - _metricsUrl = "http://monitor:9090/metrics"; - } - # ... more metrics from other nodes - ]; - - allHealthChecks = [...]; # All health checks from all nodes - metricsByJobName = { # Grouped by Prometheus job name - "prometheus" = [...]; - "node" = [...]; - }; - healthChecksByGroup = { # Grouped by health check group - "services" = [...]; - "infrastructure" = [...]; - }; - summary = { - totalMetrics = 42; - totalHealthChecks = 15; - nodesCovered = ["monitor" "web-01" "db-01"]; - }; -}; - -homelab.logging.global = { - allSources = [...]; # All log sources from all nodes - sourcesByType = { - "file" = [...]; - "journal" = [...]; - }; - summary = {...}; -}; - -homelab.backups.global = { - allJobs = [...]; # All backup jobs from all nodes - allBackends = [...]; # All backup backends from all nodes - jobsByBackend = {...}; - summary = {...}; -}; - -homelab.reverseProxy.global = { - allEntries = [...]; # All proxy entries from all nodes - entriesBySubdomain = {...}; - entriesWithAuth = [...]; - entriesWithoutAuth = [...]; - summary = {...}; -}; -``` - -### Using Global Data - -Services like Prometheus and Gatus automatically use global data: - -```nix -# Prometheus automatically scrapes ALL metrics from the entire fleet -services.prometheus.scrapeConfigs = - # Automatically generated from homelab.monitoring.global.allMetrics - -# Gatus automatically monitors ALL health checks from the entire fleet -services.gatus.settings.endpoints = - # Automatically generated from homelab.monitoring.global.allHealthChecks -``` - -## Integration Examples - -### Adding a New Service - -1. **Create the service configuration**: -```nix -homelab.services.myapp = { - enable = true; - port = 3000; - monitoring.enable = true; - logging.enable = true; - proxy = { - enable = true; - subdomain = "myapp"; - }; -}; -``` - -2. 
**The system automatically**: - - Adds metrics endpoint to Prometheus (fleet-wide) - - Adds health check to Gatus (fleet-wide) - - Configures log collection to Loki - - Sets up reverse proxy entry - - Exposes the service globally for other nodes - -### Multi-Node Setup - -```nix -# Node 1 (monitor.nix) -homelab = { - hostname = "monitor"; - services.prometheus.enable = true; - services.gatus.enable = true; -}; - -# Node 2 (web.nix) -homelab = { - hostname = "web-01"; - services.nginx.enable = true; - services.webapp.enable = true; -}; - -# Node 3 (database.nix) -homelab = { - hostname = "db-01"; - services.postgresql.enable = true; - services.redis.enable = true; -}; -``` - -Result: Monitor node automatically discovers and monitors all services across all three nodes. - -## File Structure - -``` -homelab/ -├── default.nix # Main homelab options and imports -├── lib/ -│ ├── systems/ # Core system modules -│ │ ├── monitoring.nix # Monitoring aggregation -│ │ ├── logging.nix # Logging aggregation -│ │ ├── backups.nix # Backup aggregation -│ │ └── proxy.nix # Reverse proxy aggregation -│ ├── features/ # Service feature modules -│ │ ├── monitoring.nix # Service monitoring template -│ │ ├── logging.nix # Service logging template -│ │ └── proxy.nix # Service proxy template -│ └── aggregators/ -│ └── base.nix # Base aggregation functions -└── services/ # Individual service implementations - ├── prometheus.nix - ├── gatus.nix - └── ... -``` - -This architecture provides a scalable, consistent way to manage a homelab fleet with automatic service discovery, monitoring, and management across all nodes. diff --git a/modules/homelab/backup/restic.nix b/modules/homelab/backup/restic.nix deleted file mode 100644 index 31e150a..0000000 --- a/modules/homelab/backup/restic.nix +++ /dev/null @@ -1,105 +0,0 @@ -{ - config, - lib, - ... -}: -with lib; let - cfg = config.homelab.backups; - - # Get restic backend config if it exists - resticBackend = cfg.backends.restic or null; - resticEnabled = resticBackend.enable or false; - - # Filter jobs that use the restic backend - resticJobs = filter (job: job.backend == "restic") cfg.jobs; -in { - options.homelab.backups.backends.restic = mkOption { - type = types.nullOr (types.submodule { - options = { - enable = mkEnableOption "Restic backup backend"; - - # Default restic options - these map directly to services.restic.backups. - repository = mkOption { - type = types.str; - description = "Default repository for restic backups"; - }; - - initialize = lib.mkOption { - type = lib.types.bool; - default = true; - description = '' - Create the repository if it doesn't exist. 
- ''; - }; - - passwordFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Default password file for restic repository"; - }; - - environmentFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Default environment file for restic credentials"; - }; - - paths = mkOption { - type = types.listOf types.str; - default = []; - description = "Default paths to backup"; - }; - - exclude = mkOption { - type = types.listOf types.str; - default = []; - description = "Default exclude patterns"; - }; - - timerConfig = mkOption { - type = types.attrs; - default = { - OnCalendar = "daily"; - RandomizedDelaySec = "1h"; - }; - description = "Default timer configuration"; - }; - - pruneOpts = mkOption { - type = types.listOf types.str; - default = [ - "--keep-daily 7" - "--keep-weekly 4" - "--keep-monthly 6" - ]; - description = "Default pruning options"; - }; - - # Allow any other restic options - extraOptions = mkOption { - type = types.attrs; - default = {}; - description = "Additional default restic options"; - }; - }; - }); - default = null; - description = "Restic backend configuration"; - }; - - config = mkIf (cfg.enable && resticEnabled && length resticJobs > 0) { - # Configure restic service for each job using the restic backend - services.restic.backups = listToAttrs (map ( - job: let - # Get base config without the 'enable' field - baseConfig = removeAttrs resticBackend ["enable"]; - # Merge extraOptions into base config - baseWithExtras = recursiveUpdate (removeAttrs baseConfig ["extraOptions"]) (baseConfig.extraOptions or {}); - # Apply job-specific overrides - finalConfig = recursiveUpdate baseWithExtras job.backendOptions; - in - nameValuePair job.name finalConfig - ) - resticJobs); - }; -} diff --git a/modules/homelab/default.nix b/modules/homelab/default.nix deleted file mode 100644 index 28d103b..0000000 --- a/modules/homelab/default.nix +++ /dev/null @@ -1,129 +0,0 @@ -{ - config, - lib, - nodes, - ... -}: -with lib; let - cfg = config.homelab; -in { - imports = [ - ./lib/systems/monitoring.nix - ./lib/systems/logging.nix - ./lib/systems/proxy.nix - ./lib/systems/backups.nix - - ./lib/cli/homelab-cli.nix - - ./services - ./motd - ]; - - options.homelab = { - enable = mkEnableOption "Homelab fleet configuration"; - hostname = mkOption { - type = types.str; - description = "Hostname for this system"; - }; - domain = mkOption { - type = types.str; - default = "lab"; - description = "Base domain for the homelab"; - }; - externalDomain = mkOption { - type = types.str; - default = "procopius.dk"; - description = "External doamin to the homelab"; - }; - environment = mkOption { - type = types.enum ["production" "staging" "development"]; - default = "production"; - description = "Environment type"; - }; - location = mkOption { - type = types.str; - default = "homelab"; - description = "Physical location identifier"; - }; - tags = mkOption { - type = types.listOf types.str; - default = []; - description = "Tags for this system"; - }; - }; - - config = mkIf cfg.enable { - # Set hostname - networking.hostName = cfg.hostname; - - # Export configuration for external consumption - # environment.etc."homelab/config.json".text = builtins.toJSON { - # inherit (cfg) hostname domain environment location tags; - - # monitoring = { - # # Metrics endpoints (Prometheus, etc.) 
- # metrics = - # map (endpoint: { - # inherit (endpoint) name host port path jobName scrapeInterval labels; - # url = "http://${endpoint.host}:${toString endpoint.port}${endpoint.path}"; - # }) - # cfg.global.monitoring.allMetrics or []; - - # # Health check endpoints - # healthChecks = - # map (check: let - # # Determine the host based on useExternalDomain - # actualHost = - # if check.useExternalDomain - # then "${check.subdomain}.${cfg.externalDomain}" - # else check.host; - - # # Build the URL - # portPart = - # if check.port != null - # then ":${toString check.port}" - # else ""; - # url = "${check.protocol}://${actualHost}${portPart}${check.path}"; - # in { - # inherit (check) name protocol method interval timeout conditions alerts group labels enabled; - # host = actualHost; - # port = check.port; - # path = check.path; - # url = url; - # useExternalDomain = check.useExternalDomain; - # subdomain = check.subdomain; - # sourceNode = cfg.hostname; - # }) - # cfg.global.monitoring.allHealthChecks or []; - # }; - - # reverseProxy = { - # entries = - # map (entry: { - # inherit (entry) subdomain host port path enableAuth enableSSL; - # internalHost = "${cfg.hostname}:${toString entry.port}${entry.path}"; - # externalHost = "${entry.subdomain}.${cfg.externalDomain}"; - # }) - # cfg.global.reverseProxy.all; - # }; - - # backups = { - # jobs = - # map (job: { - # inherit (job) name backend labels; - # backupId = job._backupId; - # sourceNode = job._sourceNode; - # }) - # cfg.global.backups.all; - - # backends = cfg.global.backups.allBackends; - - # summary = { - # totalJobs = length cfg.global.backups.all; - # jobsByBackend = mapAttrs (backend: jobs: length jobs) cfg.global.backups.byBackend; - # jobsByNode = mapAttrs (node: jobs: length jobs) cfg.global.backups.byNode; - # }; - # }; - # }; - }; -} diff --git a/modules/homelab/lib/aggregators/base.nix b/modules/homelab/lib/aggregators/base.nix deleted file mode 100644 index e32228c..0000000 --- a/modules/homelab/lib/aggregators/base.nix +++ /dev/null @@ -1,55 +0,0 @@ -{lib}: let - inherit (lib) flatten mapAttrs attrValues filterAttrs mapAttrsToList filter groupBy length unique attrByPath splitString; - - # Generic function to aggregate any attribute across nodes - aggregateFromNodes = { - nodes, - attributePath, # e.g. "homelab.monitoring.metrics" or "homelab.backups.jobs" - enhancer ? null, # optional function to enhance each item with node context - }: let - # Extract the attribute from each node using the path - getNestedAttr = path: config: let - pathList = splitString "." 
path; - in - attrByPath pathList [] config; - - # Get all items from all nodes - allItems = flatten (mapAttrsToList - (nodeName: nodeConfig: let - items = getNestedAttr attributePath nodeConfig.config; - baseEnhancer = item: - item - // { - _nodeName = nodeName; - _nodeConfig = nodeConfig; - _nodeAddress = nodeConfig.config.networking.hostName or nodeName; - }; - finalEnhancer = - if enhancer != null - then (item: enhancer (baseEnhancer item)) - else baseEnhancer; - in - map finalEnhancer items) - nodes); - in { - # Raw aggregated data - all = allItems; - - # Common grouping patterns - byNode = groupBy (item: item._nodeName) allItems; - byType = groupBy (item: item.type or "unknown") allItems; - byService = groupBy (item: item.service or "unknown") allItems; - - # Utility functions for filtering - filterBy = predicate: filter predicate allItems; - ofType = type: filter (item: (item.type or "") == type) allItems; - ofNode = nodeName: filter (item: item._nodeName == nodeName) allItems; - enabled = filter (item: item.enabled or true) allItems; - - # Counting utilities - count = length allItems; - countBy = fn: mapAttrs (key: items: length items) (groupBy fn allItems); - }; -in { - inherit aggregateFromNodes; -} diff --git a/modules/homelab/lib/cli/cli-commands.sh b/modules/homelab/lib/cli/cli-commands.sh deleted file mode 100644 index 18e1638..0000000 --- a/modules/homelab/lib/cli/cli-commands.sh +++ /dev/null @@ -1,943 +0,0 @@ -#!/usr/bin/env bash -# CLI command implementations - -# Services command -# Enhanced services command with detailed service information -cmd_services() { - local SCOPE="local" - local FORMAT="table" - local SHOW_SYSTEMD=true - local DETAIL_SERVICE="" - local ACTION="" - - while [[ $# -gt 0 ]]; do - case $1 in - --global|-g) SCOPE="global"; shift ;; - --local|-l) SCOPE="local"; shift ;; - --json) FORMAT="json"; shift ;; - --no-systemd) SHOW_SYSTEMD=false; shift ;; - --detail|-d) - DETAIL_SERVICE="$2" - shift 2 - ;; - --logs) - ACTION="logs" - DETAIL_SERVICE="$2" - shift 2 - ;; - --status) - ACTION="status" - DETAIL_SERVICE="$2" - shift 2 - ;; - --restart) - ACTION="restart" - DETAIL_SERVICE="$2" - shift 2 - ;; - --errors) - ACTION="errors" - DETAIL_SERVICE="$2" - shift 2 - ;; - --help|-h) - cat << 'EOF' -homelab services - List and manage services - -USAGE: - homelab services [options] - homelab services --detail - homelab services --logs - homelab services --status - homelab services --restart - homelab services --errors - -OPTIONS: - --global, -g Show services from entire fleet - --local, -l Show local services (default) - --json Output JSON format - --no-systemd Don't check systemd status - --detail, -d Show detailed info for service - --logs Show recent logs for service - --status Show detailed status for service - --restart Restart service - --errors Show recent errors for service - -EXAMPLES: - homelab services - homelab services --global - homelab services --detail prometheus - homelab services --logs grafana - homelab services --errors nginx - homelab services --restart prometheus -EOF - return 0 - ;; - *) shift ;; - esac - done - - # Handle specific service actions - if [[ -n "$DETAIL_SERVICE" ]]; then - case "$ACTION" in - logs) - show_service_logs "$DETAIL_SERVICE" - return $? - ;; - status) - show_service_status "$DETAIL_SERVICE" - return $? - ;; - restart) - restart_service "$DETAIL_SERVICE" - return $? - ;; - errors) - show_service_errors "$DETAIL_SERVICE" - return $? - ;; - *) - show_service_detail "$DETAIL_SERVICE" - return $? 
- ;; - esac - fi - - # Regular service listing - if [[ "$FORMAT" == "json" ]]; then - if [[ "$SCOPE" == "global" ]]; then - jq -r '.services.global // {}' "$HOMELAB_CONFIG" - else - jq -r '.services.local // {}' "$HOMELAB_CONFIG" - fi - else - info "Homelab Services ($SCOPE)" - echo "==============================" - echo - - services_data=$(jq -r " - if \"$SCOPE\" == \"global\" then .services.global.all // [] - else .services.local.all // [] - end | - .[] | - [.name, (.node // \"local\"), (.port // \"N/A\"), (.description // \"\")] | - @tsv - " "$HOMELAB_CONFIG" 2>/dev/null || echo "") - - if [[ -z "$services_data" ]]; then - warn "No services found" - return 0 - fi - - printf "%-20s %-12s %-8s %-12s %-8s %s\n" "SERVICE" "NODE" "PORT" "SYSTEMD" "UPTIME" "DESCRIPTION" - printf "%-20s %-12s %-8s %-12s %-8s %s\n" "-------" "----" "----" "-------" "------" "-----------" - - while IFS=$'\t' read -r service node port description; do - systemd_status="N/A" - uptime="N/A" - - if [[ "$SHOW_SYSTEMD" == "true" && "$node" == "local" ]]; then - # Get systemd service names for this service - systemd_services=($(jq -r ".services.local.all[] | select(.name == \"$service\") | .systemdServices[]?" "$HOMELAB_CONFIG" 2>/dev/null)) - - if [[ ${#systemd_services[@]} -eq 0 ]]; then - # Fallback to common patterns - systemd_services=("$service" "$service.service") - fi - - for svc_name in "${systemd_services[@]}"; do - if systemctl is-enabled "$svc_name" >/dev/null 2>&1; then - if systemctl is-active "$svc_name" >/dev/null 2>&1; then - systemd_status="${GREEN}active${NC}" - - # Get uptime - active_since=$(systemctl show -p ActiveEnterTimestamp "$svc_name" --value 2>/dev/null) - if [[ -n "$active_since" && "$active_since" != "n/a" ]]; then - active_epoch=$(date -d "$active_since" +%s 2>/dev/null || echo 0) - current_epoch=$(date +%s) - if [[ "$active_epoch" -gt 0 ]]; then - uptime_seconds=$((current_epoch - active_epoch)) - uptime=$(format_duration $uptime_seconds) - fi - fi - else - systemd_status="${RED}inactive${NC}" - uptime="0s" - fi - break - fi - done - fi - - printf "%-20s %-12s %-8s %-12b %-8s %s\n" "$service" "$node" "$port" "$systemd_status" "$uptime" "$description" - done <<< "$services_data" - - echo - service_count=$(echo "$services_data" | wc -l) - success "Total services: $service_count" - - echo - info "💡 Use 'homelab services --detail ' for detailed information" - info "💡 Use 'homelab services --logs ' to view logs" - fi -} - -# Helper function to format duration -format_duration() { - local seconds=$1 - local days=$((seconds / 86400)) - local hours=$(((seconds % 86400) / 3600)) - local minutes=$(((seconds % 3600) / 60)) - local secs=$((seconds % 60)) - - if [[ $days -gt 0 ]]; then - echo "${days}d ${hours}h" - elif [[ $hours -gt 0 ]]; then - echo "${hours}h ${minutes}m" - elif [[ $minutes -gt 0 ]]; then - echo "${minutes}m" - else - echo "${secs}s" - fi -} - -# Robust service detection function -find_systemd_service() { - local service_name="$1" - - # Get configured systemd services from homelab config - local systemd_services=($(jq -r ".services.local.all[] | select(.name == \"$service_name\") | .systemdServices[]?" 
"$HOMELAB_CONFIG" 2>/dev/null)) - - # If no configured services, use common patterns - if [[ ${#systemd_services[@]} -eq 0 ]]; then - systemd_services=( - "$service_name.service" - "$service_name" - "nixos-$service_name.service" - "nixos-$service_name" - "$service_name-nixos.service" - ) - fi - - # Try each potential service name with multiple detection methods - for svc_name in "${systemd_services[@]}"; do - # Method 1: Check if systemctl can show the unit (most reliable) - if systemctl show "$svc_name" >/dev/null 2>&1; then - echo "$svc_name" - return 0 - fi - - # Method 2: Check if unit file exists - if systemctl list-unit-files --no-pager --no-legend "$svc_name" 2>/dev/null | grep -q "^$svc_name"; then - echo "$svc_name" - return 0 - fi - - # Method 3: Check if unit is loaded - if systemctl list-units --no-pager --no-legend "$svc_name" 2>/dev/null | grep -q "^$svc_name"; then - echo "$svc_name" - return 0 - fi - done - - # If still not found, try a broader search - local found_service=$(systemctl list-units --all --no-pager --no-legend | grep -E "^$service_name[.-]|^$service_name\.service" | head -1 | awk '{print $1}') - if [[ -n "$found_service" ]]; then - echo "$found_service" - return 0 - fi - - # Last resort: check unit files - found_service=$(systemctl list-unit-files --no-pager --no-legend | grep -E "^$service_name[.-]|^$service_name\.service" | head -1 | awk '{print $1}') - if [[ -n "$found_service" ]]; then - echo "$found_service" - return 0 - fi - - return 1 -} - -show_service_logs() { - local service_name="$1" - local lines="${2:-100}" - local follow="${3:-false}" - - if [[ -z "$service_name" ]]; then - error "Service name required" - return 1 - fi - - # Use robust service detection - local found_service=$(find_systemd_service "$service_name") - - if [[ -z "$found_service" ]]; then - error "No systemd service found for '$service_name'" - echo - info "💡 Available services containing '$service_name':" - systemctl list-units --all --no-pager --no-legend | grep -i "$service_name" | awk '{print " " $1}' || echo " None found" - return 1 - fi - - info "📝 Logs for $service_name ($found_service)" - echo "==================================" - echo - - local journalctl_args="-u $found_service -n $lines --no-pager" - if [[ "$follow" == "true" ]]; then - journalctl_args="$journalctl_args -f" - info "Following logs (Press Ctrl+C to stop)..." - echo - fi - - journalctl $journalctl_args -} - -show_service_detail() { - local service_name="$1" - - if [[ -z "$service_name" ]]; then - error "Service name required" - return 1 - fi - - # Get service info from config - local service_info=$(jq -r ".services.local.all[] | select(.name == \"$service_name\")" "$HOMELAB_CONFIG" 2>/dev/null) - - if [[ -z "$service_info" ]]; then - error "Service '$service_name' not found in homelab configuration" - return 1 - fi - - info "🔍 Service Details: $service_name" - echo "=================================" - echo - - # Basic info - local port=$(echo "$service_info" | jq -r '.port // "N/A"') - local description=$(echo "$service_info" | jq -r '.description // "N/A"') - local tags=$(echo "$service_info" | jq -r '.tags[]? 
// empty' | tr '\n' ',' | sed 's/,$//') - - echo "📋 Configuration:" - echo " Port: $port" - echo " Description: $description" - echo " Tags: ${tags:-"None"}" - echo - - # Use robust service detection - local found_service=$(find_systemd_service "$service_name") - - echo "🔧 Systemd Status:" - if [[ -n "$found_service" ]]; then - echo " Service: $found_service" - echo " Status: $(systemctl is-active "$found_service" 2>/dev/null || echo "unknown")" - echo " Enabled: $(systemctl is-enabled "$found_service" 2>/dev/null || echo "unknown")" - - # Detailed status - local active_since=$(systemctl show -p ActiveEnterTimestamp "$found_service" --value 2>/dev/null) - if [[ -n "$active_since" && "$active_since" != "n/a" ]]; then - echo " Active since: $active_since" - fi - - local main_pid=$(systemctl show -p MainPID "$found_service" --value 2>/dev/null) - if [[ -n "$main_pid" && "$main_pid" != "0" ]]; then - echo " Main PID: $main_pid" - - # Memory usage - local memory_usage=$(systemctl show -p MemoryCurrent "$found_service" --value 2>/dev/null) - if [[ -n "$memory_usage" && "$memory_usage" != "[not set]" && "$memory_usage" -gt 0 ]]; then - local memory_mb=$((memory_usage / 1024 / 1024)) - echo " Memory: ${memory_mb}MB" - fi - fi - echo - - # Recent logs preview - echo "📝 Recent Logs (last 10 lines):" - echo "--------------------------------" - journalctl -u "$found_service" -n 10 --no-pager --output=short 2>/dev/null || echo "No logs available" - echo - - # Check for recent errors - local error_count=$(journalctl -u "$found_service" --since "24 hours ago" --no-pager -q 2>/dev/null | grep -i "error\|failed\|exception" | wc -l) - if [[ "$error_count" -gt 0 ]]; then - warn "⚠️ Found $error_count error(s) in last 24 hours" - echo " Use 'homelab services --errors $service_name' to view them" - else - success "✅ No errors found in last 24 hours" - fi - echo - - info "📊 Available Actions:" - echo " homelab services --logs $service_name # View full logs" - echo " homelab services --errors $service_name # View recent errors" - echo " homelab services --restart $service_name # Restart service" - - else - warn "No systemd service found for '$service_name'" - echo - info "💡 Available services containing '$service_name':" - systemctl list-units --all --no-pager --no-legend | grep -i "$service_name" | awk '{print " " $1}' || echo " None found" - fi -} - -show_service_errors() { - local service_name="$1" - local since="${2:-24 hours ago}" - - if [[ -z "$service_name" ]]; then - error "Service name required" - return 1 - fi - - # Use robust service detection - local found_service=$(find_systemd_service "$service_name") - - if [[ -z "$found_service" ]]; then - error "No systemd service found for '$service_name'" - return 1 - fi - - info "🚨 Errors for $service_name ($found_service) since $since" - echo "==============================================" - echo - - # Get logs with priority filtering for errors and higher - local systemd_errors=$(journalctl -u "$found_service" --since "$since" --no-pager -p err 2>/dev/null) - - # Also get application-level errors from all logs but with better filtering - local app_errors=$(journalctl -u "$found_service" --since "$since" --no-pager 2>/dev/null | \ - grep -E "(ERROR|FATAL|CRITICAL|Exception|Traceback|failed to|cannot|unable to|connection refused|timeout|denied)" | \ - grep -v -E "(debug|DEBUG|info|INFO|warn|WARNING|notice|NOTICE)" | \ - grep -v -E "(successfully|completed|started|stopped|reloaded)") - - local has_errors=false - - # Show systemd-level errors (priority err and 
above) - if [[ -n "$systemd_errors" ]]; then - warn "📍 System-level errors (priority: err/crit/alert/emerg):" - echo "────────────────────────────────────────────────────────" - echo "$systemd_errors" - echo - has_errors=true - fi - - # Show application-level errors - if [[ -n "$app_errors" ]]; then - warn "📍 Application-level errors:" - echo "─────────────────────────────" - echo "$app_errors" - echo - has_errors=true - fi - - # Check for service failures/restarts - local service_failures=$(journalctl -u "$found_service" --since "$since" --no-pager 2>/dev/null | \ - grep -E "(Failed|failed|Stopped|stopped|Restarted|restarted|Exit code|exit code)" | \ - grep -v -E "(successfully|gracefully)") - - if [[ -n "$service_failures" ]]; then - warn "📍 Service state changes/failures:" - echo "───────────────────────────────────" - echo "$service_failures" - echo - has_errors=true - fi - - if [[ "$has_errors" == "false" ]]; then - success "✅ No errors found since $since" - echo - info "💡 Error detection includes:" - echo " • System-level errors (journald priority: err/crit/alert/emerg)" - echo " • Application errors (ERROR, FATAL, CRITICAL, Exception, etc.)" - echo " • Service failures and unexpected restarts" - else - echo - local total_systemd=$(echo "$systemd_errors" | grep -c . || echo 0) - local total_app=$(echo "$app_errors" | grep -c . || echo 0) - local total_failures=$(echo "$service_failures" | grep -c . || echo 0) - - warn "Summary: $total_systemd system errors, $total_app application errors, $total_failures service issues" - echo - info "💡 Use 'homelab services --logs $service_name' to view all logs" - fi -} - -show_service_status() { - local service_name="$1" - - if [[ -z "$service_name" ]]; then - error "Service name required" - return 1 - fi - - # Use robust service detection - local found_service=$(find_systemd_service "$service_name") - - if [[ -z "$found_service" ]]; then - error "No systemd service found for '$service_name'" - return 1 - fi - - info "📊 Status for $service_name ($found_service)" - echo "==================================" - echo - - systemctl status "$found_service" --no-pager -l -} - -restart_service() { - local service_name="$1" - - if [[ -z "$service_name" ]]; then - error "Service name required" - return 1 - fi - - # Use robust service detection - local found_service=$(find_systemd_service "$service_name") - - if [[ -z "$found_service" ]]; then - error "No systemd service found for '$service_name'" - return 1 - fi - - info "🔄 Restarting $service_name ($found_service)..." 
- - if sudo systemctl restart "$found_service"; then - success "✅ Successfully restarted $service_name" - - # Show brief status - sleep 2 - if systemctl is-active "$found_service" >/dev/null 2>&1; then - success "✅ Service is now active" - else - error "❌ Service failed to start properly" - warn "Use 'homelab services --status $service_name' to check details" - fi - else - error "❌ Failed to restart $service_name" - return 1 - fi -} - -# Backups command -cmd_backups() { - local SCOPE="local" - local FORMAT="table" - local SHOW_STATUS=true - - while [[ $# -gt 0 ]]; do - case $1 in - --global|-g) SCOPE="global"; shift ;; - --local|-l) SCOPE="local"; shift ;; - --json) FORMAT="json"; shift ;; - --no-status) SHOW_STATUS=false; shift ;; - --help|-h) - cat << 'EOF' -homelab backups - Show backup jobs and status - -USAGE: - homelab backups [options] - -OPTIONS: - --global, -g Show backups from entire fleet - --local, -l Show local backups (default) - --json Output JSON format - --no-status Don't check systemd timer status - -EXAMPLES: - homelab backups - homelab backups --global - homelab backups --no-status -EOF - return 0 - ;; - *) shift ;; - esac - done - - if [[ "$FORMAT" == "json" ]]; then - if [[ "$SCOPE" == "global" ]]; then - jq -r '.backups.global // {}' "$HOMELAB_CONFIG" - else - jq -r '.backups.local // {}' "$HOMELAB_CONFIG" - fi - else - info "Homelab Backups ($SCOPE)" - echo "==========================" - echo - - backup_data=$(jq -r " - if \"$SCOPE\" == \"global\" then .backups.global.allJobs // [] - else .backups.local.allJobs // [] - end | - .[] | - [.name, (.sourceNode // .node // \"local\"), .backend, (.labels | to_entries | map(\"\(.key)=\(.value)\") | join(\",\"))] | - @tsv - " "$HOMELAB_CONFIG" 2>/dev/null || echo "") - - if [[ -z "$backup_data" ]]; then - warn "No backup jobs found" - return 0 - fi - - printf "%-25s %-12s %-8s %-15s %-15s %-15s %s\n" "JOB" "NODE" "STATUS" "BACKEND" "LAST RUN" "NEXT RUN" "LABELS" - printf "%-25s %-12s %-8s %-15s %-15s %-15s %s\n" "---" "----" "------" "-------" "--------" "--------" "------" - - while IFS=$'\t' read -r job node backend labels; do - last_run="Unknown" - status="❓" - next_run="Unknown" - - if [[ "$SHOW_STATUS" == "true" && "$node" == "local" ]]; then - timer_patterns=( - "backup-$job" - "$job-backup" - "restic-backups-$job" - "restic-backup-$job" - "$job.timer" - "backup-$job.timer" - ) - - found_timer="" - actual_timer_name="" - for pattern in "${timer_patterns[@]}"; do - for timer_name in "$pattern" "$pattern.timer"; do - if systemctl list-timers --no-pager --no-legend "$timer_name" 2>/dev/null | grep -q "$timer_name"; then - found_timer="$timer_name" - if [[ "$timer_name" == *.timer ]]; then - actual_timer_name="$timer_name" - else - actual_timer_name="$timer_name.timer" - fi - break 2 - fi - done - done - - if [[ -n "$found_timer" ]]; then - last_trigger=$(systemctl show -p LastTriggerUSec "$actual_timer_name" --value 2>/dev/null) - if [[ "$last_trigger" != "n/a" && -n "$last_trigger" && "$last_trigger" != "Thu 1970-01-01"* ]]; then - last_run=$(date -d "$last_trigger" "+%Y-%m-%d %H:%M" 2>/dev/null || echo "Parse Error") - - last_epoch=$(date -d "$last_trigger" +%s 2>/dev/null || echo 0) - current_epoch=$(date +%s) - if [[ "$last_epoch" != "0" && "$last_epoch" -gt 0 ]]; then - hours_since=$(( (current_epoch - last_epoch) / 3600 )) - - if [[ $hours_since -lt 25 ]]; then - status="✅" - elif [[ $hours_since -lt 48 ]]; then - status="⚠️" - else - status="❌" - fi - else - status="❓" - fi - else - last_run="Never" - status="⏸️" - 
fi - - next_trigger=$(systemctl show -p NextElapseUSecRealtime "$actual_timer_name" --value 2>/dev/null) - if [[ "$next_trigger" != "n/a" && -n "$next_trigger" && "$next_trigger" != "0" ]]; then - next_run=$(date -d "$next_trigger" "+%Y-%m-%d %H:%M" 2>/dev/null || echo "Parse Error") - else - next_run="Unknown" - fi - - if [[ "$status" == "✅" ]]; then - if journalctl -u "$actual_timer_name" --since "24 hours ago" --no-pager -q 2>/dev/null | grep -qi "error\|failed\|timeout"; then - status="❌" - elif journalctl -u "$actual_timer_name" --since "24 hours ago" --no-pager -q 2>/dev/null | grep -qi "success\|completed\|finished"; then - status="✅" - fi - fi - fi - fi - - printf "%-25s %-12s %-8s %-15s %-15s %-15s %s\n" "$job" "$node" "$status" "$backend" "$last_run" "$next_run" "$labels" - done <<< "$backup_data" - - echo - job_count=$(echo "$backup_data" | wc -l) - success "Total backup jobs: $job_count" - - if [[ "$SHOW_STATUS" == "true" ]]; then - echo - info "Status: ✅=Recent(<25h) ⚠️=Overdue(1-2d) ❌=Failed(>2d) ⏸️=Never ❓=Unknown" - fi - fi -} - - -# Proxy command -cmd_proxy() { - local SCOPE="local" - local FORMAT="table" - - while [[ $# -gt 0 ]]; do - case $1 in - --global|-g) SCOPE="global"; shift ;; - --local|-l) SCOPE="local"; shift ;; - --json) FORMAT="json"; shift ;; - --help|-h) - cat << 'EOF' -homelab proxy - Show reverse proxy entries - -USAGE: - homelab proxy [options] - -OPTIONS: - --global, -g Show proxy entries from entire fleet - --local, -l Show local proxy entries (default) - --json Output JSON format - -EXAMPLES: - homelab proxy - homelab proxy --global -EOF - return 0 - ;; - *) shift ;; - esac - done - - if [[ "$FORMAT" == "json" ]]; then - if [[ "$SCOPE" == "global" ]]; then - jq -r '.reverseProxy.global // {}' "$HOMELAB_CONFIG" - else - jq -r '.reverseProxy.local // {}' "$HOMELAB_CONFIG" - fi - else - info "Homelab Reverse Proxy ($SCOPE)" - echo "===============================" - echo - - proxy_data=$(jq -r " - if \"$SCOPE\" == \"global\" then .reverseProxy.global.allEntries // [] - else .reverseProxy.local.allEntries // [] - end | - .[] | - [.subdomain, (.sourceNode // .node // \"local\"), .host, (.port // \"N/A\"), (.enableAuth // false), (.enableSSL // true)] | - @tsv - " "$HOMELAB_CONFIG" 2>/dev/null || echo "") - - if [[ -z "$proxy_data" ]]; then - warn "No proxy entries found" - return 0 - fi - - printf "%-20s %-12s %-15s %-8s %-6s %-6s %s\n" "SUBDOMAIN" "NODE" "BACKEND" "PORT" "AUTH" "SSL" "EXTERNAL URL" - printf "%-20s %-12s %-15s %-8s %-6s %-6s %s\n" "---------" "----" "-------" "----" "----" "---" "------------" - - external_domain=$(jq -r '.externalDomain // "lab.local"' "$HOMELAB_CONFIG" 2>/dev/null) - - while IFS=$'\t' read -r subdomain node host port auth ssl; do - auth_icon=$(if [[ "$auth" == "true" ]]; then echo "🔒"; else echo "🌐"; fi) - ssl_icon=$(if [[ "$ssl" == "true" ]]; then echo "🔐"; else echo "❌"; fi) - - external_url="https://$subdomain.$external_domain" - if [[ "$ssl" == "false" ]]; then - external_url="http://$subdomain.$external_domain" - fi - - printf "%-20s %-12s %-15s %-8s %-6s %-6s %s\n" "$subdomain" "$node" "$host" "$port" "$auth_icon" "$ssl_icon" "$external_url" - done <<< "$proxy_data" - - echo - entry_count=$(echo "$proxy_data" | wc -l) - success "Total proxy entries: $entry_count" - fi -} - -# Monitoring command -cmd_monitoring() { - local SCOPE="local" - local FORMAT="table" - local SHOW_TYPE="all" - - while [[ $# -gt 0 ]]; do - case $1 in - --global|-g) SCOPE="global"; shift ;; - --local|-l) SCOPE="local"; shift ;; - --json) 
FORMAT="json"; shift ;; - --metrics) SHOW_TYPE="metrics"; shift ;; - --health) SHOW_TYPE="health"; shift ;; - --logs) SHOW_TYPE="logs"; shift ;; - --help|-h) - cat << 'EOF' -homelab monitoring - Show monitoring configuration - -USAGE: - homelab monitoring [options] - -OPTIONS: - --global, -g Show monitoring from entire fleet - --local, -l Show local monitoring (default) - --json Output JSON format - --metrics Show only metrics endpoints - --health Show only health checks - --logs Show only log sources - -EXAMPLES: - homelab monitoring - homelab monitoring --global --metrics -EOF - return 0 - ;; - *) shift ;; - esac - done - - if [[ "$FORMAT" == "json" ]]; then - if [[ "$SCOPE" == "global" ]]; then - jq -r '.monitoring.global // {}' "$HOMELAB_CONFIG" - else - jq -r '.monitoring.local // {}' "$HOMELAB_CONFIG" - fi - else - info "Homelab Monitoring ($SCOPE)" - echo "============================" - echo - - # Show metrics - if [[ "$SHOW_TYPE" == "all" || "$SHOW_TYPE" == "metrics" ]]; then - info "📊 Metrics Endpoints" - echo "--------------------" - - metrics_data=$(jq -r " - if \"$SCOPE\" == \"global\" then .monitoring.global.allMetrics // [] - else .monitoring.local.allMetrics // [] - end | - .[] | - [.name, (.sourceNode // .node // \"local\"), .host, (.port // \"N/A\"), .path, .jobName] | - @tsv - " "$HOMELAB_CONFIG" 2>/dev/null || echo "") - - if [[ -n "$metrics_data" ]]; then - printf "%-20s %-12s %-15s %-8s %-12s %s\n" "NAME" "NODE" "HOST" "PORT" "PATH" "JOB" - printf "%-20s %-12s %-15s %-8s %-12s %s\n" "----" "----" "----" "----" "----" "---" - - while IFS=$'\t' read -r name node host port path job; do - printf "%-20s %-12s %-15s %-8s %-12s %s\n" "$name" "$node" "$host" "$port" "$path" "$job" - done <<< "$metrics_data" - - echo - metrics_count=$(echo "$metrics_data" | wc -l) - success "Found $metrics_count metrics endpoints" - else - warn "No metrics endpoints found" - fi - echo - fi - - # Show health checks - if [[ "$SHOW_TYPE" == "all" || "$SHOW_TYPE" == "health" ]]; then - info "🏥 Health Checks" - echo "----------------" - - health_data=$(jq -r " - if \"$SCOPE\" == \"global\" then .monitoring.global.allHealthChecks // [] - else .monitoring.local.allHealthChecks // [] - end | - .[] | - [.name, (.sourceNode // .node // \"local\"), .host, (.port // \"N/A\"), .path, .protocol, (.enabled // true)] | - @tsv - " "$HOMELAB_CONFIG" 2>/dev/null || echo "") - - if [[ -n "$health_data" ]]; then - printf "%-20s %-12s %-15s %-8s %-12s %-8s %s\n" "NAME" "NODE" "HOST" "PORT" "PATH" "PROTOCOL" "STATUS" - printf "%-20s %-12s %-15s %-8s %-12s %-8s %s\n" "----" "----" "----" "----" "----" "--------" "------" - - while IFS=$'\t' read -r name node host port path protocol enabled; do - status_icon=$(if [[ "$enabled" == "true" ]]; then echo "✅"; else echo "❌"; fi) - printf "%-20s %-12s %-15s %-8s %-12s %-8s %s\n" "$name" "$node" "$host" "$port" "$path" "$protocol" "$status_icon" - done <<< "$health_data" - - echo - health_count=$(echo "$health_data" | wc -l) - success "Found $health_count health checks" - else - warn "No health checks found" - fi - fi - fi -} - -# Status command -cmd_status() { - local FORMAT="table" - - while [[ $# -gt 0 ]]; do - case $1 in - --json) FORMAT="json"; shift ;; - --help|-h) - cat << 'EOF' -homelab status - Show overall homelab status - -USAGE: - homelab status [options] - -OPTIONS: - --json Output JSON format - -EXAMPLES: - homelab status -EOF - return 0 - ;; - *) shift ;; - esac - done - - if [[ "$FORMAT" == "json" ]]; then - cat "$HOMELAB_CONFIG" - else - # Get basic info - 
hostname=$(jq -r '.hostname // "unknown"' "$HOMELAB_CONFIG") - domain=$(jq -r '.domain // "lab"' "$HOMELAB_CONFIG") - external_domain=$(jq -r '.externalDomain // "unknown"' "$HOMELAB_CONFIG") - environment=$(jq -r '.environment // "unknown"' "$HOMELAB_CONFIG") - - info "🏠 Homelab Status" - echo "==================" - echo - echo "Node Information:" - echo " Hostname: $hostname" - echo " Domain: $domain" - echo " External: $external_domain" - echo " Environment: $environment" - echo - - # Services summary - local_services=$(jq -r '.services.local.count // 0' "$HOMELAB_CONFIG" 2>/dev/null) - global_services=$(jq -r '.services.global.count // 0' "$HOMELAB_CONFIG" 2>/dev/null) - - echo "📋 Services:" - echo " Local: $local_services" - echo " Fleet: $global_services" - echo - - # Monitoring summary - local_metrics=$(jq -r '.monitoring.local.count // 0' "$HOMELAB_CONFIG" 2>/dev/null) - global_metrics=$(jq -r '.monitoring.global.summary.totalMetrics // 0' "$HOMELAB_CONFIG" 2>/dev/null) - - echo "📊 Monitoring:" - echo " Local Metrics: $local_metrics" - echo " Fleet Metrics: $global_metrics" - echo - - # Backup summary - local_backups=$(jq -r '.backups.local.count // 0' "$HOMELAB_CONFIG" 2>/dev/null) - global_backups=$(jq -r '.backups.global.summary.total // 0' "$HOMELAB_CONFIG" 2>/dev/null) - - echo "💾 Backups:" - echo " Local Jobs: $local_backups" - echo " Fleet Jobs: $global_backups" - echo - - success "Use 'homelab --help' for detailed information" - fi -} diff --git a/modules/homelab/lib/cli/homelab-cli.nix b/modules/homelab/lib/cli/homelab-cli.nix deleted file mode 100644 index 56b61e8..0000000 --- a/modules/homelab/lib/cli/homelab-cli.nix +++ /dev/null @@ -1,295 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - cfg = config.homelab; - - extractServiceData = services: - mapAttrsToList (name: svc: { - inherit name; - enabled = svc.enable or false; - port = svc.port or null; - description = svc.description or name; - tags = svc.tags or []; - systemdServices = svc.systemdServices or ["${name}.service" name]; - }) (filterAttrs (name: svc: svc.enable or false) services); - - extractListData = list: - if isList list - then - map ( - item: - if isAttrs item - then - filterAttrs ( - k: v: - !(isFunction v) - && !(isAttrs v && v ? "_type") - && k != "_module" - ) - item - else item - ) - list - else []; - - homelabCli = pkgs.writeShellScriptBin "homelab" '' - #!/usr/bin/env bash - set -euo pipefail - - HOMELAB_CONFIG="/etc/homelab/config.json" - - # Colors - RED='\033[0;31m' - GREEN='\033[0;32m' - YELLOW='\033[1;33m' - BLUE='\033[0;34m' - NC='\033[0m' - - # Helper functions - error() { echo -e "''${RED}Error: $1''${NC}" >&2; } - info() { echo -e "''${BLUE}$1''${NC}"; } - success() { echo -e "''${GREEN}$1''${NC}"; } - warn() { echo -e "''${YELLOW}$1''${NC}"; } - - # Check config exists - if [[ ! 
-f "$HOMELAB_CONFIG" ]]; then - error "Homelab configuration not found" - error "Make sure homelab.enable = true and rebuild" - exit 1 - fi - - # Load command implementations - source ${./cli-commands.sh} - - # Help function - show_help() { - cat << 'EOF' - Homelab Management CLI - - USAGE: - homelab [options] - - COMMANDS: - services List and manage services - backups Show backup jobs and status - proxy Show reverse proxy entries - monitoring Show monitoring configuration - status Overall homelab status - help Show this help - - GLOBAL OPTIONS: - --global, -g Show fleet-wide information - --local, -l Show local information (default) - --json Output JSON format - --help, -h Show help - - EXAMPLES: - homelab services --global - homelab backups --local - homelab status - EOF - } - - # Main command dispatcher - case "''${1:-help}" in - services) - shift - cmd_services "$@" - ;; - backups) - shift - cmd_backups "$@" - ;; - proxy) - shift - cmd_proxy "$@" - ;; - monitoring) - shift - cmd_monitoring "$@" - ;; - status) - shift - cmd_status "$@" - ;; - help|--help|-h) - show_help - ;; - *) - error "Unknown command: $1" - show_help - exit 1 - ;; - esac - ''; -in { - # Only enable when homelab is enabled - config = mkIf cfg.enable { - # Install CLI tools - environment.systemPackages = [ - homelabCli - # Create convenient aliases - (pkgs.writeShellScriptBin "hl" "exec homelab \"$@\"") - (pkgs.writeShellScriptBin "hls" "exec homelab services \"$@\"") - (pkgs.writeShellScriptBin "hlb" "exec homelab backups \"$@\"") - (pkgs.writeShellScriptBin "hlp" "exec homelab proxy \"$@\"") - (pkgs.writeShellScriptBin "hlm" "exec homelab monitoring \"$@\"") - ]; - - # Generate minimal, safe JSON config - environment.etc."homelab/config.json" = { - text = builtins.toJSON { - # Basic homelab info (always safe) - hostname = cfg.hostname or "unknown"; - domain = cfg.domain or "lab"; - externalDomain = cfg.externalDomain or "lab.local"; - environment = cfg.environment or "production"; - location = cfg.location or "homelab"; - tags = cfg.tags or []; - - # Services - only extract what we have locally - services = { - local = { - all = - if (cfg ? services) - then extractServiceData cfg.services - else []; - count = - if (cfg ? 
services) - then length (attrNames (filterAttrs (n: s: s.enable or false) cfg.services)) - else 0; - }; - # For global data, we'll try to read it but provide empty fallback - global = { - all = []; - count = 0; - summary = {}; - }; - }; - - # Monitoring - extract only basic data - monitoring = { - local = { - allMetrics = - if (hasAttr "monitoring" cfg && hasAttr "allMetrics" cfg.monitoring) - then extractListData cfg.monitoring.allMetrics - else []; - allHealthChecks = - if (hasAttr "monitoring" cfg && hasAttr "allHealthChecks" cfg.monitoring) - then extractListData cfg.monitoring.allHealthChecks - else []; - count = - if (hasAttr "monitoring" cfg && hasAttr "allMetrics" cfg.monitoring) - then length cfg.monitoring.allMetrics - else 0; - }; - global = { - allMetrics = []; - allHealthChecks = []; - summary = { - totalMetrics = 0; - totalHealthChecks = 0; - }; - }; - }; - - # Logging - logging = { - local = { - allSources = - if (hasAttr "logging" cfg && hasAttr "allSources" cfg.logging) - then extractListData cfg.logging.allSources - else []; - count = - if (hasAttr "logging" cfg && hasAttr "allSources" cfg.logging) - then length cfg.logging.allSources - else 0; - }; - global = { - allSources = []; - summary = {}; - }; - }; - - # Backups - backups = { - local = { - allJobs = - if (hasAttr "backups" cfg && hasAttr "allJobs" cfg.backups) - then extractListData cfg.backups.allJobs - else []; - count = - if (hasAttr "backups" cfg && hasAttr "allJobs" cfg.backups) - then length cfg.backups.allJobs - else 0; - }; - global = { - allJobs = []; - summary = {}; - }; - }; - - # Reverse Proxy - reverseProxy = { - local = { - allEntries = - if (hasAttr "reverseProxy" cfg && hasAttr "allEntries" cfg.reverseProxy) - then extractListData cfg.reverseProxy.allEntries - else []; - count = - if (hasAttr "reverseProxy" cfg && hasAttr "allEntries" cfg.reverseProxy) - then length cfg.reverseProxy.allEntries - else 0; - }; - global = { - allEntries = []; - summary = {}; - }; - }; - - # Metadata - _metadata = { - # generated = toString builtins.currentTime; - version = "1.0.0"; - }; - }; - mode = "0644"; - }; - - # Add bash completion - environment.etc."bash_completion.d/homelab".text = '' - _homelab_completion() { - local cur prev opts - COMPREPLY=() - cur="''${COMP_WORDS[COMP_CWORD]}" - prev="''${COMP_WORDS[COMP_CWORD-1]}" - - case ''${COMP_CWORD} in - 1) - opts="services backups proxy monitoring status help" - COMPREPLY=( $(compgen -W "''${opts}" -- ''${cur}) ) - return 0 - ;; - *) - case ''${COMP_WORDS[1]} in - services|backups|proxy|monitoring|status) - opts="--global --local --json --help" - ;; - *) - opts="--help" - ;; - esac - COMPREPLY=( $(compgen -W "''${opts}" -- ''${cur}) ) - return 0 - ;; - esac - } - - complete -F _homelab_completion homelab hl - ''; - }; -} diff --git a/modules/homelab/lib/features/logging.nix b/modules/homelab/lib/features/logging.nix deleted file mode 100644 index 60f2cda..0000000 --- a/modules/homelab/lib/features/logging.nix +++ /dev/null @@ -1,92 +0,0 @@ -serviceName: { - config, - lib, - ... 
-}: -with lib; let - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; - - shouldEnableLogging = - cfg.logging.files - != [] - || cfg.logging.extraSources != []; -in { - options.homelab.services.${serviceName}.logging = { - enable = mkOption { - type = types.bool; - description = "Enable logging for ${serviceName}"; - default = shouldEnableLogging; - }; - - files = mkOption { - type = types.listOf types.str; - default = []; - }; - - parsing = { - regex = mkOption { - type = types.nullOr types.str; - default = null; - }; - - extractFields = mkOption { - type = types.listOf types.str; - default = []; - }; - }; - - multiline = mkOption { - type = types.nullOr (types.submodule { - options = { - firstLineRegex = mkOption {type = types.str;}; - maxWaitTime = mkOption { - type = types.str; - default = "3s"; - }; - }; - }); - default = null; - }; - - extraLabels = mkOption { - type = types.attrsOf types.str; - default = {}; - }; - - extraSources = mkOption { - type = types.listOf types.attrs; - default = []; - }; - }; - - config = mkIf cfg.enable { - homelab.logging.sources = mkIf cfg.logging.enable ( - # Only create file source if files are specified - (optional (cfg.logging.files != []) { - name = "${serviceName}-logs"; - type = "file"; - files = { - paths = cfg.logging.files; - multiline = cfg.logging.multiline; - }; - labels = - cfg.logging.extraLabels - // { - service = serviceName; - node = homelabCfg.hostname; - environment = homelabCfg.environment; - }; - pipelineStages = - (optional (cfg.logging.parsing.regex != null) { - regex.expression = cfg.logging.parsing.regex; - }) - ++ (optional (cfg.logging.parsing.extractFields != []) { - labels = listToAttrs (map (field: nameValuePair field null) cfg.logging.parsing.extractFields); - }); - enabled = true; - }) - ++ cfg.logging.extraSources - ); - }; -} diff --git a/modules/homelab/lib/features/monitoring.nix b/modules/homelab/lib/features/monitoring.nix deleted file mode 100644 index f25e3b8..0000000 --- a/modules/homelab/lib/features/monitoring.nix +++ /dev/null @@ -1,128 +0,0 @@ -serviceName: { - config, - lib, - ... -}: -with lib; let - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; - - hasMetricsConfig = - cfg.monitoring.metrics.path - != null - || cfg.monitoring.metrics.extraEndpoints != []; - - hasHealthCheckConfig = - cfg.monitoring.healthCheck.path - != null - || cfg.monitoring.healthCheck.conditions != [] - || cfg.monitoring.healthCheck.extraChecks != []; -in { - # Define the service-specific monitoring options - options.homelab.services.${serviceName}.monitoring = { - enable = mkOption { - type = types.bool; - description = "Enable monitoring for ${serviceName}"; - default = hasMetricsConfig || hasHealthCheckConfig; - }; - - metrics = { - enable = mkOption { - type = types.bool; - default = hasMetricsConfig; - }; - - path = mkOption { - type = types.nullOr types.str; - default = null; - description = "Metrics endpoint path. Setting this enables metrics collection."; - }; - - extraEndpoints = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional metrics endpoints. Adding endpoints enables metrics collection."; - }; - }; - - healthCheck = { - enable = mkOption { - type = types.bool; - default = hasHealthCheckConfig; - }; - - path = mkOption { - type = types.nullOr types.str; - default = null; - description = "Health check endpoint path. 
Setting this enables health checks."; - example = "/health"; - }; - - conditions = mkOption { - type = types.listOf types.str; - default = ["[STATUS] == 200"]; - description = "Health check conditions. Setting conditions enables health checks."; - example = ["[STATUS] == 200"]; - }; - - extraChecks = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional health checks. Adding checks enables health monitoring."; - }; - }; - - extraLabels = mkOption { - type = types.attrsOf types.str; - default = {}; - }; - }; - - # Generate the homelab config automatically when service is enabled - config = mkIf cfg.enable { - homelab.monitoring = mkIf cfg.monitoring.enable { - metrics = mkIf hasMetricsConfig ( - (optional (cfg.monitoring.metrics.path != null) { - name = "${serviceName}-main"; - host = homelabCfg.hostname; - port = cfg.port; - path = cfg.monitoring.metrics.path; - jobName = serviceName; - scrapeInterval = "30s"; - labels = - cfg.monitoring.extraLabels - // { - service = serviceName; - node = homelabCfg.hostname; - environment = homelabCfg.environment; - }; - }) - ++ cfg.monitoring.metrics.extraEndpoints - ); - - healthChecks = mkIf hasHealthCheckConfig ( - (optional (cfg.monitoring.healthCheck.path != null) { - name = "${serviceName}-health"; - host = homelabCfg.hostname; - port = cfg.port; - path = cfg.monitoring.healthCheck.path; - protocol = "http"; - method = "GET"; - interval = "30s"; - timeout = "10s"; - conditions = cfg.monitoring.healthCheck.conditions; - group = "services"; - labels = - cfg.monitoring.extraLabels - // { - service = serviceName; - node = homelabCfg.hostname; - environment = homelabCfg.environment; - }; - enabled = true; - }) - ++ cfg.monitoring.healthCheck.extraChecks - ); - }; - }; -} diff --git a/modules/homelab/lib/features/proxy.nix b/modules/homelab/lib/features/proxy.nix deleted file mode 100644 index 595f4c0..0000000 --- a/modules/homelab/lib/features/proxy.nix +++ /dev/null @@ -1,69 +0,0 @@ -serviceName: { - config, - lib, - ... -}: -with lib; let - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; -in { - options.homelab.services.${serviceName}.proxy = { - enable = mkOption { - type = types.bool; - description = "Enable reverse proxy for ${serviceName}"; - default = true; - }; - - subdomain = mkOption { - type = types.str; - default = serviceName; - }; - - enableAuth = mkOption { - type = types.bool; - default = false; - }; - - additionalSubdomains = mkOption { - type = types.listOf (types.submodule { - options = { - subdomain = mkOption {type = types.str;}; - port = mkOption {type = types.port;}; - path = mkOption { - type = types.str; - default = "/"; - }; - enableAuth = mkOption { - type = types.bool; - default = false; - }; - }; - }); - default = []; - }; - }; - - config = mkIf cfg.enable { - homelab.reverseProxy.entries = mkIf cfg.proxy.enable ( - [ - { - subdomain = cfg.proxy.subdomain; - host = homelabCfg.hostname; - port = cfg.port; - path = "/"; - enableAuth = cfg.proxy.enableAuth; - enableSSL = true; - } - ] - ++ map (sub: { - subdomain = sub.subdomain; - host = homelabCfg.hostname; - port = sub.port; - path = sub.path; - enableAuth = sub.enableAuth; - enableSSL = true; - }) - cfg.proxy.additionalSubdomains - ); - }; -} diff --git a/modules/homelab/lib/systems/backups.nix b/modules/homelab/lib/systems/backups.nix deleted file mode 100644 index 49d10b7..0000000 --- a/modules/homelab/lib/systems/backups.nix +++ /dev/null @@ -1,163 +0,0 @@ -{ - config, - lib, - nodes, - ... 
-}: -with lib; let - cfg = config.homelab.backups; - homelabCfg = config.homelab; - hasNodes = length (attrNames nodes) > 0; - - # Get all defined backend names dynamically - backendNames = attrNames cfg.backends or {}; - - backupJobType = types.submodule { - options = { - name = mkOption { - type = types.str; - description = "Name of the backup job"; - }; - backend = mkOption { - type = types.enum backendNames; - description = "Backend to use for this backup job"; - }; - backendOptions = mkOption { - type = types.attrs; - default = {}; - description = "Backend-specific options to override or extend the backend configuration"; - }; - labels = mkOption { - type = types.attrsOf types.str; - default = {}; - description = "Additional labels for this backup job"; - }; - }; - }; - - # Local aggregation - localAggregation = { - allJobs = cfg.jobs; - allBackends = backendNames; - }; - - # Global aggregation - globalAggregation = let - baseAgg = import ../aggregators/base.nix {inherit lib;}; - - jobsAgg = baseAgg.aggregateFromNodes { - inherit nodes; - attributePath = "homelab.backups.allJobs"; - enhancer = job: - job - // { - _sourceNode = job._nodeName; - _backupId = "${job._nodeName}-${job.name}"; - _jobFqdn = "${job.name}.${job._nodeName}"; - }; - }; - - # Get all backends from all nodes - allBackendsFromNodes = let - backendConfigs = - mapAttrsToList ( - nodeName: nodeConfig: - attrByPath ["homelab" "backups" "backends"] {} nodeConfig.config - ) - nodes; - enabledBackends = flatten (map ( - backends: - filter (name: backends.${name} != null) (attrNames backends) - ) - backendConfigs); - in - unique enabledBackends; - in { - allJobs = jobsAgg.all; - allBackends = allBackendsFromNodes; - jobsByBackend = groupBy (j: j.backend) jobsAgg.all; - summary = { - total = length jobsAgg.all; - byBackend = jobsAgg.countBy (j: j.backend); - byNode = jobsAgg.countBy (j: j._nodeName); - uniqueBackends = unique (map (j: j.backend) jobsAgg.all); - }; - }; -in { - imports = [ - ../../backup/restic.nix - # ./backup/borgbackup.nix - ]; - - options.homelab.backups = { - enable = mkEnableOption "backup system"; - - jobs = mkOption { - type = types.listOf backupJobType; - default = []; - description = "Backup jobs to execute on this system"; - }; - - # Backend configurations (like your existing setup) - # backends = mkOption { - # type = types.attrs; - # default = {}; - # description = "Backup backend configurations"; - # }; - - defaultLabels = mkOption { - type = types.attrsOf types.str; - default = { - hostname = homelabCfg.hostname; - environment = homelabCfg.environment; - location = homelabCfg.location; - }; - description = "Default labels applied to all backup jobs"; - }; - - monitoring = mkOption { - type = types.bool; - default = true; - description = "Enable backup monitoring and metrics"; - }; - - # Always exposed aggregated data - allJobs = mkOption { - type = types.listOf types.attrs; - default = localAggregation.allJobs; - readOnly = true; - }; - - allBackends = mkOption { - type = types.listOf types.str; - default = localAggregation.allBackends; - readOnly = true; - }; - - global = mkOption { - type = types.attrs; - default = globalAggregation; - readOnly = true; - }; - }; - - config = mkIf cfg.enable { - # Validate that all job backends exist - assertions = [ - { - assertion = all (job: cfg.backends.${job.backend} != null) cfg.jobs; - message = "All backup jobs must reference backends that are defined and not null in homelab.backups.backends"; - } - ]; - - # Always expose both local and global - # 
homelab.backups = { - # allJobs = localAggregation.allJobs; - # allBackends = localAggregation.allBackends; - # global = - # if hasNodes - # then globalAggregation - # else {}; - # }; - }; -} diff --git a/modules/homelab/lib/systems/logging.nix b/modules/homelab/lib/systems/logging.nix deleted file mode 100644 index 710b990..0000000 --- a/modules/homelab/lib/systems/logging.nix +++ /dev/null @@ -1,209 +0,0 @@ -{ - config, - lib, - nodes, - ... -}: -with lib; let - cfg = config.homelab.logging; - homelabCfg = config.homelab; - hasNodes = length (attrNames nodes) > 0; - - # Local aggregation - localAggregation = { - allSources = - cfg.sources - ++ (optional cfg.promtail.enable { - name = "system-journal"; - type = "journal"; - journal.path = "/var/log/journal"; - labels = - cfg.defaultLabels - // { - component = "system"; - log_source = "journald"; - }; - enabled = true; - }); - }; - - # Global aggregation - globalAggregation = let - baseAgg = import ../aggregators/base.nix {inherit lib;}; - - sourcesAgg = baseAgg.aggregateFromNodes { - inherit nodes; - attributePath = "homelab.logging.allSources"; - enhancer = source: - source - // { - _sourceNode = source._nodeName; - _logId = "${source._nodeName}-${source.name}"; - }; - }; - in { - allSources = sourcesAgg.all; - sourcesByType = groupBy (s: s.type) sourcesAgg.all; - summary = { - total = length sourcesAgg.all; - byType = sourcesAgg.countBy (s: s.type); - byNode = sourcesAgg.countBy (s: s._nodeName); - }; - }; -in { - options.homelab.logging = { - enable = mkEnableOption "logging system"; - - promtail = { - enable = mkOption { - type = types.bool; - default = true; - }; - port = mkOption { - type = types.port; - default = 9080; - }; - clients = mkOption { - type = types.listOf (types.submodule { - options = { - url = mkOption {type = types.str;}; - tenant_id = mkOption { - type = types.nullOr types.str; - default = null; - }; - }; - }); - default = [{url = "http://monitor.${homelabCfg.domain}:3100/loki/api/v1/push";}]; - }; - }; - - sources = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption {type = types.str;}; - type = mkOption { - type = types.enum ["journal" "file" "syslog" "docker"]; - default = "file"; - }; - files = mkOption { - type = types.submodule { - options = { - paths = mkOption { - type = types.listOf types.str; - default = []; - }; - multiline = mkOption { - type = types.nullOr types.attrs; - default = null; - }; - }; - }; - default = {}; - }; - journal = mkOption { - type = types.submodule { - options = { - path = mkOption { - type = types.str; - default = "/var/log/journal"; - }; - }; - }; - default = {}; - }; - labels = mkOption { - type = types.attrsOf types.str; - default = {}; - }; - pipelineStages = mkOption { - type = types.listOf types.attrs; - default = []; - }; - enabled = mkOption { - type = types.bool; - default = true; - }; - }; - }); - default = []; - }; - - defaultLabels = mkOption { - type = types.attrsOf types.str; - default = { - hostname = homelabCfg.hostname; - environment = homelabCfg.environment; - location = homelabCfg.location; - }; - }; - - # Always exposed aggregated data - allSources = mkOption { - type = types.listOf types.attrs; - default = localAggregation.allSources; - readOnly = true; - }; - - global = mkOption { - type = types.attrs; - default = globalAggregation; - readOnly = true; - }; - }; - - config = mkIf cfg.enable { - # Local setup - services.promtail = mkIf cfg.promtail.enable { - enable = true; - configuration = { - server = { - http_listen_port = 
cfg.promtail.port; - grpc_listen_port = 0; - }; - positions.filename = "/var/lib/promtail/positions.yaml"; - clients = cfg.promtail.clients; - scrape_configs = map (source: - { - job_name = source.name; - static_configs = [ - { - targets = ["localhost"]; - labels = - cfg.defaultLabels - // source.labels - // ( - if source.type == "file" - then { - __path__ = concatStringsSep "," source.files.paths; - } - else {} - ); - } - ]; - # pipeline_stages = source.pipelineStages; - } - // ( - if source.type == "journal" - then { - journal = { - path = source.journal.path; - labels = cfg.defaultLabels // source.labels; - }; - } - else {} - )) - localAggregation.allSources; - }; - }; - - networking.firewall.allowedTCPPorts = optionals cfg.promtail.enable [cfg.promtail.port]; - - # homelab.logging = { - # allSources = localAggregation.allSources; - # global = - # if hasNodes - # then globalAggregation - # else {}; - # }; - }; -} diff --git a/modules/homelab/lib/systems/monitoring.nix b/modules/homelab/lib/systems/monitoring.nix deleted file mode 100644 index a44df40..0000000 --- a/modules/homelab/lib/systems/monitoring.nix +++ /dev/null @@ -1,222 +0,0 @@ -{ - config, - lib, - nodes, - ... -}: -with lib; let - cfg = config.homelab.monitoring; - homelabCfg = config.homelab; - hasNodes = length (attrNames nodes) > 0; - - # Local aggregation from this instance - localAggregation = { - # Metrics from manually configured + automatic node exporter - allMetrics = - cfg.metrics - ++ (optional cfg.nodeExporter.enable { - name = "node-exporter"; - host = homelabCfg.hostname; - port = cfg.nodeExporter.port; - path = "/metrics"; - jobName = "node"; - scrapeInterval = "30s"; - labels = { - instance = "${homelabCfg.hostname}.${homelabCfg.domain}"; - environment = homelabCfg.environment; - location = homelabCfg.location; - }; - }); - - allHealthChecks = cfg.healthChecks; - }; - - # Global aggregation from all nodes (when nodes available) - globalAggregation = let - baseAgg = import ../aggregators/base.nix {inherit lib;}; - - # Aggregate metrics from all nodes - metricsAgg = baseAgg.aggregateFromNodes { - inherit nodes; - attributePath = "homelab.monitoring.allMetrics"; - enhancer = endpoint: - endpoint - // { - _fullAddress = "${endpoint.host}:${toString endpoint.port}"; - _metricsUrl = "http://${endpoint.host}:${toString endpoint.port}${endpoint.path}"; - }; - }; - - # Aggregate health checks from all nodes - healthChecksAgg = baseAgg.aggregateFromNodes { - inherit nodes; - attributePath = "homelab.monitoring.allHealthChecks"; - enhancer = check: let - actualHost = check.host; - portPart = - if check.port != null - then ":${toString check.port}" - else ""; - url = "${check.protocol or "http"}://${actualHost}${portPart}${check.path}"; - in - check - // { - _actualHost = actualHost; - _url = url; - }; - }; - in { - allMetrics = metricsAgg.all; - allHealthChecks = healthChecksAgg.all; - - # Useful groupings for services - metricsByJobName = groupBy (m: m.jobName) metricsAgg.all; - healthChecksByGroup = groupBy (hc: hc.group or "default") healthChecksAgg.all; - - summary = { - totalMetrics = length metricsAgg.all; - totalHealthChecks = length healthChecksAgg.all; - nodesCovered = unique (map (m: m._nodeName or m.host) metricsAgg.all); - }; - }; -in { - # Instance-level monitoring options - options.homelab.monitoring = { - enable = mkEnableOption "monitoring system"; - - # Node exporter (automatically enabled) - nodeExporter = { - enable = mkOption { - type = types.bool; - default = true; - }; - port = mkOption { - type = 
types.port; - default = 9100; - }; - }; - - # Manual metrics (in addition to service auto-registration) - metrics = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption {type = types.str;}; - host = mkOption { - type = types.str; - default = homelabCfg.hostname; - }; - port = mkOption {type = types.port;}; - path = mkOption { - type = types.str; - default = "/metrics"; - }; - jobName = mkOption {type = types.str;}; - scrapeInterval = mkOption { - type = types.str; - default = "30s"; - }; - labels = mkOption { - type = types.attrsOf types.str; - default = {}; - }; - }; - }); - default = []; - }; - - # Manual health checks (in addition to service auto-registration) - healthChecks = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption {type = types.str;}; - host = mkOption { - type = types.str; - default = homelabCfg.hostname; - }; - port = mkOption { - type = types.nullOr types.port; - default = null; - }; - path = mkOption { - type = types.str; - default = "/"; - }; - protocol = mkOption { - type = types.enum ["http" "https" "tcp" "icmp"]; - default = "http"; - }; - method = mkOption { - type = types.str; - default = "GET"; - }; - interval = mkOption { - type = types.str; - default = "30s"; - }; - timeout = mkOption { - type = types.str; - default = "10s"; - }; - conditions = mkOption { - type = types.listOf types.str; - default = ["[STATUS] == 200"]; - }; - group = mkOption { - type = types.str; - default = "manual"; - }; - labels = mkOption { - type = types.attrsOf types.str; - default = {}; - }; - enabled = mkOption { - type = types.bool; - default = true; - }; - }; - }); - default = []; - }; - - # Read-only aggregated data (always exposed) - allMetrics = mkOption { - type = types.listOf types.attrs; - default = localAggregation.allMetrics; - readOnly = true; - }; - - allHealthChecks = mkOption { - type = types.listOf types.attrs; - default = localAggregation.allHealthChecks; - readOnly = true; - }; - - # Global aggregation (always available, empty if no nodes) - global = mkOption { - type = types.attrs; - default = globalAggregation; - readOnly = true; - }; - }; - - # Configuration - always includes both local and global - config = mkIf cfg.enable { - # Basic instance setup - services.prometheus.exporters.node = mkIf cfg.nodeExporter.enable { - enable = true; - port = cfg.nodeExporter.port; - enabledCollectors = ["systemd" "textfile" "filesystem" "loadavg" "meminfo" "netdev" "stat"]; - }; - - networking.firewall.allowedTCPPorts = optionals cfg.nodeExporter.enable [cfg.nodeExporter.port]; - - # homelab.monitoring = { - # allMetrics = localAggregation.allMetrics; - # allHealthChecks = localAggregation.allHealthChecks; - # global = - # if hasNodes - # then globalAggregation - # else {}; - # }; - }; -} diff --git a/modules/homelab/lib/systems/proxy.nix b/modules/homelab/lib/systems/proxy.nix deleted file mode 100644 index c698fdd..0000000 --- a/modules/homelab/lib/systems/proxy.nix +++ /dev/null @@ -1,98 +0,0 @@ -{ - config, - lib, - nodes, - ... 
-}: -with lib; let - cfg = config.homelab.reverseProxy; - homelabCfg = config.homelab; - hasNodes = length (attrNames nodes) > 0; - - # Local aggregation - localAggregation = { - allEntries = cfg.entries; - }; - - # Global aggregation - globalAggregation = let - baseAgg = import ../aggregators/base.nix {inherit lib;}; - - entriesAgg = baseAgg.aggregateFromNodes { - inherit nodes; - attributePath = "homelab.reverseProxy.allEntries"; - enhancer = entry: - entry - // { - _upstream = "http://${entry.host}:${toString entry.port}"; - _fqdn = "${entry.subdomain}.${entry._nodeConfig.config.homelab.externalDomain or homelabCfg.externalDomain}"; - _internal = "${entry.host}:${toString entry.port}"; - }; - }; - in { - allEntries = entriesAgg.all; - entriesBySubdomain = groupBy (e: e.subdomain) entriesAgg.all; - entriesWithAuth = entriesAgg.filterBy (e: e.enableAuth or false); - entriesWithoutAuth = entriesAgg.filterBy (e: !(e.enableAuth or false)); - summary = { - total = length entriesAgg.all; - byNode = entriesAgg.countBy (e: e._nodeName); - withAuth = length (entriesAgg.filterBy (e: e.enableAuth or false)); - withoutAuth = length (entriesAgg.filterBy (e: !(e.enableAuth or false))); - }; - }; -in { - options.homelab.reverseProxy = { - enable = mkEnableOption "reverse proxy system"; - - entries = mkOption { - type = types.listOf (types.submodule { - options = { - subdomain = mkOption {type = types.str;}; - host = mkOption { - type = types.str; - default = homelabCfg.hostname; - }; - port = mkOption {type = types.port;}; - path = mkOption { - type = types.str; - default = "/"; - }; - enableAuth = mkOption { - type = types.bool; - default = false; - }; - enableSSL = mkOption { - type = types.bool; - default = true; - }; - }; - }); - default = []; - }; - - # Always exposed aggregated data - allEntries = mkOption { - type = types.listOf types.attrs; - default = localAggregation.allEntries; - readOnly = true; - }; - - global = mkOption { - type = types.attrs; - default = globalAggregation; - readOnly = true; - }; - }; - - config = mkIf cfg.enable { - # Always expose both local and global - # homelab.reverseProxy = { - # allEntries = localAggregation.allEntries; - # global = - # if hasNodes - # then globalAggregation - # else {}; - # }; - }; -} diff --git a/modules/homelab/motd/default.nix b/modules/homelab/motd/default.nix deleted file mode 100644 index b5f3bb7..0000000 --- a/modules/homelab/motd/default.nix +++ /dev/null @@ -1,397 +0,0 @@ -# modules/motd/default.nix -{ - config, - lib, - pkgs, - ... -}: -with lib; let - cfg = config.homelab.motd; - - homelab-motd = pkgs.writeShellScriptBin "homelab-motd" '' - #! 
/usr/bin/env bash - - # Colors for output - RED="\e[31m" - GREEN="\e[32m" - YELLOW="\e[33m" - BLUE='\e[0;34m' - CYAN='\e[0;36m' - WHITE='\e[1;37m' - NC='\e[0m' # No Color - BOLD='\e[1m' - - # Helper functions - print_header() { - echo -e "''${BOLD}''${BLUE}╔══════════════════════════════════════════════════════════════╗''${NC}" - echo -e "''${BOLD}''${BLUE}║''${NC}''${WHITE} 🏠 $(hostname -s) HOMELAB ''${NC}''${BOLD}''${BLUE}║''${NC}" - echo -e "''${BOLD}''${BLUE}╚══════════════════════════════════════════════════════════════╝''${NC}" - } - - print_section() { - echo -e "\n''${BOLD}''${CYAN}▶ $1''${NC}" - echo -e "''${CYAN}─────────────────────────────────────────────────────────────''${NC}" - } - - get_service_status() { - local service="$1" - if ${pkgs.systemd}/bin/systemctl is-active --quiet "$service" 2>/dev/null; then - echo -e "''${GREEN}●''${NC}" - elif ${pkgs.systemd}/bin/systemctl is-enabled --quiet "$service" 2>/dev/null; then - echo -e "''${YELLOW}○''${NC}" - else - echo -e "''${RED}×''${NC}" - fi - } - - check_backup_issues() { - local issues=0 - # Check for failed backup services in the last 24 hours - if ${pkgs.systemd}/bin/journalctl --since "24 hours ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager -q 2>/dev/null | grep -q .; then - issues=$((issues + 1)) - fi - - # Check for failed backup timers - local failed_timers=$(${pkgs.systemd}/bin/systemctl list-timers --failed --no-pager --no-legend 2>/dev/null | grep -E "(backup|restic|borgbackup)" | wc -l) - issues=$((issues + failed_timers)) - - echo $issues - } - - # Main script - ${optionalString cfg.clearScreen "clear"} - print_header - - # System info - print_section "SYSTEM" - echo -e " ''${BOLD}Uptime:''${NC} $(${pkgs.procps}/bin/uptime -p | sed 's/up //')" - echo -e " ''${BOLD}Load:''${NC} $(${pkgs.procps}/bin/uptime | awk -F'load average:' '{print $2}' | xargs)" - echo -e " ''${BOLD}Memory:''${NC} $(${pkgs.procps}/bin/free -h | awk '/^Mem:/ {printf "%s/%s", $3, $2}')" - echo -e " ''${BOLD}Disk:''${NC} $(${pkgs.coreutils}/bin/df -h / | awk 'NR==2 {printf "%s/%s (%s)", $3, $2, $5}')" - - ${optionalString cfg.showServices '' - # Local homelab services (auto-detected + manual) - print_section "HOMELAB SERVICES" - - # Auto-detect services from homelab configuration - ${optionalString (config.homelab.services.gatus.enable or false) '' - status=$(get_service_status "gatus") - printf " %-20s %b %s\n" "gatus" "$status" "Uptime monitoring" - ''} - - ${optionalString (config.homelab.services.prometheus.enable or false) '' - status=$(get_service_status "prometheus") - printf " %-20s %b %s\n" "prometheus" "$status" "Metrics collection" - ''} - - ${optionalString (config.homelab.services.grafana.enable or false) '' - status=$(get_service_status "grafana") - printf " %-20s %b %s\n" "grafana" "$status" "Monitoring dashboard" - ''} - - ${optionalString (config.homelab.services.alertmanager.enable or false) '' - status=$(get_service_status "alertmanager") - printf " %-20s %b %s\n" "alertmanager" "$status" "Alert routing" - ''} - - ${optionalString (config.services.nginx.enable or false) '' - status=$(get_service_status "nginx") - printf " %-20s %b %s\n" "nginx" "$status" "Web server/proxy" - ''} - - ${optionalString (config.services.postgresql.enable or false) '' - status=$(get_service_status "postgresql") - printf " %-20s %b %s\n" "postgresql" "$status" "Database server" - ''} - - ${optionalString (config.services.redis.server.enable or false) '' - status=$(get_service_status "redis") - printf " 
%-20s %b %s\n" "redis" "$status" "Key-value store" - ''} - - # Manual services from configuration - ${concatStringsSep "\n" (mapAttrsToList (name: service: '' - status=$(get_service_status "${service.systemdService}") - printf " %-20s %b %s\n" "${name}" "$status" "${service.description}" - '') - cfg.services)} - - # Show legend - echo -e "\n ''${GREEN}●''${NC} Active ''${YELLOW}○''${NC} Inactive ''${RED}×''${NC} Disabled" - ''} - - # Quick backup check - backup_issues=$(check_backup_issues) - if [[ $backup_issues -gt 0 ]]; then - echo -e "\n''${BOLD}''${RED}⚠ WARNING: $backup_issues backup issues detected!''${NC}" - echo -e " Run ''${BOLD}homelab-backup-status''${NC} for details" - fi - - # Recent critical issues - error_count=$(${pkgs.systemd}/bin/journalctl --since "24 hours ago" --priority=err --no-pager -q 2>/dev/null | wc -l || echo 0) - if [[ "$error_count" -gt 0 ]]; then - echo -e "\n''${BOLD}''${YELLOW}⚠ $error_count system errors in last 24h''${NC}" - echo -e " Run ''${BOLD}journalctl --priority=err --since='24 hours ago' ''${NC} for details" - fi - - # Helpful commands - echo -e "\n''${BOLD}''${BLUE}╔══════════════════════════════════════════════════════════════╗''${NC}" - echo -e "''${BOLD}''${BLUE}║''${NC} ''${WHITE}Useful commands: ''${NC}''${BOLD}''${BLUE}║''${NC}" - echo -e "''${BOLD}''${BLUE}║''${NC} ''${CYAN}homelab-monitor-status''${NC} - Monitoring overview ''${BOLD}''${BLUE}║''${NC}" - echo -e "''${BOLD}''${BLUE}║''${NC} ''${CYAN}homelab-backup-status''${NC} - Backup jobs status ''${BOLD}''${BLUE}║''${NC}" - echo -e "''${BOLD}''${BLUE}║''${NC} ''${CYAN}homelab-proxy-status''${NC} - Reverse proxy entries ''${BOLD}''${BLUE}║''${NC}" - echo -e "''${BOLD}''${BLUE}║''${NC} ''${CYAN}systemctl status ''${NC} - Check specific service ''${BOLD}''${BLUE}║''${NC}" - echo -e "''${BOLD}''${BLUE}╚══════════════════════════════════════════════════════════════╝''${NC}" - echo - ''; - - # Helper script for monitoring status - homelab-monitor-status = pkgs.writeShellScriptBin "homelab-monitor-status" '' - #! /usr/bin/env bash - - # Colors - RED="\e[31m" - GREEN="\e[32m" - YELLOW="\e[33m" - BLUE='\e[0;34m' - CYAN='\e[0;36m' - WHITE='\e[1;37m' - NC='\e[0m' - BOLD='\e[1m' - - CONFIG_FILE="/etc/homelab/config.json" - if [[ ! -f "$CONFIG_FILE" ]]; then - echo -e "''${RED}❌ Global homelab configuration not found''${NC}" - exit 1 - fi - - echo -e "''${BOLD}''${BLUE}📊 Homelab Monitoring Status''${NC}" - echo -e "''${BLUE}=============================''${NC}" - - # Show metrics endpoints - echo -e "\n''${BOLD}''${CYAN}Metrics Endpoints:''${NC}" - metrics_count=$(${pkgs.jq}/bin/jq '.monitoring.metrics | length' "$CONFIG_FILE" 2>/dev/null || echo 0) - if [[ $metrics_count -gt 0 ]]; then - ${pkgs.jq}/bin/jq -r '.monitoring.metrics[]? 
| " ''${GREEN}●''${NC} \(.name): ''${BOLD}\(.host):\(.port)''${NC}\(.path) ''${YELLOW}(job: \(.jobName))''${NC}"' "$CONFIG_FILE" 2>/dev/null - echo -e "\n ''${BOLD}Total: ''${metrics_count} endpoints''${NC}" - else - echo -e " ''${YELLOW}No metrics endpoints configured''${NC}" - fi - - # Show health checks by group - echo -e "\n''${BOLD}''${CYAN}Health Checks:''${NC}" - health_count=$(${pkgs.jq}/bin/jq '.monitoring.healthChecks | length' "$CONFIG_FILE" 2>/dev/null || echo 0) - if [[ $health_count -gt 0 ]]; then - # Group health checks - ${pkgs.jq}/bin/jq -r ' - .monitoring.healthChecks | - group_by(.group // "default") | - .[] | - "''${BOLD} \(.[0].group // "default" | ascii_upcase) Group:''${NC}" as $header | - ($header, ( - .[] | - " ''${if .enabled // true then "''${GREEN}●" else "''${YELLOW}○" end}''${NC} \(.name): ''${BOLD}\(.protocol)://\(.host)\(if .port then ":\(.port)" else "" end)''${NC}\(.path)" - )) - ' "$CONFIG_FILE" 2>/dev/null - echo -e "\n ''${BOLD}Total: ''${health_count} health checks''${NC}" - else - echo -e " ''${YELLOW}No health checks configured''${NC}" - fi - - echo -e "\n''${CYAN}Run ''${BOLD}homelab-proxy-status''${NC}''${CYAN} and ''${BOLD}homelab-backup-status''${NC}''${CYAN} for more details.''${NC}" - ''; - - # Helper script for backup status - homelab-backup-status = pkgs.writeShellScriptBin "homelab-backup-status" '' - #! /usr/bin/env bash - - # Colors - RED="\e[31m" - GREEN="\e[32m" - YELLOW="\e[33m" - BLUE='\e[0;34m' - CYAN='\e[0;36m' - WHITE='\e[1;37m' - NC='\e[0m' - BOLD='\e[1m' - - echo -e "''${BOLD}''${BLUE}💾 Backup Status''${NC}" - echo -e "''${BLUE}===============''${NC}" - - # Check backup timers - echo -e "\n''${BOLD}''${CYAN}Backup Timers:''${NC}" - backup_timers=$(${pkgs.systemd}/bin/systemctl list-timers --no-pager --no-legend 2>/dev/null | grep -E "(backup|restic|borgbackup)") - if [[ -n "$backup_timers" ]]; then - while IFS= read -r line; do - if [[ -n "$line" ]]; then - next=$(echo "$line" | awk '{print $1, $2}') - left=$(echo "$line" | awk '{print $3}') - timer=$(echo "$line" | awk '{print $5}') - service=$(echo "$line" | awk '{print $6}') - - # Color code based on time left - if [[ "$left" == "n/a" ]]; then - color="''${RED}" - status="●" - elif echo "$left" | grep -qE "(sec|min|[0-9]h)"; then - color="''${YELLOW}" - status="●" - else - color="''${GREEN}" - status="●" - fi - - printf " %b%s%b %-25s Next: %s (%s)\n" "$color" "$status" "$NC" "$(basename "$timer" .timer)" "$next" "$left" - fi - done <<< "$backup_timers" - else - echo -e " ''${YELLOW}No backup timers found''${NC}" - fi - - # Check recent backup activity (last 3 days, summarized) - echo -e "\n''${BOLD}''${CYAN}Recent Activity (3 days):''${NC}" - - # Count successful vs failed backups - success_count=$(${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --no-pager -q 2>/dev/null | grep -iE "(completed|success|finished)" | wc -l) - error_count=$(${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager -q 2>/dev/null | wc -l) - - if [[ $success_count -gt 0 ]]; then - echo -e " ''${GREEN}✅ $success_count successful backups''${NC}" - fi - if [[ $error_count -gt 0 ]]; then - echo -e " ''${RED}❌ $error_count failed backups''${NC}" - echo -e "\n''${BOLD}''${RED}Recent Failures:''${NC}" - ${pkgs.systemd}/bin/journalctl --since "3 days ago" --unit="*backup*" --unit="restic*" --unit="borgbackup*" --priority=err --no-pager --lines=3 2>/dev/null | while read -r 
line; do - # Extract just the important parts - timestamp=$(echo "$line" | awk '{print $1, $2, $3}') - service=$(echo "$line" | grep -oE "(restic-backups-[^[]+|borgbackup-job-[^[]+|[^[]*backup[^[]*)" | head -1) - message=$(echo "$line" | sed -E 's/.*\]: //' | cut -c1-60) - echo -e " ''${YELLOW}$timestamp''${NC} ''${BOLD}$service''${NC}: $message..." - done - elif [[ $success_count -eq 0 ]]; then - echo -e " ''${YELLOW}⚠️ No backup activity in last 3 days''${NC}" - else - echo -e " ''${GREEN}✅ All backups completed successfully''${NC}" - fi - - # Show backup summary from global config if available - CONFIG_FILE="/etc/homelab/config.json" - if [[ -f "$CONFIG_FILE" ]]; then - total_jobs=$(${pkgs.jq}/bin/jq -r '.backups.summary.totalJobs // 0' "$CONFIG_FILE" 2>/dev/null) - backends=$(${pkgs.jq}/bin/jq -r '.backups.summary.backendsInUse[]?' "$CONFIG_FILE" 2>/dev/null | tr '\n' ' ') - - if [[ $total_jobs -gt 0 ]]; then - echo -e "\n''${BOLD}''${CYAN}Configuration:''${NC}" - echo -e " ''${BOLD}Total jobs:''${NC} $total_jobs" - if [[ -n "$backends" ]]; then - echo -e " ''${BOLD}Backends:''${NC} $backends" - fi - fi - fi - ''; - - # Helper script for proxy status - homelab-proxy-status = pkgs.writeShellScriptBin "homelab-proxy-status" '' - #! /usr/bin/env bash - - # Colors - RED="\e[31m" - GREEN="\e[32m" - YELLOW="\e[33m" - BLUE='\e[0;34m' - CYAN='\e[0;36m' - WHITE='\e[1;37m' - NC='\e[0m' - BOLD='\e[1m' - - CONFIG_FILE="/etc/homelab/config.json" - if [[ ! -f "$CONFIG_FILE" ]]; then - echo -e "''${RED}❌ Global homelab configuration not found''${NC}" - exit 1 - fi - - echo -e "''${BOLD}''${BLUE}🔗 Reverse Proxy Status''${NC}" - echo -e "''${BLUE}======================''${NC}" - - proxy_count=$(${pkgs.jq}/bin/jq '.reverseProxy.entries | length' "$CONFIG_FILE" 2>/dev/null || echo 0) - if [[ $proxy_count -gt 0 ]]; then - ${pkgs.jq}/bin/jq -r '.reverseProxy.entries[]? 
| - " ''${GREEN}●''${NC} ''${BOLD}\(.subdomain)''${NC}: \(.externalHost) → \(.internalHost)\(if .enableAuth then " ''${YELLOW}🔐''${NC}" else "" end)\(if .enableSSL then " ''${GREEN}🔒''${NC}" else "" end)"' "$CONFIG_FILE" 2>/dev/null - - echo -e "\n''${BOLD}Legend:''${NC} ''${YELLOW}🔐''${NC} Auth enabled, ''${GREEN}🔒''${NC} SSL enabled" - echo -e "''${BOLD}Total: ''${proxy_count} proxy entries''${NC}" - else - echo -e " ''${YELLOW}No proxy entries configured''${NC}" - fi - ''; -in { - options.homelab.motd = { - enable = mkEnableOption "Simple homelab MOTD"; - - clearScreen = mkOption { - type = types.bool; - default = true; - description = "Clear screen before showing MOTD"; - }; - - showServices = mkOption { - type = types.bool; - default = true; - description = "Show local homelab services status"; - }; - - services = mkOption { - type = types.attrsOf (types.submodule { - options = { - systemdService = mkOption { - type = types.str; - description = "Name of the systemd service to monitor"; - }; - description = mkOption { - type = types.str; - default = ""; - description = "Human-readable description of the service"; - }; - }; - }); - default = {}; - description = "Local homelab services to show in MOTD"; - example = literalExpression '' - { - "nginx" = { - systemdService = "nginx"; - description = "Web server"; - }; - "grafana" = { - systemdService = "grafana"; - description = "Monitoring dashboard"; - }; - } - ''; - }; - }; - - config = mkIf cfg.enable { - # Create helper commands - environment.systemPackages = with pkgs; [ - jq - homelab-motd - homelab-monitor-status - homelab-backup-status - homelab-proxy-status - ]; - - # Set up MOTD to run on login - programs.bash.interactiveShellInit = '' - # Run homelab MOTD on interactive login (only once per session) - if [[ $- == *i* ]] && [[ -z "$MOTD_SHOWN" ]] && [[ -n "$SSH_CONNECTION" || "$TERM" == "linux" ]]; then - export MOTD_SHOWN=1 - ${homelab-motd}/bin/homelab-motd - fi - ''; - - # Disable default MOTD - users.motd = mkDefault ""; - security.pam.services.login.showMotd = mkDefault false; - }; -} diff --git a/modules/homelab/services/alertmanager.nix b/modules/homelab/services/alertmanager.nix deleted file mode 100644 index 43c9636..0000000 --- a/modules/homelab/services/alertmanager.nix +++ /dev/null @@ -1,162 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - serviceName = "alertmanager"; - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - (import ../lib/features/proxy.nix serviceName) - ]; - - # Core service options - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Vault Warden"; - - description = mkOption { - type = types.str; - default = "Vault Warden"; - }; - - port = mkOption { - type = types.port; - default = 9093; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = '' - Whether to open the ports specified in `port` and `webPort` in the firewall. - ''; - }; - - environmentFile = lib.mkOption { - type = with lib.types; nullOr path; - default = null; - example = "/var/lib/vaultwarden.env"; - description = '' - Additional environment file as defined in {manpage}`systemd.exec(5)`. - - Secrets like {env}`ADMIN_TOKEN` and {env}`SMTP_PASSWORD` - should be passed to the service without adding them to the world-readable Nix store. 
- - Note that this file needs to be available on the host on which `vaultwarden` is running. - - As a concrete example, to make the Admin UI available (from which new users can be invited initially), - the secret {env}`ADMIN_TOKEN` needs to be defined as described - [here](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page): - - ``` - # Admin secret token, see - # https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page - ADMIN_TOKEN=...copy-paste a unique generated secret token here... - ``` - ''; - }; - - systemdServices = mkOption { - type = types.listOf types.str; - default = [ - "vaultwarden.service" - "vaultwarden" - ]; - description = "Systemd services to monitor"; - }; - }; - - # Service configuration with smart defaults - config = mkIf cfg.enable (mkMerge [ - { - services.prometheus.alertmanager = { - enable = true; - openFirewall = cfg.openFirewall; - - environmentFile = alertmanagerEnv; - - webExternalUrl = "http://monitor.lab:9093"; # optional but helpful - configuration = { - route = { - receiver = "null"; - group_by = ["alertname"]; - group_wait = "10s"; - group_interval = "5m"; - repeat_interval = "4h"; - - routes = [ - { - receiver = "telegram"; - matchers = [ - "severity =~ \"warning|critical\"" - ]; - group_wait = "10s"; - continue = true; - } - ]; - }; - - receivers = [ - {name = "null";} - { - name = "telegram"; - telegram_configs = [ - { - api_url = "https://api.telegram.org"; - bot_token = "$TELEGRAM_BOT_TOKEN"; - chat_id = -1002642560007; - message_thread_id = 4; - parse_mode = "HTML"; - send_resolved = true; - message = "{{ template \"telegram.message\". }}"; - } - ]; - } - ]; - - templates = [ - (pkgs.writeText "telegram.tmpl" (builtins.readFile ./provisioning/templates/telegram.tmpl)) - # (pkgs.writeText "telegram.markdown.v2.tmpl" (builtins.readFile ./provisioning/templates/telegram.markdown.v2.tmpl)) - ]; - }; - }; - - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; - } - { - homelab.services.${serviceName}.monitoring = { - metrics.path = "/metrics"; - - healthCheck.path = "/healthz"; - healthCheck.conditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; - - extraLabels = { - component = "example"; - }; - }; - } - { - # homelab.services.${serviceName}.logging = { - # files = ["/var/log/example/log.log"]; - # # parsing = { - # # regex = "^ts=(?P[^ ]+) caller=(?P[^ ]+) level=(?P\\w+) msg=\"(?P[^\"]*)\""; - # # extractFields = ["level" "caller"]; - # # }; - # extraLabels = { - # component = "example"; - # application = "example"; - # }; - # }; - } - { - homelab.services.${serviceName}.proxy = { - enableAuth = true; - }; - } - ]); -} diff --git a/modules/homelab/services/caddy.nix b/modules/homelab/services/caddy.nix deleted file mode 100644 index a3f6b6c..0000000 --- a/modules/homelab/services/caddy.nix +++ /dev/null @@ -1,96 +0,0 @@ -{ - config, - lib, - ... 
-}: -with lib; let - serviceName = "caddy"; - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; - - allProxyEntries = homelabCfg.reverseProxy.global.allEntries; - generateVirtualHosts = entries: - listToAttrs (map (entry: { - name = entry._fqdn; - value = { - extraConfig = '' - reverse_proxy ${entry._upstream} - ''; - }; - }) - entries); -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - ]; - - # Core service options - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Caddy web server"; - - description = mkOption { - type = types.str; - default = "Caddy web server"; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = '' - Whether to open the ports specified in `port` and `webPort` in the firewall. - ''; - }; - - systemdServices = mkOption { - type = types.listOf types.str; - default = [ - "caddy.service" - "caddy" - ]; - description = "Systemd services to monitor"; - }; - }; - - # Service configuration with smart defaults - config = mkIf cfg.enable (mkMerge [ - { - services.caddy = { - enable = true; - - virtualHosts = generateVirtualHosts allProxyEntries; - }; - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [80 443]; - } - { - # homelab.services.${serviceName}.monitoring = { - # metrics.path = "/metrics"; - - # healthCheck.path = "/healthz"; - # healthCheck.conditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; - - # extraLabels = { - # component = "example"; - # }; - # }; - } - { - # homelab.services.${serviceName}.logging = { - # files = ["/var/log/example/log.log"]; - # # parsing = { - # # regex = "^ts=(?P[^ ]+) caller=(?P[^ ]+) level=(?P\\w+) msg=\"(?P[^\"]*)\""; - # # extractFields = ["level" "caller"]; - # # }; - # extraLabels = { - # component = "example"; - # application = "example"; - # }; - # }; - } - { - # homelab.services.${serviceName}.proxy = { - # enableAuth = true; - # }; - } - ]); -} diff --git a/modules/homelab/services/default.nix b/modules/homelab/services/default.nix deleted file mode 100644 index e9878cc..0000000 --- a/modules/homelab/services/default.nix +++ /dev/null @@ -1,29 +0,0 @@ -{ - imports = [ - ./minio.nix - ./gatus.nix - ./prometheus.nix - ./grafana.nix - ./example.nix - ./vaultwarden.nix - # ./monitoring/loki.nix - # - # - # TODO - # - # ./alertmanager.nix - # ./dnsmasq.nix - # ./authelia.nix - # ./lldap.nix - # ./roundcube.nix - # ./mailserver.nix - ./caddy.nix - # ./traefik.nix - # ./ente-photos.nix - # ./forgejo.nix - # ./forgejo-runner.nix - # ./jellyfin.nix - # ./arr.nix - # - ]; -} diff --git a/modules/homelab/services/example.nix b/modules/homelab/services/example.nix deleted file mode 100644 index 0fdf20a..0000000 --- a/modules/homelab/services/example.nix +++ /dev/null @@ -1,86 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
-}: -with lib; let - serviceName = "example"; - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - (import ../lib/features/proxy.nix serviceName) - ]; - - # Core service options - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Example Homelab Service"; - - description = mkOption { - type = types.str; - default = "Example Homelab Service"; - }; - - port = mkOption { - type = types.port; - default = 1234; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = '' - Whether to open the ports specified in `port` and `webPort` in the firewall. - ''; - }; - - systemdServices = mkOption { - type = types.listOf types.str; - default = [ - "example.service" - "example" - ]; - description = "Systemd services to monitor"; - }; - }; - - # Service configuration with smart defaults - config = mkIf cfg.enable (mkMerge [ - { - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; - } - { - homelab.services.${serviceName}.monitoring = { - metrics.path = "/metrics"; - - healthCheck.path = "/healthz"; - healthCheck.conditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; - - extraLabels = { - component = "example"; - }; - }; - } - { - homelab.services.${serviceName}.logging = { - files = ["/var/log/example/log.log"]; - # parsing = { - # regex = "^ts=(?P[^ ]+) caller=(?P[^ ]+) level=(?P\\w+) msg=\"(?P[^\"]*)\""; - # extractFields = ["level" "caller"]; - # }; - extraLabels = { - component = "example"; - application = "example"; - }; - }; - } - { - homelab.services.${serviceName}.proxy = { - enableAuth = true; - }; - } - ]); -} diff --git a/modules/homelab/services/gatus.nix b/modules/homelab/services/gatus.nix deleted file mode 100644 index 3bdd610..0000000 --- a/modules/homelab/services/gatus.nix +++ /dev/null @@ -1,266 +0,0 @@ -{ - config, - lib, - ... 
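# A minimal host-side sketch of enabling the example service defined above,
# using only the options it declares (enable, port, openFirewall):
#
#   homelab.services.example = {
#     enable = true;
#     port = 1234;        # default shown above
#     openFirewall = true;
#   };
#
# The mkMerge blocks above then merge in the monitoring, logging and proxy defaults.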
-}: -with lib; let - serviceName = "gatus"; - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; - - # Convert homelab health checks to Gatus format - formatHealthCheck = check: let - # Build the URL based on the health check configuration - url = check._url or "http://${check.host}:${toString (check.port or 80)}${check.path}"; - - # Convert conditions to Gatus format (they should already be compatible) - conditions = check.conditions or ["[STATUS] == 200"]; - - # Convert alerts to Gatus format - alerts = map (alert: { - inherit (alert) type enabled; - failure-threshold = alert.failure-threshold or 3; - success-threshold = alert.success-threshold or 2; - description = "Health check alert for ${check.name}"; - }) (check.alerts or []); - in { - name = check.name; - group = check.group or "default"; - url = url; - interval = check.interval or "30s"; - - # Add method and headers for HTTP/HTTPS checks - method = - if (check.protocol == "http" || check.protocol == "https") - then check.method or "GET" - else null; - - conditions = conditions; - - # Add timeout - client = { - timeout = check.timeout or "10s"; - }; - - # Add alerts if configured - alerts = - if alerts != [] - then alerts - else []; - - # Add labels for UI organization - ui = { - hide-hostname = false; - hide-url = false; - description = "Health check for ${check.name} on ${check.host or check._actualHost or "unknown"}"; - }; - }; - - # Generate Gatus configuration from aggregated health checks - gatusConfig = - recursiveUpdate { - # Global Gatus settings - alerting = mkIf (cfg.alerting != {}) cfg.alerting; - - web = { - address = cfg.web.address; - port = cfg.port; - }; - - # Enable metrics - metrics = cfg.monitoring.enable; - - ui = { - title = cfg.ui.title; - header = cfg.ui.header; - link = cfg.ui.link; - buttons = cfg.ui.buttons; - }; - - storage = cfg.storage; - - # Convert all enabled health checks from the fleet to Gatus endpoints - endpoints = let - # Get all health checks - try global first, fallback to local - allHealthChecks = homelabCfg.monitoring.global.allHealthChecks - or homelabCfg.monitoring.allHealthChecks - or []; - - # Filter only enabled health checks - enabledHealthChecks = filter (check: check.enabled or true) allHealthChecks; - - # Convert to Gatus format - gatusEndpoints = map formatHealthCheck enabledHealthChecks; - in - gatusEndpoints; - } - cfg.extraConfig; -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - (import ../lib/features/proxy.nix serviceName) - ]; - - # Core service options - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Gatus Status Page"; - - port = mkOption { - type = types.port; - default = 8080; - }; - - description = mkOption { - type = types.str; - default = "Gatus Status Page"; - }; - - # Gatus-specific options - ui = { - title = mkOption { - type = types.str; - default = "Homelab Status"; - description = "Title for the Gatus web interface"; - }; - - header = mkOption { - type = types.str; - default = "Homelab Services Status"; - description = "Header text for the Gatus interface"; - }; - - link = mkOption { - type = types.str; - default = "https://status.${homelabCfg.externalDomain}"; - description = "Link in the Gatus header"; - }; - - buttons = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption {type = types.str;}; - link = mkOption {type = types.str;}; - }; - }); - default = [ - { - name = "Grafana"; - link = 
"https://grafana.${homelabCfg.externalDomain}"; - } - { - name = "Prometheus"; - link = "https://prometheus.${homelabCfg.externalDomain}"; - } - ]; - description = "Navigation buttons in the Gatus interface"; - }; - }; - - alerting = mkOption { - type = types.attrs; - default = {}; - description = "Gatus alerting configuration"; - example = literalExpression '' - { - discord = { - webhook-url = "https://discord.com/api/webhooks/..."; - default-alert = { - enabled = true; - description = "Health check failed"; - failure-threshold = 3; - success-threshold = 2; - }; - }; - } - ''; - }; - - storage = mkOption { - type = types.attrs; - default = { - type = "memory"; - }; - description = "Gatus storage configuration"; - example = literalExpression '' - { - type = "postgres"; - path = "postgres://user:password@localhost/gatus?sslmode=disable"; - } - ''; - }; - - web = { - address = mkOption { - type = types.str; - default = "0.0.0.0"; - description = "Web interface bind address"; - }; - }; - - extraConfig = mkOption { - type = types.attrs; - default = {}; - description = "Additional Gatus configuration options"; - }; - }; - - # Service configuration with smart defaults - config = mkIf cfg.enable (mkMerge [ - # Core Gatus service - { - services.gatus = { - enable = true; - settings = gatusConfig; - }; - - networking.firewall.allowedTCPPorts = [cfg.port]; - - homelab.services.${serviceName}.monitoring.enable = mkDefault true; - } - - { - homelab.services.${serviceName}.monitoring = mkDefault { - metrics = { - path = "/metrics"; - extraEndpoints = []; - }; - healthCheck = { - path = "/health"; - conditions = [ - "[STATUS] == 200" - "[BODY].status == UP" - "[RESPONSE_TIME] < 1000" - ]; - extraChecks = []; - }; - extraLabels = { - component = "status-monitoring"; - tier = "monitoring"; - }; - }; - } - - { - homelab.services.${serviceName}.logging = mkDefault { - files = ["/var/log/gatus/gatus.log"]; - parsing = { - # Gatus log format: 2024-01-01T12:00:00Z [INFO] message - regex = "^(?P\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z) \\[(?P\\w+)\\] (?P.*)"; - extractFields = ["level"]; - }; - extraLabels = { - component = "status-monitoring"; - application = "gatus"; - }; - }; - } - - { - homelab.services.${serviceName}.proxy = mkDefault { - subdomain = "status"; - enableAuth = false; # Status page should be public - }; - } - ]); -} diff --git a/modules/homelab/services/grafana.nix b/modules/homelab/services/grafana.nix deleted file mode 100644 index 2663cc9..0000000 --- a/modules/homelab/services/grafana.nix +++ /dev/null @@ -1,84 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
-}: -with lib; let - serviceName = "grafana"; - cfg = config.homelab.services.${serviceName}; -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - (import ../lib/features/proxy.nix serviceName) - ]; - - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Grafana Dashboard"; - - port = mkOption { - type = types.port; - default = 3000; - }; - - description = mkOption { - type = types.str; - default = "Grafana Metrics Dashboard"; - }; - }; - - config = mkIf cfg.enable (mkMerge [ - # Core Grafana service - { - services.grafana = { - enable = true; - settings.server = { - http_port = cfg.port; - http_addr = "0.0.0.0"; - }; - }; - - networking.firewall.allowedTCPPorts = [cfg.port]; - - homelab.services.${serviceName}.monitoring.enable = mkDefault true; - } - - # Smart defaults for Grafana - { - # Grafana-specific log setup - homelab.services.${serviceName}.logging = mkDefault { - files = ["/var/log/grafana/grafana.log"]; - parsing = { - # Grafana log format: t=2024-01-01T12:00:00Z lvl=info msg="message" - regex = "^t=(?P[^ ]+) lvl=(?P\\w+) msg=\"(?P[^\"]*)\""; - extractFields = ["level"]; - }; - extraLabels = { - application = "grafana"; - component = "dashboard"; - }; - }; - } - { - homelab.services.${serviceName}.monitoring = mkDefault { - metrics.path = "/metrics"; - healthCheck = { - path = "/api/health"; - conditions = ["[STATUS] == 200" "[BODY].database == ok"]; - }; - extraLabels = { - component = "dashboard"; - tier = "monitoring"; - }; - }; - } - { - # Grafana needs auth by default (admin interface) - homelab.services.${serviceName}.proxy = mkDefault { - subdomain = "grafana"; - # enableAuth = true; - }; - } - ]); -} diff --git a/modules/homelab/services/minio.nix b/modules/homelab/services/minio.nix deleted file mode 100644 index cebdd50..0000000 --- a/modules/homelab/services/minio.nix +++ /dev/null @@ -1,66 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - service = "minio"; - cfg = config.homelab.services.${service}; - homelabCfg = config.homelab; -in { - options.homelab.services.${service} = { - enable = mkEnableOption "Minio Object Storage"; - - port = mkOption { - default = 9000; - type = types.port; - description = "Port of the server."; - }; - - webPort = mkOption { - default = 9001; - type = types.port; - description = "Port of the web UI (console)."; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = '' - Whether to open the ports specified in `port` and `webPort` in the firewall. - ''; - }; - }; - - config = mkIf cfg.enable { - sops.secrets."ente/minio/root_user" = {}; - sops.secrets."ente/minio/root_password" = {}; - - sops.templates."minio-root-credentials".content = '' - MINIO_ROOT_USER=${config.sops.placeholder."ente/minio/root_user"} - MINIO_ROOT_PASSWORD=${config.sops.placeholder."ente/minio/root_password"} - ''; - - services.minio = { - enable = true; - rootCredentialsFile = config.sops.templates."minio-root-credentials".path; - }; - - networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [cfg.port cfg.webPort]; - - homelab.reverseProxy.entries = [ - { - subdomain = "${service}-api"; - port = cfg.port; - } - { - subdomain = "${service}"; - port = cfg.webPort; - } - ]; - - # https://min.io/docs/minio/linux/operations/monitoring/collect-minio-metrics-using-prometheus.html - # metrics and monitoring... 
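# Sketch only: per the MinIO docs linked above, the Prometheus scrape target is usually
# /minio/v2/metrics/cluster on the API port (and may require a bearer token). Wiring it
# into this repo's pattern could look roughly like the following, with option names
# assumed from the other modules in this diff:
#
#   homelab.monitoring.metrics = [{
#     name = "minio";
#     port = cfg.port;
#     path = "/minio/v2/metrics/cluster";
#   }];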
- }; -} diff --git a/modules/homelab/services/monitoring/alertmanager.nix b/modules/homelab/services/monitoring/alertmanager.nix deleted file mode 100644 index b8da33d..0000000 --- a/modules/homelab/services/monitoring/alertmanager.nix +++ /dev/null @@ -1,237 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - cfg = config.homelab.services.alertmanager; - homelabCfg = config.homelab; - - # Default alertmanager configuration - defaultConfig = { - global = { - smtp_smarthost = cfg.smtp.host; - smtp_from = cfg.smtp.from; - smtp_auth_username = cfg.smtp.username; - smtp_auth_password = cfg.smtp.password; - }; - - # Inhibit rules to prevent spam - inhibit_rules = [ - { - source_match = { - severity = "critical"; - }; - target_match = { - severity = "warning"; - }; - equal = ["alertname" "dev" "instance"]; - } - ]; - - route = { - group_by = ["alertname"]; - group_wait = "10s"; - group_interval = "10s"; - repeat_interval = "1h"; - receiver = "web.hook"; - routes = cfg.routes; - }; - - receivers = - [ - { - name = "web.hook"; - webhook_configs = [ - { - url = "http://127.0.0.1:5001/"; - } - ]; - } - ] - ++ cfg.receivers; - }; - - # Merge with user config - alertmanagerConfig = recursiveUpdate defaultConfig cfg.extraConfig; -in { - options.homelab.services.alertmanager = { - enable = mkEnableOption "Alertmanager for handling alerts"; - - port = mkOption { - type = types.port; - default = 9093; - description = "Port for Alertmanager web interface"; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = "Whether to open firewall ports"; - }; - - dataDir = mkOption { - type = types.str; - default = "/var/lib/alertmanager"; - description = "Directory to store Alertmanager data"; - }; - - smtp = { - host = mkOption { - type = types.str; - default = "localhost:587"; - description = "SMTP server host:port"; - }; - - from = mkOption { - type = types.str; - default = "alertmanager@${homelabCfg.externalDomain}"; - description = "From email address"; - }; - - username = mkOption { - type = types.str; - default = ""; - description = "SMTP username"; - }; - - password = mkOption { - type = types.str; - default = ""; - description = "SMTP password"; - }; - }; - - routes = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional routing rules"; - example = literalExpression '' - [ - { - match = { - service = "gatus"; - }; - receiver = "discord-webhook"; - } - { - match = { - severity = "critical"; - }; - receiver = "email-alerts"; - } - ] - ''; - }; - - receivers = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Alert receivers configuration"; - example = literalExpression '' - [ - { - name = "email-alerts"; - email_configs = [{ - to = "admin@example.com"; - subject = "{{ range .Alerts }}{{ .Annotations.summary }}{{ end }}"; - body = "{{ range .Alerts }}{{ .Annotations.description }}{{ end }}"; - }]; - } - { - name = "discord-webhook"; - webhook_configs = [{ - url = "https://discord.com/api/webhooks/..."; - title = "{{ range .Alerts }}{{ .Annotations.summary }}{{ end }}"; - }]; - } - ] - ''; - }; - - extraConfig = mkOption { - type = types.attrs; - default = {}; - description = "Additional Alertmanager configuration"; - }; - - webExternalUrl = mkOption { - type = types.str; - default = "https://alertmanager.${homelabCfg.externalDomain}"; - description = "External URL for Alertmanager web interface"; - }; - }; - - config = mkIf cfg.enable { - services.prometheus.alertmanager = { - enable = true; - port = 
cfg.port; - listenAddress = "0.0.0.0"; - webExternalUrl = cfg.webExternalUrl; - dataDir = cfg.dataDir; - - # Write configuration to file - configuration = alertmanagerConfig; - }; - - # Open firewall if requested - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; - - # Add to monitoring endpoints - homelab.monitoring.metrics = [ - { - name = "alertmanager"; - port = cfg.port; - path = "/metrics"; - jobName = "alertmanager"; - labels = { - service = "alertmanager"; - component = "monitoring"; - }; - } - ]; - - # Add health checks - homelab.monitoring.healthChecks = [ - { - name = "alertmanager-web-interface"; - port = cfg.port; - path = "/-/healthy"; - interval = "30s"; - conditions = [ - "[STATUS] == 200" - "[RESPONSE_TIME] < 1000" - ]; - group = "monitoring"; - labels = { - service = "alertmanager"; - component = "web-interface"; - }; - } - { - name = "alertmanager-ready"; - port = cfg.port; - path = "/-/ready"; - interval = "30s"; - conditions = [ - "[STATUS] == 200" - ]; - group = "monitoring"; - labels = { - service = "alertmanager"; - component = "readiness"; - }; - } - ]; - - # Add reverse proxy entry - homelab.reverseProxy.entries = [ - { - subdomain = "alertmanager"; - host = homelabCfg.hostname; - port = cfg.port; - } - ]; - }; -} diff --git a/modules/homelab/services/monitoring/alertmanager_new.nix b/modules/homelab/services/monitoring/alertmanager_new.nix deleted file mode 100644 index f64d7c3..0000000 --- a/modules/homelab/services/monitoring/alertmanager_new.nix +++ /dev/null @@ -1,326 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - cfg = config.homelab.services.alertmanager; - homelabCfg = config.homelab; - - # Build alertmanager configuration - alertmanagerConfig = { - route = { - receiver = cfg.defaultReceiver; - group_by = cfg.groupBy; - group_wait = cfg.groupWait; - group_interval = cfg.groupInterval; - repeat_interval = cfg.repeatInterval; - routes = cfg.routes; - }; - - receivers = - [ - {name = cfg.defaultReceiver;} - ] - ++ cfg.receivers; - - inhibit_rules = cfg.inhibitRules; - - templates = cfg.templates; - }; -in { - options.homelab.services.alertmanager = { - enable = mkEnableOption "Alertmanager for handling alerts"; - - port = mkOption { - type = types.port; - default = 9093; - description = "Port for Alertmanager web interface"; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = "Whether to open firewall ports"; - }; - - dataDir = mkOption { - type = types.str; - default = "/var/lib/alertmanager"; - description = "Directory to store Alertmanager data"; - }; - - webExternalUrl = mkOption { - type = types.str; - default = "http://${homelabCfg.hostname}.${homelabCfg.domain}:${toString cfg.port}"; - description = "External URL for Alertmanager web interface"; - }; - - environmentFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Environment file for secrets (e.g., Telegram bot token)"; - example = "/run/secrets/alertmanager-env"; - }; - - # Routing configuration - defaultReceiver = mkOption { - type = types.str; - default = "null"; - description = "Default receiver for unmatched alerts"; - }; - - groupBy = mkOption { - type = types.listOf types.str; - default = ["alertname"]; - description = "Labels to group alerts by"; - }; - - groupWait = mkOption { - type = types.str; - default = "10s"; - description = "Time to wait before sending initial notification"; - }; - - groupInterval = mkOption { - type = types.str; - default = "5m"; - description = "Time to 
wait before sending updates for a group"; - }; - - repeatInterval = mkOption { - type = types.str; - default = "4h"; - description = "Time to wait before re-sending an alert"; - }; - - routes = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Alert routing rules"; - example = literalExpression '' - [ - { - receiver = "telegram"; - matchers = ["severity =~ \"warning|critical\""]; - group_wait = "10s"; - continue = true; - } - ] - ''; - }; - - receivers = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Alert receivers configuration"; - example = literalExpression '' - [ - { - name = "telegram"; - telegram_configs = [{ - api_url = "https://api.telegram.org"; - bot_token = "$TELEGRAM_BOT_TOKEN"; - chat_id = -1002642560007; - message_thread_id = 4; - parse_mode = "HTML"; - send_resolved = true; - message = "{{ template \"telegram.message\" . }}"; - }]; - } - ] - ''; - }; - - inhibitRules = mkOption { - type = types.listOf types.attrs; - default = [ - { - source_match = {severity = "critical";}; - target_match = {severity = "warning";}; - equal = ["alertname" "instance"]; - } - ]; - description = "Rules for inhibiting alerts"; - }; - - templates = mkOption { - type = types.listOf types.path; - default = []; - description = "Template files for alert formatting"; - example = literalExpression '' - [ - (pkgs.writeText "telegram.tmpl" ''' - {{- define "telegram.message" -}} - {{- if gt (len .Alerts.Firing) 0 -}} - 🔥 FIRING 🔥 - {{- range .Alerts.Firing }} - {{ .Annotations.summary }} - {{ .Annotations.description }} - {{- end }} - {{- end }} - {{- if gt (len .Alerts.Resolved) 0 -}} - ✅ RESOLVED ✅ - {{- range .Alerts.Resolved }} - {{ .Annotations.summary }} - {{- end }} - {{- end }} - {{- end -}} - ''') - ] - ''; - }; - - # Convenience options for common receivers - telegram = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable Telegram notifications"; - }; - - botToken = mkOption { - type = types.str; - default = "$TELEGRAM_BOT_TOKEN"; - description = "Telegram bot token (use environment variable)"; - }; - - chatId = mkOption { - type = types.int; - description = "Telegram chat ID"; - example = -1002642560007; - }; - - messageThreadId = mkOption { - type = types.nullOr types.int; - default = null; - description = "Telegram message thread ID (for forum groups)"; - }; - - template = mkOption { - type = types.str; - default = "telegram.message"; - description = "Template to use for Telegram messages"; - }; - }; - - discord = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable Discord notifications"; - }; - - webhookUrl = mkOption { - type = types.str; - default = "$DISCORD_WEBHOOK_URL"; - description = "Discord webhook URL (use environment variable)"; - }; - - username = mkOption { - type = types.str; - default = "Alertmanager"; - description = "Discord bot username"; - }; - }; - }; - - config = mkIf cfg.enable { - services.prometheus.alertmanager = { - enable = true; - port = cfg.port; - listenAddress = "0.0.0.0"; - openFirewall = cfg.openFirewall; - webExternalUrl = cfg.webExternalUrl; - dataDir = cfg.dataDir; - environmentFile = cfg.environmentFile; - configuration = alertmanagerConfig; - }; - - # Auto-configure Telegram and Discord receiver if enabled - homelab.services.alertmanager.receivers = [ - (optional cfg.telegram.enable { - name = "telegram"; - telegram_configs = [ - { - api_url = "https://api.telegram.org"; - bot_token = cfg.telegram.botToken; - chat_id = 
cfg.telegram.chatId; - message_thread_id = cfg.telegram.messageThreadId; - parse_mode = "HTML"; - send_resolved = true; - message = "{{ template \"${cfg.telegram.template}\" . }}"; - } - ]; - }) - (optional cfg.discord.enable { - name = "discord"; - discord_configs = [ - { - webhook_url = cfg.discord.webhookUrl; - username = cfg.discord.username; - send_resolved = true; - } - ]; - }) - ]; - - # Auto-configure routes for convenience receivers - homelab.services.alertmanager.routes = - (optional cfg.telegram.enable { - receiver = "telegram"; - matchers = ["severity =~ \"warning|critical\""]; - group_wait = "10s"; - continue = true; - }) - ++ (optional cfg.discord.enable { - receiver = "discord"; - matchers = ["severity =~ \"warning|critical\""]; - group_wait = "10s"; - continue = true; - }); - - # Add to monitoring endpoints - homelab.monitoring.metrics = [ - { - name = "alertmanager"; - port = cfg.port; - path = "/metrics"; - jobName = "alertmanager"; - labels = { - service = "alertmanager"; - component = "monitoring"; - }; - } - ]; - - # Add health checks - homelab.monitoring.healthChecks = [ - { - name = "alertmanager-web-interface"; - port = cfg.port; - path = "/-/healthy"; - interval = "30s"; - conditions = [ - "[STATUS] == 200" - "[RESPONSE_TIME] < 1000" - ]; - group = "monitoring"; - labels = { - service = "alertmanager"; - component = "web-interface"; - }; - } - ]; - - # Add reverse proxy entry - homelab.reverseProxy.entries = [ - { - subdomain = "alertmanager"; - host = homelabCfg.hostname; - port = cfg.port; - } - ]; - }; -} diff --git a/modules/homelab/services/monitoring/example.nix b/modules/homelab/services/monitoring/example.nix deleted file mode 100644 index a1ab301..0000000 --- a/modules/homelab/services/monitoring/example.nix +++ /dev/null @@ -1,148 +0,0 @@ -# Example configuration showing how to use the monitoring stack -# with the homelab.global approach for dynamic discovery -{ - config, - pkgs, - ... 
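# Note on the receivers block above: lib.optional returns a list, so wrapping each
# receiver in (optional ...) inside [ ... ] yields a list of lists, while the option
# expects listOf attrs. The routes block above concatenates with ++ instead; a sketch
# of the same shape for receivers:
#
#   homelab.services.alertmanager.receivers =
#     (optional cfg.telegram.enable { name = "telegram"; /* ... */ })
#     ++ (optional cfg.discord.enable { name = "discord"; /* ... */ });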
-}: { - # Import the monitoring services - imports = [ - ./services/prometheus.nix - ./services/alertmanager.nix - ./services/grafana.nix - ./services/monitoring-stack.nix - ]; - - # Enable the full monitoring stack - homelab.services.monitoring-stack.enable = true; - - # Configure Prometheus - it will automatically discover scrape targets - # from homelab.global.monitoring.allMetrics - homelab.services.prometheus = { - enable = true; - port = 9090; - retention = "7d"; - - # Optional: Add custom scrape configs if needed - extraScrapeConfigs = [ - # Any additional manual scrape configs can go here - # but most should be discovered via homelab.monitoring.metrics - ]; - - # Optional: Add custom alerting rules - extraAlertingRules = [ - # Custom alert groups can be added here - ]; - - # Optional: Add external rule files - ruleFiles = [ - # ./path/to/custom-rules.yml - ]; - }; - - # Configure Alertmanager with Telegram support (like your original) - homelab.services.alertmanager = { - enable = true; - port = 9093; - - # Use sops secrets for environment variables - environmentFile = config.sops.secrets."alertmanager/env".path; - - # Enable Telegram notifications - telegram = { - enable = true; - botToken = "$TELEGRAM_BOT_TOKEN"; # From environment file - chatId = -1002642560007; - messageThreadId = 4; - }; - - # Custom templates (similar to your setup) - templates = [ - (pkgs.writeText "telegram.tmpl" '' - {{- define "telegram.message" -}} - {{- if gt (len .Alerts.Firing) 0 -}} - 🔥 FIRING 🔥 - {{- range .Alerts.Firing }} - {{ .Annotations.summary }} - {{ .Annotations.description }} - {{- end }} - {{- end }} - {{- if gt (len .Alerts.Resolved) 0 -}} - ✅ RESOLVED ✅ - {{- range .Alerts.Resolved }} - {{ .Annotations.summary }} - {{- end }} - {{- end }} - {{- end -}} - '') - ]; - }; - - # Configure Grafana with data sources (similar to your setup) - homelab.services.grafana = { - enable = true; - port = 3000; - domain = "grafana.procopius.dk"; - rootUrl = "https://grafana.procopius.dk"; - - # Add grafana user to influxdb2 group for accessing secrets - extraGroups = ["influxdb2"]; - - # Enable data sources - datasources = { - prometheus.enable = true; - loki.enable = true; - influxdb = { - enable = true; - database = "proxmox"; - tokenPath = config.sops.secrets."influxdb/token".path; - }; - }; - - # Provision dashboards (similar to your environment.etc approach) - dashboards.files = [ - { - name = "traefik"; - source = ./dashboards/traefik.json; - } - { - name = "traefik-access"; - source = ./dashboards/traefik-access.json; - } - { - name = "grafana-traefik"; - source = ./dashboards/grafana-traefik.json; - } - { - name = "node-exporter"; - source = ./dashboards/node-exporter.json; - } - { - name = "promtail"; - source = ./dashboards/promtail.json; - } - { - name = "gitea"; - source = ./dashboards/gitea.json; - } - { - name = "postgres"; - source = ./dashboards/postgres.json; - } - { - name = "gatus"; - source = ./dashboards/gatus.json; - } - ]; - }; - - # Configure sops secrets (keep your existing setup) - sops.secrets."alertmanager/env" = { - sopsFile = ../../secrets/secrets.yaml; - mode = "0440"; - }; - - # All services automatically register with homelab.monitoring.metrics - # and homelab.monitoring.healthChecks for Gatus monitoring - # All services automatically get reverse proxy entries -} diff --git a/modules/homelab/services/monitoring/gatus.nix b/modules/homelab/services/monitoring/gatus.nix deleted file mode 100644 index 60f0700..0000000 --- a/modules/homelab/services/monitoring/gatus.nix +++ 
/dev/null @@ -1,217 +0,0 @@ -{ - config, - lib, - ... -}: -with lib; let - serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; - - cfg = config.homelab.services.gatus; - homelabCfg = config.homelab; - - # Service-specific options beyond the standard interface - gatusServiceOptions = { - ui = { - title = mkOption { - type = types.str; - default = "Homelab Status"; - description = "Title for the Gatus web interface"; - }; - - header = mkOption { - type = types.str; - default = "Homelab Services Status"; - description = "Header text for the Gatus interface"; - }; - - link = mkOption { - type = types.str; - default = "https://status.${homelabCfg.externalDomain}"; - description = "Link in the Gatus header"; - }; - - buttons = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption {type = types.str;}; - link = mkOption {type = types.str;}; - }; - }); - default = [ - { - name = "Grafana"; - link = "https://grafana.${homelabCfg.externalDomain}"; - } - { - name = "Prometheus"; - link = "https://prometheus.${homelabCfg.externalDomain}"; - } - ]; - description = "Navigation buttons in the Gatus interface"; - }; - }; - - alerting = mkOption { - type = types.attrs; - default = {}; - description = "Gatus alerting configuration"; - example = literalExpression '' - { - discord = { - webhook-url = "https://discord.com/api/webhooks/..."; - default-alert = { - enabled = true; - description = "Health check failed"; - failure-threshold = 3; - success-threshold = 2; - }; - }; - } - ''; - }; - - storage = mkOption { - type = types.attrs; - default = { - type = "memory"; - }; - description = "Gatus storage configuration"; - example = literalExpression '' - { - type = "postgres"; - path = "postgres://user:password@localhost/gatus?sslmode=disable"; - } - ''; - }; - - extraConfig = mkOption { - type = types.attrs; - default = {}; - description = "Additional Gatus configuration options"; - }; - - web = { - address = mkOption { - type = types.str; - default = "0.0.0.0"; - description = "Web interface bind address"; - }; - }; - }; - - # Convert our health check format to Gatus format - formatHealthCheck = check: let - # Build the URL based on the health check configuration - url = check._url; - - # Convert conditions to Gatus format (they should already be compatible) - conditions = check.conditions or ["[STATUS] == 200"]; - - # Convert alerts to Gatus format - alerts = map (alert: { - inherit (alert) type enabled; - failure-threshold = alert.failure-threshold or 3; - success-threshold = alert.success-threshold or 2; - description = "Health check alert for ${check.name}"; - }) (check.alerts or []); - in { - name = check.name; - group = check.group or "default"; - url = url; - interval = check.interval or "30s"; - - # Add method and headers for HTTP/HTTPS checks - method = - if (check.protocol == "http" || check.protocol == "https") - then check.method or "GET" - else null; - - conditions = conditions; - - # Add timeout - client = { - timeout = check.timeout or "10s"; - }; - - # Add alerts if configured - alerts = - if alerts != [] - then alerts - else []; - - # Add labels for UI organization - ui = { - hide-hostname = false; - hide-url = false; - description = "Health check for ${check.name} on ${check.host}"; - }; - }; - - # Generate Gatus configuration - gatusConfig = - recursiveUpdate { - # Global Gatus settings - alerting = mkIf (cfg.alerting != {}) cfg.alerting; - - web = { - address = cfg.web.address; - port = cfg.port; - }; - - # Enable metrics - metrics = 
cfg.monitoring.enable; - - ui = { - title = cfg.ui.title; - header = cfg.ui.header; - link = cfg.ui.link; - buttons = cfg.ui.buttons; - }; - - storage = cfg.storage; - - # Convert all enabled health checks to Gatus endpoints - endpoints = let - # Get all health checks from global config - allHealthChecks = homelabCfg.global.monitoring.allHealthChecks or []; - - # Filter only enabled health checks - enabledHealthChecks = filter (check: check.enabled or true) allHealthChecks; - - # Convert to Gatus format - gatusEndpoints = map formatHealthCheck enabledHealthChecks; - in - gatusEndpoints; - } - cfg.extraConfig; -in { - options.homelab.services.gatus = serviceInterface.mkServiceInterface { - serviceName = "gatus"; - defaultPort = 8080; - defaultSubdomain = "status"; - monitoringPath = "/metrics"; - healthCheckPath = "/health"; - healthCheckConditions = [ - "[STATUS] == 200" - "[BODY].status == UP" - "[RESPONSE_TIME] < 1000" - ]; - serviceOptions = gatusServiceOptions; - }; - - config = serviceInterface.mkServiceConfig { - inherit config cfg homelabCfg; - serviceName = "gatus"; - - extraMonitoringLabels = { - component = "status-monitoring"; - }; - - serviceConfig = { - services.gatus = { - enable = true; - settings = gatusConfig; - }; - }; - }; -} diff --git a/modules/homelab/services/monitoring/grafana.nix b/modules/homelab/services/monitoring/grafana.nix deleted file mode 100644 index 8ecb14c..0000000 --- a/modules/homelab/services/monitoring/grafana.nix +++ /dev/null @@ -1,728 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; - - cfg = config.homelab.services.grafana; - homelabCfg = config.homelab; - - # Default community dashboards with proper configuration - defaultDashboards = { - "node-exporter-full" = { - name = "Node Exporter Full"; - id = 12486; - revision = 2; - # url = "https://grafana.com/api/dashboards/1860/revisions/37/download"; - sha256 = "sha256-1DE1aaanRHHeCOMWDGdOS1wBXxOF84UXAjJzT5Ek6mM="; - - url = "https://grafana.com/api/dashboards/12486/revisions/2/download"; - }; - "prometheus-2-0-stats" = { - name = "Prometheus 2.0 Stats"; - id = 2; - revision = 2; - url = "https://grafana.com/api/dashboards/2/revisions/2/download"; - sha256 = "sha256-Ydk4LPwfX4qJN8tiWPLWQdtAqzj8CKi6HYsuE+kWcXw="; - }; - }; - - # Function to fetch a dashboard from Grafana.com - fetchGrafanaDashboard = name: config: - pkgs.fetchurl { - inherit (config) url sha256; - name = "${name}-dashboard.json"; - }; - - # Git repository management for custom dashboards - gitDashboardsRepo = mkIf (cfg.dashboards.git.enable && cfg.dashboards.git.url != "") ( - pkgs.fetchgit { - url = cfg.dashboards.git.url; - rev = cfg.dashboards.git.rev; - sha256 = cfg.dashboards.git.sha256; - } - ); - - # Dashboard provisioning configuration - provisionDashboard = name: source: { - "grafana-dashboards/${name}.json" = { - inherit source; - user = "grafana"; - group = "grafana"; - mode = "0644"; - }; - }; - - # Generate dashboard files from various sources - dashboardFiles = - # Default community dashboards - (foldl' ( - acc: name: - acc // (provisionDashboard name (fetchGrafanaDashboard name defaultDashboards.${name})) - ) {} (attrNames (filterAttrs (n: v: cfg.dashboards.defaults.${n}.enable) cfg.dashboards.defaults))) - # Custom file-based dashboards - // (foldl' ( - acc: dashboard: - acc // (provisionDashboard dashboard.name dashboard.source) - ) {} - cfg.dashboards.files) - # Git-synced dashboards - // (optionalAttrs (cfg.dashboards.git.enable 
&& cfg.dashboards.git.url != "") ( - let - gitDashboards = - if pathExists "${gitDashboardsRepo}/${cfg.dashboards.git.path}" - then builtins.readDir "${gitDashboardsRepo}/${cfg.dashboards.git.path}" - else {}; - in - mapAttrs' ( - filename: type: let - name = removeSuffix ".json" filename; - source = "${gitDashboardsRepo}/${cfg.dashboards.git.path}/${filename}"; - in - nameValuePair "grafana-dashboards/${name}.json" { - inherit source; - user = "grafana"; - group = "grafana"; - mode = "0644"; - } - ) (filterAttrs (name: type: type == "regular" && hasSuffix ".json" name) gitDashboards) - )); - - # Service-specific options beyond the standard interface - grafanaServiceOptions = { - # Authentication settings - auth = { - admin = { - user = mkOption { - type = types.str; - default = "admin"; - description = "Admin username"; - }; - - passwordFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to admin password file"; - }; - - email = mkOption { - type = types.str; - default = "admin@${homelabCfg.externalDomain}"; - description = "Admin email address"; - }; - }; - - disableLoginForm = mkOption { - type = types.bool; - default = false; - description = "Disable the login form"; - }; - - oauthAutoLogin = mkOption { - type = types.bool; - default = false; - description = "Enable OAuth auto-login"; - }; - - anonymousAccess = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable anonymous access"; - }; - - orgName = mkOption { - type = types.str; - default = "Homelab"; - description = "Organization name for anonymous users"; - }; - - orgRole = mkOption { - type = types.enum ["Viewer" "Editor" "Admin"]; - default = "Viewer"; - description = "Role for anonymous users"; - }; - }; - - genericOauth = { - enabled = mkOption { - type = types.bool; - default = false; - description = "Enable generic OAuth"; - }; - - configFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to OAuth configuration file"; - }; - }; - }; - - # Enhanced datasource configuration - datasources = { - prometheus = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable Prometheus datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:9090"; - description = "Prometheus URL"; - }; - - uid = mkOption { - type = types.str; - default = "prometheus"; - description = "Unique identifier for Prometheus datasource"; - }; - - scrapeInterval = mkOption { - type = types.str; - default = "15s"; - description = "Default scrape interval for Prometheus"; - }; - - manageAlerts = mkOption { - type = types.bool; - default = true; - description = "Manage alerts in Grafana"; - }; - - exemplarTraceIdDestinations = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Exemplar trace ID destinations"; - }; - }; - - loki = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable Loki datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:3100"; - description = "Loki URL"; - }; - - uid = mkOption { - type = types.str; - default = "loki"; - description = "Unique identifier for Loki datasource"; - }; - - maxLines = mkOption { - type = types.int; - default = 1000; - description = "Maximum lines to return from Loki"; - }; - - derivedFields = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Derived fields configuration for Loki"; - }; - }; - - influxdb = { - enable = 
mkOption { - type = types.bool; - default = false; - description = "Enable InfluxDB datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:8086"; - description = "InfluxDB URL"; - }; - - database = mkOption { - type = types.str; - default = "homelab"; - description = "InfluxDB database name"; - }; - - tokenFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to InfluxDB token file"; - }; - - uid = mkOption { - type = types.str; - default = "influxdb"; - description = "Unique identifier for InfluxDB datasource"; - }; - - version = mkOption { - type = types.enum ["1.x" "2.x"]; - default = "2.x"; - description = "InfluxDB version"; - }; - - organization = mkOption { - type = types.str; - default = "homelab"; - description = "InfluxDB organization (for v2.x)"; - }; - - bucket = mkOption { - type = types.str; - default = "homelab"; - description = "InfluxDB bucket (for v2.x)"; - }; - }; - - extra = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional data sources"; - }; - }; - - # Enhanced dashboard configuration - dashboards = { - # Default community dashboards - defaults = mkOption { - type = types.attrsOf (types.submodule { - options = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable this default dashboard"; - }; - }; - }); - default = mapAttrs (name: config: {enable = false;}) defaultDashboards; - description = "Enable default community dashboards"; - example = literalExpression '' - { - "node-exporter-full".enable = true; - "prometheus-2-0-stats".enable = true; - } - ''; - }; - - # File-based dashboards - files = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption { - type = types.str; - description = "Dashboard name (without .json extension)"; - }; - source = mkOption { - type = types.path; - description = "Path to dashboard JSON file"; - }; - }; - }); - default = []; - description = "Dashboard files to provision"; - }; - - # Git-based dashboard sync - git = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable git-based dashboard synchronization"; - }; - - url = mkOption { - type = types.str; - default = ""; - description = "Git repository URL for dashboards"; - }; - - rev = mkOption { - type = types.str; - default = "HEAD"; - description = "Git revision to use"; - }; - - sha256 = mkOption { - type = types.str; - default = ""; - description = "SHA256 hash of the git repository content"; - }; - - path = mkOption { - type = types.str; - default = "."; - description = "Path within the git repository containing dashboards"; - }; - - updateInterval = mkOption { - type = types.str; - default = "1h"; - description = "How often to check for dashboard updates"; - }; - }; - - path = mkOption { - type = types.str; - default = "/etc/grafana-dashboards"; - description = "Path where dashboard files are stored"; - }; - }; - - # Plugin configuration - plugins = mkOption { - type = types.listOf types.package; - default = []; - description = "Grafana plugins to install"; - }; - - # SMTP configuration - smtp = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable SMTP for email notifications"; - }; - - host = mkOption { - type = types.str; - default = "localhost:587"; - description = "SMTP server host:port"; - }; - - user = mkOption { - type = types.str; - default = ""; - description = "SMTP username"; - }; - - passwordFile = mkOption { - type = types.nullOr 
types.path; - default = null; - description = "Path to SMTP password file"; - }; - - fromAddress = mkOption { - type = types.str; - default = "grafana@${homelabCfg.externalDomain}"; - description = "From email address"; - }; - - fromName = mkOption { - type = types.str; - default = "Homelab Grafana"; - description = "From name"; - }; - - skipVerify = mkOption { - type = types.bool; - default = false; - description = "Skip SSL certificate verification"; - }; - }; - - # Security settings - security = { - secretKeyFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to secret key file for signing"; - }; - - allowEmbedding = mkOption { - type = types.bool; - default = false; - description = "Allow embedding Grafana in iframes"; - }; - - cookieSecure = mkOption { - type = types.bool; - default = true; - description = "Set secure flag on cookies"; - }; - - contentSecurityPolicy = mkOption { - type = types.bool; - default = true; - description = "Enable Content Security Policy header"; - }; - - strictTransportSecurity = mkOption { - type = types.bool; - default = true; - description = "Enable Strict Transport Security header"; - }; - }; - - # Data directory - dataDir = mkOption { - type = types.str; - default = "/var/lib/grafana"; - description = "Directory to store Grafana data"; - }; - - # Extra Grafana settings - extraSettings = mkOption { - type = types.attrs; - default = {}; - description = "Additional Grafana settings"; - }; - }; - - # Enhanced datasource configuration - buildDatasources = let - # Build prometheus datasource - prometheusDatasource = optional cfg.datasources.prometheus.enable { - uid = cfg.datasources.prometheus.uid; - name = "Prometheus"; - type = "prometheus"; - url = cfg.datasources.prometheus.url; - access = "proxy"; - isDefault = true; - editable = false; - jsonData = { - timeInterval = cfg.datasources.prometheus.scrapeInterval; - queryTimeout = "60s"; - httpMethod = "POST"; - manageAlerts = cfg.datasources.prometheus.manageAlerts; - exemplarTraceIdDestinations = cfg.datasources.prometheus.exemplarTraceIdDestinations; - }; - }; - - # Build loki datasource - lokiDatasource = optional cfg.datasources.loki.enable { - uid = cfg.datasources.loki.uid; - name = "Loki"; - type = "loki"; - url = cfg.datasources.loki.url; - access = "proxy"; - editable = false; - jsonData = { - maxLines = cfg.datasources.loki.maxLines; - derivedFields = cfg.datasources.loki.derivedFields; - }; - }; - - # Build influxdb datasource - influxdbDatasource = optional cfg.datasources.influxdb.enable { - uid = cfg.datasources.influxdb.uid; - name = "InfluxDB"; - type = "influxdb"; - url = cfg.datasources.influxdb.url; - access = "proxy"; - database = cfg.datasources.influxdb.database; - editable = false; - jsonData = { - dbName = cfg.datasources.influxdb.database; - httpHeaderName1 = "Authorization"; - version = cfg.datasources.influxdb.version; - organization = cfg.datasources.influxdb.organization; - defaultBucket = cfg.datasources.influxdb.bucket; - }; - secureJsonData = mkIf (cfg.datasources.influxdb.tokenFile != null) { - httpHeaderValue1 = "$__file{${cfg.datasources.influxdb.tokenFile}}"; - }; - }; - - # Build extra datasources - extraDatasources = cfg.datasources.extra; - in - prometheusDatasource ++ lokiDatasource ++ influxdbDatasource ++ extraDatasources; -in { - options.homelab.services.grafana = serviceInterface.mkServiceInterface { - serviceName = "grafana"; - defaultPort = 3000; - defaultSubdomain = "grafana"; - monitoringPath = "/metrics"; - 
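# The /api/health conditions below assume Grafana's health endpoint returns a JSON body
# with a "database" field, e.g. {"database": "ok", "version": "..."}; that response
# shape is an assumption here, not something pinned by this module.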
healthCheckPath = "/api/health"; - healthCheckConditions = [ - "[STATUS] == 200" - "[BODY].database == ok" - "[RESPONSE_TIME] < 2000" - ]; - serviceOptions = grafanaServiceOptions; - }; - - config = serviceInterface.mkServiceConfig { - inherit config cfg homelabCfg; - serviceName = "grafana"; - - extraMonitoringLabels = { - component = "dashboard"; - }; - - # Additional health checks specific to Grafana - customHealthChecks = []; - - serviceConfig = mkMerge [ - { - services.grafana = { - enable = true; - dataDir = cfg.dataDir; - # declarativePlugins = - # cfg.plugins - # ++ (with pkgs.grafanaPlugins; [ - # grafana-exploretraces-app - # grafana-metricsdrilldown-app - # grafana-pyroscope-app - # grafana-lokiexplore-app - # grafana-worldmap-panel - # grafana-piechart-panel - # ]); - - settings = - recursiveUpdate { - server = { - http_port = cfg.port; - http_addr = "0.0.0.0"; - domain = "${cfg.proxy.subdomain}.${homelabCfg.externalDomain}"; - root_url = "https://${cfg.proxy.subdomain}.${homelabCfg.externalDomain}"; - serve_from_sub_path = false; - }; - - database = { - type = "sqlite3"; - path = "${cfg.dataDir}/grafana.db"; - }; - - security = - { - admin_user = cfg.auth.admin.user; - admin_email = cfg.auth.admin.email; - # allow_embedding = cfg.security.allowEmbedding; - # cookie_secure = cfg.security.cookieSecure; - # content_security_policy = cfg.security.contentSecurityPolicy; - # strict_transport_security = cfg.security.strictTransportSecurity; - } - // (optionalAttrs (cfg.auth.admin.passwordFile != null) { - admin_password = "$__file{${cfg.auth.admin.passwordFile}}"; - }) - // (optionalAttrs (cfg.security.secretKeyFile != null) { - secret_key = "$__file{${cfg.security.secretKeyFile}}"; - }); - - users = { - allow_sign_up = false; - auto_assign_org = true; - auto_assign_org_role = "Viewer"; - }; - - "auth.anonymous" = { - enabled = cfg.auth.anonymousAccess.enable; - org_name = cfg.auth.anonymousAccess.orgName; - org_role = cfg.auth.anonymousAccess.orgRole; - }; - - "auth.basic" = { - enabled = !cfg.auth.disableLoginForm; - }; - - "auth.generic_oauth" = - mkIf cfg.auth.genericOauth.enabled { - enabled = true; - } - // (optionalAttrs (cfg.auth.genericOauth.configFile != null) { - client_id = "$__file{${cfg.auth.genericOauth.configFile}}"; - }); - - smtp = mkIf cfg.smtp.enable ({ - enabled = true; - host = cfg.smtp.host; - user = cfg.smtp.user; - from_address = cfg.smtp.fromAddress; - from_name = cfg.smtp.fromName; - skip_verify = cfg.smtp.skipVerify; - } - // (optionalAttrs (cfg.smtp.passwordFile != null) { - password = "$__file{${cfg.smtp.passwordFile}}"; - })); - - analytics = { - reporting_enabled = false; - check_for_updates = false; - }; - news.news_feed_enabled = false; - - feature_toggles = { - provisioning = true; - kubernetesDashboards = true; - }; - # paths = { - # plugins = "${cfg.dataDir}/plugins"; - # provisioning = "/etc/grafana/provisioning"; - # }; - } - cfg.extraSettings; - - provision = { - enable = true; - - datasources.settings.datasources = buildDatasources; - - dashboards.settings.providers = [ - { - name = "homelab-dashboards"; - type = "file"; - disableDeletion = false; - updateIntervalSeconds = 10; - allowUiUpdates = true; - options = { - path = cfg.dashboards.path; - }; - } - ]; - }; - }; - - # Provision dashboard files - environment.etc = dashboardFiles; - - # Ensure dashboard directory exists - systemd.tmpfiles.rules = [ - "d ${cfg.dashboards.path} 0755 grafana grafana -" - ]; - } - - # Git dashboard sync service (if enabled) - (mkIf (cfg.dashboards.git.enable 
&& cfg.dashboards.git.url != "") { - systemd.services.grafana-dashboard-sync = { - description = "Sync Grafana dashboards from git"; - after = ["grafana.service"]; - wantedBy = ["multi-user.target"]; - - serviceConfig = { - Type = "oneshot"; - User = "grafana"; - Group = "grafana"; - }; - - script = '' - echo "Syncing dashboards from git repository..." - # Dashboard files are already provisioned via Nix - # This service can be extended for runtime updates if needed - systemctl reload grafana.service - ''; - }; - - systemd.timers.grafana-dashboard-sync = { - description = "Timer for Grafana dashboard sync"; - wantedBy = ["timers.target"]; - - timerConfig = { - OnCalendar = cfg.dashboards.git.updateInterval; - Persistent = true; - }; - }; - }) - ]; - }; -} diff --git a/modules/homelab/services/monitoring/grafana_1.nix b/modules/homelab/services/monitoring/grafana_1.nix deleted file mode 100644 index c5ae73f..0000000 --- a/modules/homelab/services/monitoring/grafana_1.nix +++ /dev/null @@ -1,198 +0,0 @@ -# Example showing how to create a service using the standard interface -{ - config, - lib, - pkgs, - ... -}: -with lib; let - serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; - - cfg = config.homelab.services.grafana; - homelabCfg = config.homelab; - - # Service-specific options beyond the standard interface - grafanaServiceOptions = { - admin = { - user = mkOption { - type = types.str; - default = "admin"; - description = "Admin username"; - }; - - passwordFile = mkOption { - type = types.str; - default = "admin"; - description = "Path to the Admin password file"; - }; - }; - - datasources = { - prometheus = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable Prometheus datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:9090"; - description = "Prometheus URL"; - }; - - uid = mkOption { - type = types.str; - default = "prometheus"; - description = "Unique identifier for Prometheus datasource"; - }; - }; - - loki = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable Loki datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:3100"; - description = "Loki URL"; - }; - - uid = mkOption { - type = types.str; - default = "loki"; - description = "Unique identifier for Loki datasource"; - }; - }; - - influxdb = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable InfluxDB datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:8086"; - description = "InfluxDB URL"; - }; - - database = mkOption { - type = types.str; - default = "homelab"; - description = "InfluxDB database name"; - }; - - tokenFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to InfluxDB token file"; - }; - - uid = mkOption { - type = types.str; - default = "influxdb"; - description = "Unique identifier for InfluxDB datasource"; - }; - }; - - extra = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional data sources"; - }; - }; - - plugins = mkOption { - type = types.listOf types.package; - default = []; - description = "Grafana plugins to install"; - }; - }; -in { - options.homelab.services.grafana = serviceInterface.mkServiceInterface { - serviceName = "grafana"; - defaultPort = 3000; - defaultSubdomain = "grafana"; - monitoringPath = "/metrics"; - healthCheckPath = "/api/health"; - healthCheckConditions = [ - "[STATUS] == 
200" - "[BODY].database == ok" - "[RESPONSE_TIME] < 2000" - ]; - serviceOptions = grafanaServiceOptions; - }; - - config = serviceInterface.mkServiceConfig { - inherit config cfg homelabCfg; - serviceName = "grafana"; - - extraMonitoringLabels = { - component = "dashboard"; - }; - - serviceConfig = { - services.grafana = { - enable = true; - declarativePlugins = cfg.plugins; - - settings = { - server = { - http_port = cfg.port; - http_addr = "0.0.0.0"; - root_url = "https://${cfg.proxy.subdomain}.${homelabCfg.externalDomain}"; - }; - - security = { - admin_user = cfg.admin.user; - admin_password = "$__file{${cfg.admin.passwordFile}}"; - }; - }; - - provision = { - enable = true; - datasources.settings = { - datasources = let - # Build datasource list - datasources = - [] - ++ optional cfg.datasources.prometheus.enable { - uid = cfg.datasources.prometheus.uid; - name = "Prometheus"; - type = "prometheus"; - url = cfg.datasources.prometheus.url; - } - ++ optional cfg.datasources.loki.enable { - uid = cfg.datasources.loki.uid; - name = "Loki"; - type = "loki"; - url = cfg.datasources.loki.url; - } - ++ optional cfg.datasources.influxdb.enable { - uid = cfg.datasources.influxdb.uid; - name = "InfluxDB"; - type = "influxdb"; - url = cfg.datasources.influxdb.url; - access = "proxy"; - jsonData = { - dbName = cfg.datasources.influxdb.database; - httpHeaderName1 = "Authorization"; - }; - secureJsonData = mkIf (cfg.datasources.influxdb.tokenPath != null) { - httpHeaderValue1 = "$__file{${cfg.datasources.influxdb.tokenPath}}"; - }; - } - ++ cfg.datasources.extra; - in - datasources; - }; - }; - }; - }; - }; -} diff --git a/modules/homelab/services/monitoring/grafana_gg.nix b/modules/homelab/services/monitoring/grafana_gg.nix deleted file mode 100644 index 64650cf..0000000 --- a/modules/homelab/services/monitoring/grafana_gg.nix +++ /dev/null @@ -1,416 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
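# In the influxdb branch just above, the config reads cfg.datasources.influxdb.tokenPath,
# while the option is declared as tokenFile; a sketch using the declared name consistently:
#
#   secureJsonData = mkIf (cfg.datasources.influxdb.tokenFile != null) {
#     httpHeaderValue1 = "$__file{${cfg.datasources.influxdb.tokenFile}}";
#   };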
-}: -with lib; let - cfg = config.homelab.services.grafana; - homelabCfg = config.homelab; - - # Default dashboards for homelab monitoring - defaultDashboards = { - "node-exporter" = pkgs.fetchurl { - url = "https://grafana.com/api/dashboards/1860/revisions/37/download"; - sha256 = "sha256-0000000000000000000000000000000000000000000="; # You'll need to update this - }; - "prometheus-stats" = pkgs.fetchurl { - url = "https://grafana.com/api/dashboards/2/revisions/2/download"; - sha256 = "sha256-0000000000000000000000000000000000000000000="; # You'll need to update this - }; - }; - - # Grafana provisioning configuration - provisioningConfig = { - # Data sources - datasources = - [ - { - name = "Prometheus"; - type = "prometheus"; - access = "proxy"; - url = cfg.datasources.prometheus.url; - isDefault = true; - editable = false; - jsonData = { - timeInterval = "5s"; - queryTimeout = "60s"; - httpMethod = "POST"; - }; - } - ] - ++ cfg.datasources.extra; - - # Dashboard providers - dashboards = [ - { - name = "homelab"; - type = "file"; - disableDeletion = false; - updateIntervalSeconds = 10; - allowUiUpdates = true; - options = { - path = "/var/lib/grafana/dashboards"; - }; - } - ]; - - # Notification channels - notifiers = cfg.notifications; - }; -in { - options.homelab.services.grafana = { - enable = mkEnableOption "Grafana dashboard service"; - - port = mkOption { - type = types.port; - default = 3000; - description = "Port for Grafana web interface"; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = "Whether to open firewall ports"; - }; - - dataDir = mkOption { - type = types.str; - default = "/var/lib/grafana"; - description = "Directory to store Grafana data"; - }; - - domain = mkOption { - type = types.str; - default = "grafana.${homelabCfg.externalDomain}"; - description = "Domain for Grafana"; - }; - - rootUrl = mkOption { - type = types.str; - default = "https://grafana.${homelabCfg.externalDomain}"; - description = "Root URL for Grafana"; - }; - - admin = { - user = mkOption { - type = types.str; - default = "admin"; - description = "Admin username"; - }; - - password = mkOption { - type = types.str; - default = "admin"; - description = "Admin password (change this!)"; - }; - - email = mkOption { - type = types.str; - default = "admin@${homelabCfg.externalDomain}"; - description = "Admin email"; - }; - }; - - datasources = { - prometheus = { - url = mkOption { - type = types.str; - default = "http://localhost:9090"; - description = "Prometheus URL"; - }; - }; - - extra = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional data sources"; - example = literalExpression '' - [ - { - name = "Loki"; - type = "loki"; - url = "http://localhost:3100"; - } - ] - ''; - }; - }; - - notifications = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Notification channels configuration"; - example = literalExpression '' - [ - { - name = "discord-webhook"; - type = "discord"; - settings = { - url = "https://discord.com/api/webhooks/..."; - username = "Grafana"; - }; - } - ] - ''; - }; - - plugins = mkOption { - type = types.listOf types.str; - default = [ - "grafana-piechart-panel" - "grafana-worldmap-panel" - "grafana-clock-panel" - "grafana-simple-json-datasource" - ]; - description = "Grafana plugins to install"; - }; - - smtp = { - enabled = mkOption { - type = types.bool; - default = false; - description = "Enable SMTP for email notifications"; - }; - - host = mkOption { - type = 
types.str; - default = "localhost:587"; - description = "SMTP server host:port"; - }; - - user = mkOption { - type = types.str; - default = ""; - description = "SMTP username"; - }; - - password = mkOption { - type = types.str; - default = ""; - description = "SMTP password"; - }; - - fromAddress = mkOption { - type = types.str; - default = "grafana@${homelabCfg.externalDomain}"; - description = "From email address"; - }; - - fromName = mkOption { - type = types.str; - default = "Homelab Grafana"; - description = "From name"; - }; - }; - - security = { - allowEmbedding = mkOption { - type = types.bool; - default = false; - description = "Allow embedding Grafana in iframes"; - }; - - cookieSecure = mkOption { - type = types.bool; - default = true; - description = "Set secure flag on cookies"; - }; - - secretKey = mkOption { - type = types.str; - default = "change-this-secret-key"; - description = "Secret key for signing (change this!)"; - }; - }; - - auth = { - anonymousEnabled = mkOption { - type = types.bool; - default = false; - description = "Enable anonymous access"; - }; - - disableLoginForm = mkOption { - type = types.bool; - default = false; - description = "Disable login form"; - }; - }; - - extraConfig = mkOption { - type = types.attrs; - default = {}; - description = "Additional Grafana configuration"; - }; - }; - - config = mkIf cfg.enable { - services.grafana = { - enable = true; - settings = - recursiveUpdate { - server = { - http_addr = "0.0.0.0"; - http_port = cfg.port; - domain = cfg.domain; - root_url = cfg.rootUrl; - serve_from_sub_path = false; - }; - - database = { - type = "sqlite3"; - path = "${cfg.dataDir}/grafana.db"; - }; - - security = { - admin_user = cfg.admin.user; - admin_password = cfg.admin.password; - admin_email = cfg.admin.email; - allow_embedding = cfg.security.allowEmbedding; - cookie_secure = cfg.security.cookieSecure; - secret_key = cfg.security.secretKey; - }; - - users = { - allow_sign_up = false; - auto_assign_org = true; - auto_assign_org_role = "Viewer"; - }; - - auth.anonymous = { - enabled = cfg.auth.anonymousEnabled; - org_name = "Homelab"; - org_role = "Viewer"; - }; - - auth.basic = { - enabled = !cfg.auth.disableLoginForm; - }; - - smtp = mkIf cfg.smtp.enabled { - enabled = true; - host = cfg.smtp.host; - user = cfg.smtp.user; - password = cfg.smtp.password; - from_address = cfg.smtp.fromAddress; - from_name = cfg.smtp.fromName; - }; - - analytics = { - reporting_enabled = false; - check_for_updates = false; - }; - - log = { - mode = "console"; - level = "info"; - }; - - paths = { - data = cfg.dataDir; - logs = "${cfg.dataDir}/log"; - plugins = "${cfg.dataDir}/plugins"; - provisioning = "/etc/grafana/provisioning"; - }; - } - cfg.extraConfig; - - dataDir = cfg.dataDir; - }; - - # Install plugins - systemd.services.grafana.preStart = mkIf (cfg.plugins != []) ( - concatStringsSep "\n" (map ( - plugin: "${pkgs.grafana}/bin/grafana-cli --pluginsDir ${cfg.dataDir}/plugins plugins install ${plugin} || true" - ) - cfg.plugins) - ); - - # Provisioning configuration - environment.etc = - { - "grafana/provisioning/datasources/datasources.yaml".text = builtins.toJSON { - apiVersion = 1; - datasources = provisioningConfig.datasources; - }; - - "grafana/provisioning/dashboards/dashboards.yaml".text = builtins.toJSON { - apiVersion = 1; - providers = provisioningConfig.dashboards; - }; - } - // (mkIf (cfg.notifications != []) { - "grafana/provisioning/notifiers/notifiers.yaml".text = builtins.toJSON { - apiVersion = 1; - notifiers = 
provisioningConfig.notifiers; - }; - }); - - # Create dashboard directory - systemd.tmpfiles.rules = [ - "d ${cfg.dataDir}/dashboards 0755 grafana grafana -" - ]; - - # Open firewall if requested - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; - - # Add to monitoring endpoints - homelab.monitoring.metrics = [ - { - name = "grafana"; - port = cfg.port; - path = "/metrics"; - jobName = "grafana"; - labels = { - service = "grafana"; - component = "monitoring"; - }; - } - ]; - - # Add health checks - homelab.monitoring.healthChecks = [ - { - name = "grafana-web-interface"; - port = cfg.port; - path = "/api/health"; - interval = "30s"; - conditions = [ - "[STATUS] == 200" - "[BODY].database == ok" - "[RESPONSE_TIME] < 2000" - ]; - group = "monitoring"; - labels = { - service = "grafana"; - component = "web-interface"; - }; - } - { - name = "grafana-login-page"; - port = cfg.port; - path = "/login"; - interval = "60s"; - conditions = [ - "[STATUS] == 200" - "[RESPONSE_TIME] < 3000" - ]; - group = "monitoring"; - labels = { - service = "grafana"; - component = "login"; - }; - } - ]; - - # Add reverse proxy entry - homelab.reverseProxy.entries = [ - { - subdomain = "grafana"; - host = homelabCfg.hostname; - port = cfg.port; - } - ]; - }; -} diff --git a/modules/homelab/services/monitoring/grafana_new.nix b/modules/homelab/services/monitoring/grafana_new.nix deleted file mode 100644 index a73eadb..0000000 --- a/modules/homelab/services/monitoring/grafana_new.nix +++ /dev/null @@ -1,369 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - cfg = config.homelab.services.grafana; - homelabCfg = config.homelab; - - # Dashboard provisioning - provisionDashboard = name: source: { - "grafana-dashboards/${name}.json" = { - inherit source; - user = "grafana"; - group = "grafana"; - mode = "0644"; - }; - }; - - # Generate all dashboard files - dashboardFiles = - fold ( - dashboard: acc: - acc // (provisionDashboard dashboard.name dashboard.source) - ) {} - cfg.dashboards.files; -in { - options.homelab.services.grafana = { - enable = mkEnableOption "Grafana dashboard service"; - - port = mkOption { - type = types.port; - default = 3000; - description = "Port for Grafana web interface"; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = "Whether to open firewall ports"; - }; - - dataDir = mkOption { - type = types.str; - default = "/var/lib/grafana"; - description = "Directory to store Grafana data"; - }; - - domain = mkOption { - type = types.str; - default = "grafana.${homelabCfg.externalDomain}"; - description = "Domain for Grafana"; - }; - - rootUrl = mkOption { - type = types.str; - default = "https://grafana.${homelabCfg.externalDomain}"; - description = "Root URL for Grafana"; - }; - - # Authentication settings - auth = { - disableLoginForm = mkOption { - type = types.bool; - default = false; - description = "Disable the login form"; - }; - - oauthAutoLogin = mkOption { - type = types.bool; - default = false; - description = "Enable OAuth auto-login"; - }; - - genericOauth = { - enabled = mkOption { - type = types.bool; - default = false; - description = "Enable generic OAuth"; - }; - }; - }; - - # Data source configuration - datasources = { - prometheus = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable Prometheus datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:9090"; - description = "Prometheus URL"; - }; - - uid = mkOption { - type = types.str; - 
default = "prometheus"; - description = "Unique identifier for Prometheus datasource"; - }; - }; - - loki = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable Loki datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:3100"; - description = "Loki URL"; - }; - - uid = mkOption { - type = types.str; - default = "loki"; - description = "Unique identifier for Loki datasource"; - }; - }; - - influxdb = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable InfluxDB datasource"; - }; - - url = mkOption { - type = types.str; - default = "http://127.0.0.1:8086"; - description = "InfluxDB URL"; - }; - - database = mkOption { - type = types.str; - default = "homelab"; - description = "InfluxDB database name"; - }; - - tokenPath = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to InfluxDB token file"; - }; - - uid = mkOption { - type = types.str; - default = "influxdb"; - description = "Unique identifier for InfluxDB datasource"; - }; - }; - - extra = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional data sources"; - }; - }; - - # Dashboard configuration - dashboards = { - path = mkOption { - type = types.str; - default = "/etc/grafana-dashboards"; - description = "Path to dashboard files"; - }; - - files = mkOption { - type = types.listOf (types.submodule { - options = { - name = mkOption { - type = types.str; - description = "Dashboard name (without .json extension)"; - example = "node-exporter"; - }; - source = mkOption { - type = types.path; - description = "Path to dashboard JSON file"; - }; - }; - }); - default = []; - description = "Dashboard files to provision"; - example = literalExpression '' - [ - { - name = "node-exporter"; - source = ./dashboards/node-exporter.json; - } - { - name = "traefik"; - source = ./dashboards/traefik.json; - } - ] - ''; - }; - }; - - # Extra user groups for accessing secrets - extraGroups = mkOption { - type = types.listOf types.str; - default = []; - description = "Additional groups for the grafana user"; - example = ["influxdb2"]; - }; - - # Additional settings - extraSettings = mkOption { - type = types.attrs; - default = {}; - description = "Additional Grafana settings"; - }; - - plugins = mkOption { - type = types.listOf types.package; - default = []; - description = "Grafana plugins to install"; - example = literalExpression "with pkgs.grafanaPlugins; [ grafana-piechart-panel ]"; - }; - }; - - config = mkIf cfg.enable { - # Add grafana user to extra groups (e.g., for accessing secrets) - users.users.grafana.extraGroups = cfg.extraGroups; - - services.grafana = { - enable = true; - dataDir = cfg.dataDir; - declarativePlugins = cfg.plugins; - - settings = - recursiveUpdate { - server = { - http_port = cfg.port; - http_addr = "0.0.0.0"; - domain = cfg.domain; - root_url = cfg.rootUrl; - oauth_auto_login = cfg.auth.oauthAutoLogin; - }; - - "auth.generic_oauth" = { - enabled = cfg.auth.genericOauth.enabled; - }; - - auth = { - disable_login_form = cfg.auth.disableLoginForm; - }; - } - cfg.extraSettings; - - provision = { - enable = true; - - datasources.settings = { - datasources = let - # Build datasource list - datasources = - [] - ++ optional cfg.datasources.prometheus.enable { - uid = cfg.datasources.prometheus.uid; - name = "Prometheus"; - type = "prometheus"; - url = cfg.datasources.prometheus.url; - } - ++ optional cfg.datasources.loki.enable { - uid = 
cfg.datasources.loki.uid; - name = "Loki"; - type = "loki"; - url = cfg.datasources.loki.url; - } - ++ optional cfg.datasources.influxdb.enable { - uid = cfg.datasources.influxdb.uid; - name = "InfluxDB"; - type = "influxdb"; - url = cfg.datasources.influxdb.url; - access = "proxy"; - jsonData = { - dbName = cfg.datasources.influxdb.database; - httpHeaderName1 = "Authorization"; - }; - secureJsonData = mkIf (cfg.datasources.influxdb.tokenPath != null) { - httpHeaderValue1 = "$__file{${cfg.datasources.influxdb.tokenPath}}"; - }; - } - ++ cfg.datasources.extra; - in - datasources; - }; - - dashboards.settings.providers = mkIf (cfg.dashboards.files != []) [ - { - name = "homelab-dashboards"; - options.path = cfg.dashboards.path; - } - ]; - }; - }; - - # Open firewall if requested - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; - - # Provision dashboard files - environment.etc = dashboardFiles; - - # Add to monitoring endpoints - homelab.monitoring.metrics = [ - { - name = "grafana"; - port = cfg.port; - path = "/metrics"; - jobName = "grafana"; - labels = { - service = "grafana"; - component = "monitoring"; - }; - } - ]; - - # Add health checks - homelab.monitoring.healthChecks = [ - { - name = "grafana-web-interface"; - port = cfg.port; - path = "/api/health"; - interval = "30s"; - conditions = [ - "[STATUS] == 200" - "[BODY].database == ok" - "[RESPONSE_TIME] < 2000" - ]; - group = "monitoring"; - labels = { - service = "grafana"; - component = "web-interface"; - }; - } - { - name = "grafana-login-page"; - port = cfg.port; - path = "/login"; - interval = "60s"; - conditions = [ - "[STATUS] == 200" - "[RESPONSE_TIME] < 3000" - ]; - group = "monitoring"; - labels = { - service = "grafana"; - component = "login"; - }; - } - ]; - - # Add reverse proxy entry - homelab.reverseProxy.entries = [ - { - subdomain = "grafana"; - host = homelabCfg.hostname; - port = cfg.port; - } - ]; - }; -} diff --git a/modules/homelab/services/monitoring/influxdb.nix b/modules/homelab/services/monitoring/influxdb.nix deleted file mode 100644 index 75bd525..0000000 --- a/modules/homelab/services/monitoring/influxdb.nix +++ /dev/null @@ -1,399 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
-}: -with lib; let - serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; - - cfg = config.homelab.services.influxdb; - homelabCfg = config.homelab; - - # Service-specific options beyond the standard interface - influxdbServiceOptions = { - version = mkOption { - type = types.enum ["1" "2"]; - default = "2"; - description = "InfluxDB version to use"; - }; - - dataDir = mkOption { - type = types.str; - default = "/var/lib/influxdb"; - description = "Directory to store InfluxDB data"; - }; - - # InfluxDB 2.x options - v2 = { - org = mkOption { - type = types.str; - default = "homelab"; - description = "Initial organization name"; - }; - - bucket = mkOption { - type = types.str; - default = "homelab"; - description = "Initial bucket name"; - }; - - username = mkOption { - type = types.str; - default = "admin"; - description = "Initial admin username"; - }; - - password = mkOption { - type = types.str; - default = "changeme"; - description = "Initial admin password"; - }; - - retention = mkOption { - type = types.str; - default = "30d"; - description = "Default retention period"; - }; - - tokenFile = mkOption { - type = types.nullOr types.path; - default = null; - description = "File containing the admin token"; - }; - }; - - # InfluxDB 1.x options - v1 = { - database = mkOption { - type = types.str; - default = "homelab"; - description = "Default database name"; - }; - - retention = mkOption { - type = types.str; - default = "30d"; - description = "Default retention period"; - }; - - adminUser = mkOption { - type = types.str; - default = "admin"; - description = "Admin username"; - }; - - adminPassword = mkOption { - type = types.str; - default = "changeme"; - description = "Admin password"; - }; - - httpAuth = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable HTTP authentication"; - }; - }; - }; - - extraConfig = mkOption { - type = types.attrs; - default = {}; - description = "Additional InfluxDB configuration"; - }; - - backup = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable automatic backups"; - }; - - schedule = mkOption { - type = types.str; - default = "daily"; - description = "Backup schedule"; - }; - - retention = mkOption { - type = types.str; - default = "7d"; - description = "Backup retention period"; - }; - }; - }; - - # Generate configuration based on version - influxdbConfig = - if cfg.version == "2" - then - recursiveUpdate { - bolt-path = "${cfg.dataDir}/influxd.bolt"; - engine-path = "${cfg.dataDir}/engine"; - http-bind-address = "0.0.0.0:${toString cfg.port}"; - reporting-disabled = true; - log-level = "info"; - } - cfg.extraConfig - else - recursiveUpdate { - meta = { - dir = "${cfg.dataDir}/meta"; - }; - data = { - dir = "${cfg.dataDir}/data"; - wal-dir = "${cfg.dataDir}/wal"; - }; - http = { - bind-address = "0.0.0.0:${toString cfg.port}"; - auth-enabled = cfg.v1.httpAuth.enable; - }; - logging = { - level = "info"; - }; - reporting-disabled = true; - } - cfg.extraConfig; -in { - options.homelab.services.influxdb = serviceInterface.mkServiceInterface { - serviceName = "influxdb"; - defaultPort = 8086; - defaultSubdomain = "influxdb"; - monitoringPath = "/metrics"; - healthCheckPath = - if cfg.version == "2" - then "/health" - else "/ping"; - healthCheckConditions = - if cfg.version == "2" - then ["[STATUS] == 200" "[BODY].status == pass"] - else ["[STATUS] == 204" "[RESPONSE_TIME] < 1000"]; - serviceOptions = influxdbServiceOptions; - }; - - config = 
serviceInterface.mkServiceConfig { - inherit config cfg homelabCfg; - serviceName = "influxdb"; - - extraMonitoringLabels = { - component = "timeseries-database"; - version = cfg.version; - }; - - customHealthChecks = - [ - { - name = "influxdb-query"; - port = cfg.port; - path = - if cfg.version == "2" - then "/api/v2/query" - else "/query"; - interval = "60s"; - method = "POST"; - conditions = [ - "[STATUS] < 500" - "[RESPONSE_TIME] < 3000" - ]; - group = "monitoring"; - labels = { - service = "influxdb"; - component = "query-engine"; - }; - } - ] - ++ optional (cfg.version == "2") { - name = "influxdb-write"; - port = cfg.port; - path = "/api/v2/write"; - interval = "60s"; - method = "POST"; - conditions = [ - "[STATUS] < 500" - "[RESPONSE_TIME] < 2000" - ]; - group = "monitoring"; - labels = { - service = "influxdb"; - component = "write-engine"; - }; - }; - - serviceConfig = mkMerge [ - # Common configuration - { - # Create data directories - systemd.tmpfiles.rules = - [ - "d ${cfg.dataDir} 0755 influxdb influxdb -" - ] - ++ optionals (cfg.version == "1") [ - "d ${cfg.dataDir}/meta 0755 influxdb influxdb -" - "d ${cfg.dataDir}/data 0755 influxdb influxdb -" - "d ${cfg.dataDir}/wal 0755 influxdb influxdb -" - ]; - - # Ensure influxdb user exists - users.users.influxdb = { - isSystemUser = true; - group = "influxdb"; - home = cfg.dataDir; - createHome = true; - }; - - users.groups.influxdb = {}; - } - - # InfluxDB 2.x configuration - (mkIf (cfg.version == "2") { - services.influxdb2 = { - enable = true; - dataDir = cfg.dataDir; - settings = influxdbConfig; - }; - - # Initial setup for InfluxDB 2.x - systemd.services.influxdb2-setup = { - description = "InfluxDB 2.x initial setup"; - after = ["influxdb2.service"]; - wants = ["influxdb2.service"]; - wantedBy = ["multi-user.target"]; - serviceConfig = { - Type = "oneshot"; - RemainAfterExit = true; - User = "influxdb"; - Group = "influxdb"; - }; - script = let - setupScript = pkgs.writeShellScript "influxdb2-setup" '' - # Wait for InfluxDB to be ready - timeout=60 - while [ $timeout -gt 0 ]; do - if ${pkgs.curl}/bin/curl -f http://localhost:${toString cfg.port}/health > /dev/null 2>&1; then - break - fi - sleep 1 - timeout=$((timeout - 1)) - done - - # Check if setup is already done - if ${pkgs.curl}/bin/curl -f http://localhost:${toString cfg.port}/api/v2/setup > /dev/null 2>&1; then - # Setup InfluxDB if not already done - ${pkgs.influxdb2}/bin/influx setup \ - --host http://localhost:${toString cfg.port} \ - --org "${cfg.v2.org}" \ - --bucket "${cfg.v2.bucket}" \ - --username "${cfg.v2.username}" \ - --password "${cfg.v2.password}" \ - --retention "${cfg.v2.retention}" \ - --force - fi - ''; - in "${setupScript}"; - }; - }) - - # InfluxDB 1.x configuration - (mkIf (cfg.version == "1") { - services.influxdb = { - enable = true; - dataDir = cfg.dataDir; - extraConfig = influxdbConfig; - }; - - # Initial setup for InfluxDB 1.x - systemd.services.influxdb-setup = mkIf cfg.v1.httpAuth.enable { - description = "InfluxDB 1.x initial setup"; - after = ["influxdb.service"]; - wants = ["influxdb.service"]; - wantedBy = ["multi-user.target"]; - serviceConfig = { - Type = "oneshot"; - RemainAfterExit = true; - User = "influxdb"; - Group = "influxdb"; - }; - script = let - setupScript = pkgs.writeShellScript "influxdb-setup" '' - # Wait for InfluxDB to be ready - timeout=60 - while [ $timeout -gt 0 ]; do - if ${pkgs.curl}/bin/curl -f http://localhost:${toString cfg.port}/ping > /dev/null 2>&1; then - break - fi - sleep 1 - timeout=$((timeout - 1)) - 
done - - # Create admin user - ${pkgs.influxdb}/bin/influx -host localhost -port ${toString cfg.port} -execute "CREATE USER \"${cfg.v1.adminUser}\" WITH PASSWORD '${cfg.v1.adminPassword}' WITH ALL PRIVILEGES" || true - - # Create database - ${pkgs.influxdb}/bin/influx -host localhost -port ${toString cfg.port} -username "${cfg.v1.adminUser}" -password "${cfg.v1.adminPassword}" -execute "CREATE DATABASE \"${cfg.v1.database}\"" || true - - # Set retention policy - ${pkgs.influxdb}/bin/influx -host localhost -port ${toString cfg.port} -username "${cfg.v1.adminUser}" -password "${cfg.v1.adminPassword}" -database "${cfg.v1.database}" -execute "CREATE RETENTION POLICY \"default\" ON \"${cfg.v1.database}\" DURATION ${cfg.v1.retention} REPLICATION 1 DEFAULT" || true - ''; - in "${setupScript}"; - }; - }) - - # Backup configuration - (mkIf cfg.backup.enable { - systemd.services.influxdb-backup = { - description = "InfluxDB backup"; - serviceConfig = { - Type = "oneshot"; - User = "influxdb"; - Group = "influxdb"; - }; - script = let - backupScript = - if cfg.version == "2" - then - pkgs.writeShellScript "influxdb2-backup" '' - backup_dir="${cfg.dataDir}/backups/$(date +%Y%m%d_%H%M%S)" - mkdir -p "$backup_dir" - ${pkgs.influxdb2}/bin/influx backup \ - --host http://localhost:${toString cfg.port} \ - --org "${cfg.v2.org}" \ - "$backup_dir" - - # Clean old backups - find "${cfg.dataDir}/backups" -type d -mtime +${cfg.backup.retention} -exec rm -rf {} + || true - '' - else - pkgs.writeShellScript "influxdb-backup" '' - backup_dir="${cfg.dataDir}/backups/$(date +%Y%m%d_%H%M%S)" - mkdir -p "$backup_dir" - ${pkgs.influxdb}/bin/influxd backup \ - -host localhost:${toString cfg.port} \ - -database "${cfg.v1.database}" \ - "$backup_dir" - - # Clean old backups - find "${cfg.dataDir}/backups" -type d -mtime +${cfg.backup.retention} -exec rm -rf {} + || true - ''; - in "${backupScript}"; - }; - - systemd.timers.influxdb-backup = { - description = "InfluxDB backup timer"; - wantedBy = ["timers.target"]; - timerConfig = { - OnCalendar = cfg.backup.schedule; - Persistent = true; - RandomizedDelaySec = "5m"; - }; - }; - - # Create backup directory - systemd.tmpfiles.rules = [ - "d ${cfg.dataDir}/backups 0755 influxdb influxdb -" - ]; - }) - ]; - }; -} diff --git a/modules/homelab/services/monitoring/loki.nix b/modules/homelab/services/monitoring/loki.nix deleted file mode 100644 index 4467b2a..0000000 --- a/modules/homelab/services/monitoring/loki.nix +++ /dev/null @@ -1,356 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
-}: -with lib; let - serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; - - cfg = config.homelab.services.loki; - homelabCfg = config.homelab; - - # Service-specific options beyond the standard interface - lokiServiceOptions = { - # Storage configuration - storage = { - type = mkOption { - type = types.enum ["filesystem" "s3" "gcs"]; - default = "filesystem"; - description = "Storage backend type"; - }; - - filesystem = { - directory = mkOption { - type = types.str; - default = "/var/lib/loki"; - description = "Directory for filesystem storage"; - }; - }; - - s3 = { - endpoint = mkOption { - type = types.nullOr types.str; - default = null; - description = "S3 endpoint URL"; - }; - - bucket = mkOption { - type = types.nullOr types.str; - default = null; - description = "S3 bucket name"; - }; - - region = mkOption { - type = types.nullOr types.str; - default = null; - description = "S3 region"; - }; - - accessKeyId = mkOption { - type = types.nullOr types.str; - default = null; - description = "S3 access key ID"; - }; - - secretAccessKey = mkOption { - type = types.nullOr types.path; - default = null; - description = "Path to file containing S3 secret access key"; - }; - }; - }; - - # Retention configuration - retention = { - period = mkOption { - type = types.str; - default = "168h"; # 7 days - description = "Log retention period"; - }; - - streamRetention = mkOption { - type = types.listOf (types.submodule { - options = { - selector = mkOption { - type = types.str; - description = "Log stream selector"; - example = "{environment=\"development\"}"; - }; - priority = mkOption { - type = types.int; - description = "Rule priority (higher = more important)"; - default = 1; - }; - period = mkOption { - type = types.str; - description = "Retention period for this stream"; - example = "24h"; - }; - }; - }); - default = []; - description = "Per-stream retention rules"; - }; - }; - - # Performance tuning - limits = { - rejectOldSamples = mkOption { - type = types.bool; - default = true; - description = "Reject samples older than max age"; - }; - - rejectOldSamplesMaxAge = mkOption { - type = types.str; - default = "168h"; - description = "Maximum age for samples"; - }; - - ingestionRateMB = mkOption { - type = types.int; - default = 4; - description = "Ingestion rate limit in MB/s per tenant"; - }; - - ingestionBurstSizeMB = mkOption { - type = types.int; - default = 6; - description = "Ingestion burst size in MB per tenant"; - }; - - maxStreamsPerUser = mkOption { - type = types.int; - default = 10000; - description = "Maximum number of streams per user"; - }; - - maxLineSize = mkOption { - type = types.str; - default = "256KB"; - description = "Maximum line size"; - }; - }; - - # Authentication - auth = { - enabled = mkOption { - type = types.bool; - default = false; - description = "Enable authentication"; - }; - }; - - # Extra configuration options - extraConfig = mkOption { - type = types.attrs; - default = {}; - description = "Additional Loki configuration options"; - }; - - # Data directory - dataDir = mkOption { - type = types.str; - default = "/var/lib/loki"; - description = "Directory to store Loki data"; - }; - }; - - # Build the Loki configuration - lokiConfig = - recursiveUpdate { - # Server configuration - server = { - http_listen_port = cfg.port; - grpc_listen_port = cfg.port + 1000; # e.g., 3100 -> 4100 - http_listen_address = "0.0.0.0"; - grpc_listen_address = "0.0.0.0"; - log_level = cfg.monitoring.extraLabels.log_level or "info"; - }; - - # Authentication - 
auth_enabled = cfg.auth.enabled; - - # Analytics - analytics.reporting_enabled = false; - - # Common configuration for single-binary mode - common = { - ring = { - instance_addr = "127.0.0.1"; - kvstore.store = "inmemory"; - }; - replication_factor = 1; - path_prefix = cfg.dataDir; - }; - - # Schema configuration - schema_config = { - configs = [ - { - from = "2020-05-15"; - store = "tsdb"; - object_store = cfg.storage.type; - schema = "v13"; - index = { - prefix = "index_"; - period = "24h"; - }; - } - ]; - }; - - # Storage configuration - storage_config = mkMerge [ - # Filesystem storage - (mkIf (cfg.storage.type == "filesystem") { - filesystem.directory = "${cfg.storage.filesystem.directory}/chunks"; - }) - - # S3 storage - (mkIf (cfg.storage.type == "s3") { - aws = - { - s3 = cfg.storage.s3.endpoint; - bucketnames = cfg.storage.s3.bucket; - region = cfg.storage.s3.region; - access_key_id = cfg.storage.s3.accessKeyId; - } - // (optionalAttrs (cfg.storage.s3.secretAccessKey != null) { - secret_access_key = "$__file{${cfg.storage.s3.secretAccessKey}}"; - }); - }) - ]; - - # Limits configuration - limits_config = - { - reject_old_samples = cfg.limits.rejectOldSamples; - reject_old_samples_max_age = cfg.limits.rejectOldSamplesMaxAge; - ingestion_rate_mb = cfg.limits.ingestionRateMB; - ingestion_burst_size_mb = cfg.limits.ingestionBurstSizeMB; - max_streams_per_user = cfg.limits.maxStreamsPerUser; - max_line_size = cfg.limits.maxLineSize; - - # Retention configuration - retention_period = cfg.retention.period; - } - // (optionalAttrs (cfg.retention.streamRetention != []) { - retention_stream = - map (rule: { - selector = rule.selector; - priority = rule.priority; - period = rule.period; - }) - cfg.retention.streamRetention; - }); - - # Table manager for retention - table_manager = { - retention_deletes_enabled = true; - retention_period = cfg.retention.period; - }; - - # Compactor configuration - compactor = { - working_directory = "${cfg.dataDir}/compactor"; - # shared_store = cfg.storage.type; - compaction_interval = "10m"; - # retention_enabled = true; - # retention_delete_delay = "2h"; - # retention_delete_worker_count = 150; - }; - - # Query range configuration - query_range = { - results_cache = { - cache = { - embedded_cache = { - enabled = true; - max_size_mb = 100; - }; - }; - }; - }; - - # Frontend configuration - frontend = { - max_outstanding_per_tenant = 256; - compress_responses = true; - }; - - # Query scheduler - query_scheduler = { - max_outstanding_requests_per_tenant = 256; - }; - - # Runtime configuration - runtime_config = { - file = "/etc/loki/runtime.yml"; - }; - } - cfg.extraConfig; -in { - options.homelab.services.loki = serviceInterface.mkServiceInterface { - serviceName = "loki"; - defaultPort = 3100; - defaultSubdomain = "loki"; - monitoringPath = "/metrics"; - healthCheckPath = "/ready"; - healthCheckConditions = [ - "[STATUS] == 200" - "[RESPONSE_TIME] < 2000" - ]; - serviceOptions = lokiServiceOptions; - }; - - config = serviceInterface.mkServiceConfig { - inherit config cfg homelabCfg; - serviceName = "loki"; - - extraMonitoringLabels = { - component = "log-aggregation"; - log_level = "info"; - }; - - customHealthChecks = [ - { - name = "loki-health"; - port = cfg.port; - # https://grafana.com/docs/loki/latest/reference/loki-http-api/#status-endpoints - path = "/loki/api/v1/status/buildinfo"; - interval = "30s"; - conditions = ["[STATUS] == 200"]; - group = "logging"; - labels = { - service = "loki"; - component = "api"; - }; - } - ]; - - serviceConfig = 
mkMerge [ - { - services.loki = { - enable = true; - dataDir = cfg.dataDir; - configuration = lokiConfig; - }; - - # Ensure data directories exist - systemd.tmpfiles.rules = [ - "d ${cfg.dataDir} 0755 loki loki -" - "d ${cfg.dataDir}/chunks 0755 loki loki -" - "d ${cfg.dataDir}/compactor 0755 loki loki -" - ]; - - # Runtime configuration file for dynamic updates - environment.etc."loki/runtime.yml".text = '' - # Runtime configuration for Loki - # This file can be updated without restarting Loki - ''; - } - ]; - }; -} diff --git a/modules/homelab/services/monitoring/prometheus.nix b/modules/homelab/services/monitoring/prometheus.nix deleted file mode 100644 index b4ac904..0000000 --- a/modules/homelab/services/monitoring/prometheus.nix +++ /dev/null @@ -1,204 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - serviceInterface = import ../../lib/service-interface.nix {inherit lib;}; - - cfg = config.homelab.services.prometheus; - homelabCfg = config.homelab; - - # Generate Prometheus scrape configs from global monitoring data - prometheusScrapeConfigs = let - allMetrics = homelabCfg.global.monitoring.allMetrics or []; - jobGroups = groupBy (m: m.jobName) allMetrics; - - scrapeConfigs = - mapAttrsToList (jobName: endpoints: { - job_name = jobName; - scrape_interval = head endpoints.scrapeInterval or ["30s"]; - static_configs = - map - (endpoint: { - targets = ["${endpoint.host}:${toString endpoint.port}"]; - labels = endpoint.labels; - }) - endpoints; - metrics_path = head endpoints.path or [null]; - }) - jobGroups; - in - scrapeConfigs; - - # Service-specific options beyond the standard interface - prometheusServiceOptions = { - retention = mkOption { - type = types.str; - default = "15d"; - description = "How long to retain metrics data"; - }; - - alertmanager = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable integration with Alertmanager"; - }; - - url = mkOption { - type = types.str; - default = "${homelabCfg.hostname}.${homelabCfg.domain}:9093"; - description = "Alertmanager URL"; - }; - }; - - extraScrapeConfigs = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional scrape configurations"; - }; - - extraAlertingRules = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional alerting rules"; - }; - - globalConfig = mkOption { - type = types.attrs; - default = { - scrape_interval = "15s"; - evaluation_interval = "15s"; - }; - description = "Global Prometheus configuration"; - }; - - extraFlags = mkOption { - type = types.listOf types.str; - default = []; - description = "Extra command line flags"; - }; - - ruleFiles = mkOption { - type = types.listOf types.path; - default = []; - description = "Additional rule files to load"; - }; - }; - - # Standard alerting rules - alertingRules = [ - { - name = "homelab.rules"; - rules = [ - { - alert = "InstanceDown"; - expr = "up == 0"; - for = "5m"; - labels = {severity = "critical";}; - annotations = { - summary = "Instance {{ $labels.instance }} down"; - description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."; - }; - } - { - alert = "HighCPUUsage"; - expr = "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100) > 80"; - for = "10m"; - labels = {severity = "warning";}; - annotations = { - summary = "High CPU usage on {{ $labels.instance }}"; - description = "CPU usage is above 80% for more than 10 minutes on {{ $labels.instance }}."; - }; - } - { - alert 
= "HighMemoryUsage"; - expr = "(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100 > 85"; - for = "10m"; - labels = {severity = "warning";}; - annotations = { - summary = "High memory usage on {{ $labels.instance }}"; - description = "Memory usage is above 85% for more than 10 minutes on {{ $labels.instance }}."; - }; - } - { - alert = "DiskSpaceLow"; - expr = "((node_filesystem_size_bytes - node_filesystem_avail_bytes) / node_filesystem_size_bytes) * 100 > 90"; - for = "5m"; - labels = {severity = "critical";}; - annotations = { - summary = "Disk space low on {{ $labels.instance }}"; - description = "Disk usage is above 90% on {{ $labels.instance }} {{ $labels.mountpoint }}."; - }; - } - ]; - } - ]; -in { - options.homelab.services.prometheus = serviceInterface.mkServiceInterface { - serviceName = "prometheus"; - defaultPort = 9090; - defaultSubdomain = "prometheus"; - monitoringPath = "/metrics"; - healthCheckPath = "/-/healthy"; - healthCheckConditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; - serviceOptions = prometheusServiceOptions; - }; - - config = serviceInterface.mkServiceConfig { - inherit config cfg homelabCfg; - serviceName = "prometheus"; - - extraMonitoringLabels = { - component = "monitoring-server"; - }; - - customHealthChecks = [ - { - name = "prometheus-ready"; - port = cfg.port; - path = "/-/ready"; - interval = "30s"; - conditions = ["[STATUS] == 200"]; - group = "monitoring"; - labels = { - service = "prometheus"; - component = "readiness"; - }; - } - ]; - - serviceConfig = { - services.prometheus = { - enable = true; - port = cfg.port; - listenAddress = "0.0.0.0"; - retentionTime = cfg.retention; - - globalConfig = cfg.globalConfig; - extraFlags = cfg.extraFlags; - - scrapeConfigs = prometheusScrapeConfigs ++ cfg.extraScrapeConfigs; - - ruleFiles = - map (ruleGroup: - pkgs.writeText "${ruleGroup.name}.yml" (builtins.toJSON { - groups = [ruleGroup]; - })) (alertingRules ++ cfg.extraAlertingRules) - ++ cfg.ruleFiles; - - alertmanagers = mkIf cfg.alertmanager.enable [ - { - static_configs = [ - { - targets = [cfg.alertmanager.url]; - } - ]; - } - ]; - }; - }; - }; -} diff --git a/modules/homelab/services/prometheus.nix b/modules/homelab/services/prometheus.nix deleted file mode 100644 index 55f1883..0000000 --- a/modules/homelab/services/prometheus.nix +++ /dev/null @@ -1,237 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
-}: -with lib; let - serviceName = "prometheus"; - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; - - # Generate Prometheus scrape configs from global monitoring data - prometheusScrapeConfigs = let - allMetrics = homelabCfg.monitoring.global.allMetrics; - - jobGroups = groupBy (m: m.jobName) allMetrics; - - scrapeConfigs = - mapAttrsToList (jobName: endpoints: { - job_name = jobName; - scrape_interval = head endpoints.scrapeInterval or ["30s"]; - static_configs = - map - (endpoint: { - targets = ["${endpoint.host}:${toString endpoint.port}"]; - labels = endpoint.labels; - }) - endpoints; - metrics_path = head endpoints.path or ["/metrics"]; - }) - jobGroups; - in - scrapeConfigs; - - # Standard alerting rules for homelab - alertingRules = [ - { - name = "homelab.rules"; - rules = [ - { - alert = "InstanceDown"; - expr = "up == 0"; - for = "5m"; - labels = {severity = "critical";}; - annotations = { - summary = "Instance {{ $labels.instance }} down"; - description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."; - }; - } - { - alert = "HighCPUUsage"; - expr = "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100) > 80"; - for = "10m"; - labels = {severity = "warning";}; - annotations = { - summary = "High CPU usage on {{ $labels.instance }}"; - description = "CPU usage is above 80% for more than 10 minutes on {{ $labels.instance }}."; - }; - } - { - alert = "HighMemoryUsage"; - expr = "(1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)) * 100 > 85"; - for = "10m"; - labels = {severity = "warning";}; - annotations = { - summary = "High memory usage on {{ $labels.instance }}"; - description = "Memory usage is above 85% for more than 10 minutes on {{ $labels.instance }}."; - }; - } - { - alert = "DiskSpaceLow"; - expr = "((node_filesystem_size_bytes - node_filesystem_avail_bytes) / node_filesystem_size_bytes) * 100 > 90"; - for = "5m"; - labels = {severity = "critical";}; - annotations = { - summary = "Disk space low on {{ $labels.instance }}"; - description = "Disk usage is above 90% on {{ $labels.instance }} {{ $labels.mountpoint }}."; - }; - } - ]; - } - ]; -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - (import ../lib/features/proxy.nix serviceName) - ]; - - # Core service options - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Prometheus Monitoring Server"; - - port = mkOption { - type = types.port; - default = 9090; - }; - - description = mkOption { - type = types.str; - default = "Prometheus Monitoring Server"; - }; - - # Prometheus-specific options - retention = mkOption { - type = types.str; - default = "15d"; - description = "How long to retain metrics data"; - }; - - alertmanager = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable integration with Alertmanager"; - }; - - url = mkOption { - type = types.str; - default = "alertmanager.${homelabCfg.domain}:9093"; - description = "Alertmanager URL"; - }; - }; - - extraScrapeConfigs = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional scrape configurations"; - }; - - extraAlertingRules = mkOption { - type = types.listOf types.attrs; - default = []; - description = "Additional alerting rules"; - }; - - globalConfig = mkOption { - type = types.attrs; - default = { - scrape_interval = "15s"; - evaluation_interval = "15s"; - }; - description = 
"Global Prometheus configuration"; - }; - - extraFlags = mkOption { - type = types.listOf types.str; - default = []; - description = "Extra command line flags"; - }; - - ruleFiles = mkOption { - type = types.listOf types.path; - default = []; - description = "Additional rule files to load"; - }; - - systemdServices = mkOption { - type = types.listOf types.str; - default = [ - "prometheus.service" - "prometheus" - ]; - description = "Systemd services to monitor"; - }; - }; - - # Service configuration with smart defaults - config = mkIf cfg.enable (mkMerge [ - { - services.prometheus = { - enable = true; - port = cfg.port; - listenAddress = "0.0.0.0"; - retentionTime = cfg.retention; - - globalConfig = cfg.globalConfig; - extraFlags = cfg.extraFlags; - - # Automatically aggregate all metrics from the fleet - scrapeConfigs = prometheusScrapeConfigs ++ cfg.extraScrapeConfigs; - - # Include standard + custom alerting rules - ruleFiles = - map (ruleGroup: - pkgs.writeText "${ruleGroup.name}.yml" (builtins.toJSON { - groups = [ruleGroup]; - })) (alertingRules ++ cfg.extraAlertingRules) - ++ cfg.ruleFiles; - - # Connect to Alertmanager if enabled - alertmanagers = mkIf cfg.alertmanager.enable [ - { - static_configs = [ - { - targets = [cfg.alertmanager.url]; - } - ]; - } - ]; - }; - - networking.firewall.allowedTCPPorts = [cfg.port]; - } - { - homelab.services.${serviceName}.monitoring = { - metrics.path = "/metrics"; - healthCheck.path = "/-/healthy"; # ✅ Enables health checks - healthCheck.conditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; - - extraLabels = { - component = "monitoring-server"; - tier = "monitoring"; - }; - }; - } - { - homelab.services.${serviceName}.logging = { - files = ["/var/log/prometheus/prometheus.log"]; - parsing = { - # Prometheus log format: ts=2024-01-01T12:00:00.000Z caller=main.go:123 level=info msg="message" - regex = "^ts=(?P[^ ]+) caller=(?P[^ ]+) level=(?P\\w+) msg=\"(?P[^\"]*)\""; - extractFields = ["level" "caller"]; - }; - extraLabels = { - component = "monitoring-server"; - application = "prometheus"; - }; - }; - } - { - homelab.services.${serviceName}.proxy = { - enableAuth = true; - }; - } - ]); -} diff --git a/modules/homelab/services/vaultwarden.nix b/modules/homelab/services/vaultwarden.nix deleted file mode 100644 index 9ebabad..0000000 --- a/modules/homelab/services/vaultwarden.nix +++ /dev/null @@ -1,137 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: -with lib; let - serviceName = "vaultwarden"; - cfg = config.homelab.services.${serviceName}; - homelabCfg = config.homelab; -in { - imports = [ - (import ../lib/features/monitoring.nix serviceName) - (import ../lib/features/logging.nix serviceName) - (import ../lib/features/proxy.nix serviceName) - ]; - - # Core service options - options.homelab.services.${serviceName} = { - enable = mkEnableOption "Vault Warden"; - - description = mkOption { - type = types.str; - default = "Vault Warden"; - }; - - port = mkOption { - type = types.port; - default = 8222; - }; - - openFirewall = mkOption { - type = types.bool; - default = true; - description = '' - Whether to open the ports specified in `port` and `webPort` in the firewall. - ''; - }; - - environmentFile = lib.mkOption { - type = with lib.types; nullOr path; - default = null; - example = "/var/lib/vaultwarden.env"; - description = '' - Additional environment file as defined in {manpage}`systemd.exec(5)`. 
- - Secrets like {env}`ADMIN_TOKEN` and {env}`SMTP_PASSWORD` - should be passed to the service without adding them to the world-readable Nix store. - - Note that this file needs to be available on the host on which `vaultwarden` is running. - - As a concrete example, to make the Admin UI available (from which new users can be invited initially), - the secret {env}`ADMIN_TOKEN` needs to be defined as described - [here](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page): - - ``` - # Admin secret token, see - # https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page - ADMIN_TOKEN=...copy-paste a unique generated secret token here... - ``` - ''; - }; - - systemdServices = mkOption { - type = types.listOf types.str; - default = [ - "vaultwarden.service" - "vaultwarden" - ]; - description = "Systemd services to monitor"; - }; - }; - - # Service configuration with smart defaults - config = mkIf cfg.enable (mkMerge [ - { - services.vaultwarden = { - enable = true; - config = { - DOMAIN = "https://bitwarden.example.com"; - SIGNUPS_ALLOWED = false; - - ROCKET_ADDRESS = "0.0.0.0"; - ROCKET_PORT = cfg.port; - - ROCKET_LOG = "critical"; - - # This example assumes a mailserver running on localhost, - # thus without transport encryption. - # If you use an external mail server, follow: - # https://github.com/dani-garcia/vaultwarden/wiki/SMTP-configuration - # SMTP_HOST = "127.0.0.1"; - # SMTP_PORT = 25; - # SMTP_SSL = false; - - # SMTP_FROM = "admin@bitwarden.example.com"; - # SMTP_FROM_NAME = "example.com Bitwarden server"; - - ADMIN_TOKEN = "1234"; - }; - environmentFile = cfg.environmentFile; - }; - - networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.port]; - } - { - # homelab.services.${serviceName}.monitoring = { - # metrics.path = "/metrics"; - - # healthCheck.path = "/healthz"; - # healthCheck.conditions = ["[STATUS] == 200" "[RESPONSE_TIME] < 1000"]; - - # extraLabels = { - # component = "example"; - # }; - # }; - } - { - # homelab.services.${serviceName}.logging = { - # files = ["/var/log/example/log.log"]; - # # parsing = { - # # regex = "^ts=(?P[^ ]+) caller=(?P[^ ]+) level=(?P\\w+) msg=\"(?P[^\"]*)\""; - # # extractFields = ["level" "caller"]; - # # }; - # extraLabels = { - # component = "example"; - # application = "example"; - # }; - # }; - } - { - homelab.services.${serviceName}.proxy = { - enableAuth = true; - }; - } - ]); -} diff --git a/modules/nixos/ente.nix b/modules/nixos/ente.nix index 7c26c57..283e4ec 100644 --- a/modules/nixos/ente.nix +++ b/modules/nixos/ente.nix @@ -72,11 +72,6 @@ in { type = types.str; description = "The domain under which the photos frontend will be served."; }; - - auth = mkOption { - type = types.str; - description = "The domain under which the auth frontend will be served."; - }; }; }; @@ -192,11 +187,6 @@ in { name = "ente"; user = "ente"; }; - key = { - encryption._secret = pkgs.writeText "encryption" "T0sn+zUVFOApdX4jJL4op6BtqqAfyQLH95fu8ASWfno="; - hash._secret = pkgs.writeText "hash" "g/dBZBs1zi9SXQ0EKr4RCt1TGr7ZCKkgrpjyjrQEKovWPu5/ce8dYM6YvMIPL23MMZToVuuG+Z6SGxxTbxg5NQ=="; - }; - jwt.secret._secret = pkgs.writeText "jwt" "i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8="; }; systemd.services.ente = { @@ -253,7 +243,6 @@ in { BindReadOnlyPaths = [ "${cfgApi.package}/share/museum/migrations:${dataDir}/migrations" "${cfgApi.package}/share/museum/mail-templates:${dataDir}/mail-templates" - "${cfgApi.package}/share/museum/web-templates:${dataDir}/web-templates" ]; User = cfgApi.user; @@ -322,12 +311,7 @@ in { in { 
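
A side note on the vaultwarden `environmentFile` description above: in this repo's style that file would typically come from sops-nix rather than being written by hand on the host. Below is a minimal sketch under that assumption — the secret name `vaultwarden-env` and its `sopsFile` path are hypothetical placeholders, not entries that exist in this repository's secrets tree.

```nix
# Sketch: point environmentFile at a sops-managed file so ADMIN_TOKEN and
# SMTP_PASSWORD never enter the world-readable Nix store.
# "vaultwarden-env" and the sopsFile path are placeholders.
{ config, ... }: {
  sops.secrets."vaultwarden-env" = {
    sopsFile = ../../secrets/vaultwarden/secrets.yml; # assumed location
    owner = "vaultwarden";
  };

  services.vaultwarden = {
    enable = true;
    environmentFile = config.sops.secrets."vaultwarden-env".path;
  };
}
```

This mirrors the pattern already used for the Forgejo secrets elsewhere in this diff (a sops secret owned by the service user, referenced by path).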
enable = true; virtualHosts.${domainFor "accounts"} = { - listen = [ - { - addr = "0.0.0.0"; - port = 3001; - } - ]; + forceSSL = mkDefault false; locations."/" = { root = webPackage "accounts"; tryFiles = "$uri $uri.html /index.html"; @@ -337,12 +321,7 @@ in { }; }; virtualHosts.${domainFor "cast"} = { - listen = [ - { - addr = "0.0.0.0"; - port = 3004; - } - ]; + forceSSL = mkDefault false; locations."/" = { root = webPackage "cast"; tryFiles = "$uri $uri.html /index.html"; @@ -355,12 +334,7 @@ in { serverAliases = [ (domainFor "albums") # the albums app is shared with the photos frontend ]; - listen = [ - { - addr = "0.0.0.0"; - port = 3000; - } - ]; + forceSSL = mkDefault false; locations."/" = { root = webPackage "photos"; tryFiles = "$uri $uri.html /index.html"; @@ -369,21 +343,6 @@ in { ''; }; }; - virtualHosts.${domainFor "auth"} = { - listen = [ - { - addr = "0.0.0.0"; - port = 3003; - } - ]; - locations."/" = { - root = webPackage "auth"; - tryFiles = "$uri $uri.html /index.html"; - extraConfig = '' - add_header Access-Control-Allow-Origin 'https://${cfgWeb.domains.api}'; - ''; - }; - }; }; }) ]; diff --git a/nixos/README.md b/nixos/README.md index d892b5f..3c98a20 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -3,7 +3,7 @@ nixos-rebuild switch --flake .#proxmox --target-host root@192.168.1.205 --verbos nixos-rebuild switch --flake .#sandbox --target-host root@sandbox.lab --verbose nixos-rebuild switch --flake .#monitoring --target-host root@monitor.lab --verbose nixos-rebuild switch --flake .#forgejo --target-host root@forgejo.lab --verbose -nixos-rebuild switch --flake .#dns --target-host root@dns.lab --verbose +nixos-rebuild switch --flake .#dns --target-host root@192.168.1.140 --verbose nixos-rebuild switch --flake .#keycloak --target-host root@keycloak.lab --verbose nixos-rebuild switch --flake .#mail --target-host root@mail.lab --verbose nixos-rebuild switch --flake .#media --target-host root@media.lab --verbose diff --git a/nixos/base.nix b/nixos/base.nix index 8b6959d..96a60c2 100644 --- a/nixos/base.nix +++ b/nixos/base.nix @@ -1,8 +1,4 @@ -{ - config, - pkgs, - ... -}: { +{ config, pkgs, ... }: { system.stateVersion = "25.05"; services.openssh.enable = true; diff --git a/nixos/flake.nix b/nixos/flake.nix index cbd955f..0195467 100644 --- a/nixos/flake.nix +++ b/nixos/flake.nix @@ -114,6 +114,9 @@ system = "x86_64-linux"; overlays = []; }; + + defaults = {pkgs, ...}: { + }; }; host-b = { diff --git a/nixos/hosts/dns/host.nix b/nixos/hosts/dns/host.nix index 55f9c24..d346b22 100644 --- a/nixos/hosts/dns/host.nix +++ b/nixos/hosts/dns/host.nix @@ -1,10 +1,6 @@ +{ config, pkgs, modulesPath, lib, ... }: + { - config, - pkgs, - modulesPath, - lib, - ... 
-}: { imports = [ ../../templates/base.nix ./networking.nix diff --git a/nixos/hosts/dns/networking.nix b/nixos/hosts/dns/networking.nix index 991d682..0942d99 100644 --- a/nixos/hosts/dns/networking.nix +++ b/nixos/hosts/dns/networking.nix @@ -2,20 +2,18 @@ networking.hostName = "dns"; # networking.useHostResolvConf = false; # networking.interfaces.eth0.useDHCP = true; - networking.interfaces.eth0.ipv4.addresses = [ - { - address = "192.168.1.53"; - prefixLength = 24; - } - ]; + networking.interfaces.eth0.ipv4.addresses = [{ + address = "192.168.1.53"; + prefixLength = 24; + }]; networking.defaultGateway = "192.168.1.1"; # your router - networking.nameservers = ["8.8.8.8"]; # fallback resolvers + networking.nameservers = [ "8.8.8.8" ]; # fallback resolvers - networking.firewall.allowedTCPPorts = [53 67 80 443]; - networking.firewall.allowedUDPPorts = [53 67]; + networking.firewall.allowedTCPPorts = [ 53 67 80 443 ]; + networking.firewall.allowedUDPPorts = [ 53 67 ]; networking.hosts = { - "192.168.1.53" = ["dns" "dns.lab"]; + "192.168.1.53" = [ "dns" "dns.lab" ]; }; } diff --git a/nixos/hosts/forgejo-runner/networking.nix b/nixos/hosts/forgejo-runner/networking.nix index 2a38bab..df98995 100644 --- a/nixos/hosts/forgejo-runner/networking.nix +++ b/nixos/hosts/forgejo-runner/networking.nix @@ -1,9 +1,4 @@ +{ config, lib, pkgs, runnerId, ... }: { - config, - lib, - pkgs, - runnerId, - ... -}: { networking.hostName = "forgejo-runner-${runnerId}"; } diff --git a/nixos/hosts/forgejo-runner/sops.nix b/nixos/hosts/forgejo-runner/sops.nix index fdddc2b..1cbbeaf 100644 --- a/nixos/hosts/forgejo-runner/sops.nix +++ b/nixos/hosts/forgejo-runner/sops.nix @@ -1,8 +1,5 @@ +{ config, lib, ... }: { - config, - lib, - ... -}: { sops.secrets."forgejo-runner-registration-token" = { sopsFile = ../../secrets/forgejo/runner-secrets.yml; mode = "0440"; diff --git a/nixos/hosts/forgejo/README.md b/nixos/hosts/forgejo/README.md new file mode 100644 index 0000000..8420219 --- /dev/null +++ b/nixos/hosts/forgejo/README.md @@ -0,0 +1,17 @@ +🥇 Phase 1: Git + Secrets + + ✅ Set up Forgejo VM (NixOS declarative) + + ✅ Set up sops-nix + age keys (can live in the Git repo) + + ✅ Push flake + ansible + secrets to Forgejo + + ✅ Write a basic README with how to rebuild infra + +🥈 Phase 2: GitOps + + 🔁 Add CI runner VM + + 🔁 Configure runner to deploy (nixos-rebuild or ansible-playbook) on commit + + 🔁 Optional: add webhooks to auto-trigger via Forgejo diff --git a/nixos/hosts/forgejo/forgejo.nix b/nixos/hosts/forgejo/forgejo.nix index 049673a..11ed29f 100644 --- a/nixos/hosts/forgejo/forgejo.nix +++ b/nixos/hosts/forgejo/forgejo.nix @@ -18,7 +18,7 @@ in { stateDir = "/srv/forgejo"; secrets = { mailer = { - PASSWD = config.sops.secrets.forgejo-mailer-password.path; + PASSWD = ; }; }; settings = { @@ -76,12 +76,12 @@ in { ALLOW_DEACTIVATE_ALL = false; }; - # oauth2 = { - # }; - # oauth2_client = { - # ENABLE_AUTO_REGISTRATION = true; - # UPDATE_AVATAR = true; - # }; + oauth2 = { + }; + oauth2_client = { + ENABLE_AUTO_REGISTRATION = true; + UPDATE_AVATAR = true; + }; # log = { # ROOT_PATH = "/var/log/forgejo"; # MODE = "file"; diff --git a/nixos/hosts/forgejo/host.nix b/nixos/hosts/forgejo/host.nix index 184e269..1883cca 100644 --- a/nixos/hosts/forgejo/host.nix +++ b/nixos/hosts/forgejo/host.nix @@ -1,10 +1,6 @@ +{ config, pkgs, modulesPath, lib, ... }: + { - config, - pkgs, - modulesPath, - lib, - ... 
-}: { imports = [ ../../templates/base.nix ../../secrets/shared-sops.nix diff --git a/nixos/hosts/forgejo/networking.nix b/nixos/hosts/forgejo/networking.nix index 7406cb2..6e9eb1c 100644 --- a/nixos/hosts/forgejo/networking.nix +++ b/nixos/hosts/forgejo/networking.nix @@ -1,8 +1,4 @@ +{ config, lib, pkgs, ... }: { - config, - lib, - pkgs, - ... -}: { networking.hostName = "forgejo"; } diff --git a/nixos/hosts/forgejo/sops.nix b/nixos/hosts/forgejo/sops.nix index 4d2b560..7e96908 100644 --- a/nixos/hosts/forgejo/sops.nix +++ b/nixos/hosts/forgejo/sops.nix @@ -1,6 +1,7 @@ let forgejoSops = ../../secrets/forgejo/secrets.yml; -in { +in +{ sops.secrets = { "forgejo-admin-password" = { sopsFile = forgejoSops; @@ -14,9 +15,5 @@ in { sopsFile = forgejoSops; owner = "forgejo"; }; - "forgejo-mailer-password" = { - sopsFile = forgejoSops; - owner = "forgejo"; - }; }; } diff --git a/nixos/hosts/monitoring/loki.nix b/nixos/hosts/monitoring/loki.nix index 63be626..8cd9cc7 100644 --- a/nixos/hosts/monitoring/loki.nix +++ b/nixos/hosts/monitoring/loki.nix @@ -1,5 +1,5 @@ { - networking.firewall.allowedTCPPorts = [3100]; + networking.firewall.allowedTCPPorts = [ 3100 ]; services.loki = { enable = true; diff --git a/nixos/hosts/monitoring/networking.nix b/nixos/hosts/monitoring/networking.nix index 4a1d647..a8fd74e 100644 --- a/nixos/hosts/monitoring/networking.nix +++ b/nixos/hosts/monitoring/networking.nix @@ -1,8 +1,4 @@ +{ config, lib, pkgs, ... }: { - config, - lib, - pkgs, - ... -}: { networking.hostName = "monitor"; } diff --git a/nixos/hosts/traefik/configuration/infra/routers.nix b/nixos/hosts/traefik/configuration/infra/routers.nix index 0207f92..67ed719 100644 --- a/nixos/hosts/traefik/configuration/infra/routers.nix +++ b/nixos/hosts/traefik/configuration/infra/routers.nix @@ -15,13 +15,6 @@ middlewares = []; }; - roundcube = { - rule = "Host(`roundcube.procopius.dk`)"; - service = "roundcube"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - forgejo = { rule = "Host(`git.procopius.dk`)"; service = "forgejo"; @@ -41,11 +34,10 @@ entryPoints = ["websecure"]; tls.certResolver = "letsencrypt"; }; - - caddy = { - rule = "PathPrefix(`/`)"; - service = "caddy"; - entryPoints = ["web"]; - priority = 15; + catchAll = { + rule = "HostRegexp(`.+`)"; + service = "nginx"; + entryPoints = ["websecure"]; + tls.certResolver = "letsencrypt"; }; } diff --git a/nixos/hosts/traefik/configuration/infra/services.nix b/nixos/hosts/traefik/configuration/infra/services.nix index 0f997a7..27de8c8 100644 --- a/nixos/hosts/traefik/configuration/infra/services.nix +++ b/nixos/hosts/traefik/configuration/infra/services.nix @@ -2,13 +2,12 @@ traefik.loadBalancer.servers = [{url = "http://localhost:8080";}]; mail-acme.loadBalancer.servers = [{url = "http://mail.lab:80";}]; - roundcube.loadBalancer.servers = [{url = "http://mail.lab:80";}]; forgejo.loadBalancer.servers = [{url = "http://forgejo.lab:3000";}]; proxmox.loadBalancer.servers = [{url = "https://192.168.1.205:8006";}]; proxmox.loadBalancer.serversTransport = "insecureTransport"; nas.loadBalancer.servers = [{url = "https://192.168.1.226:5001";}]; nas.loadBalancer.serversTransport = "insecureTransport"; - - caddy.loadBalancer.servers = [{url = "http://sandbox.lab:80";}]; + nginx.loadBalancer.servers = [{url = "https://192.168.1.226:4433";}]; + nginx.loadBalancer.serversTransport = "insecureTransport"; } diff --git a/nixos/hosts/traefik/configuration/photos/routers.nix b/nixos/hosts/traefik/configuration/photos/routers.nix index 
c0b94c9..65da5a3 100644 --- a/nixos/hosts/traefik/configuration/photos/routers.nix +++ b/nixos/hosts/traefik/configuration/photos/routers.nix @@ -32,52 +32,4 @@ entryPoints = ["websecure"]; tls.certResolver = "letsencrypt"; }; - - ente-minio = { - rule = "Host(`ente-minio.procopius.dk`)"; - service = "ente-minio"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - ente-minio-api = { - rule = "Host(`ente-minio-api.procopius.dk`)"; - service = "ente-minio-api"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - - ente-museum = { - rule = "Host(`ente-museum.procopius.dk`)"; - service = "ente-museum"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - - ente-photos = { - rule = "Host(`ente-photos.procopius.dk`) || Host(`ente-albums.procopius.dk`)"; - service = "ente-photos"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - - ente-cast = { - rule = "Host(`ente-cast.procopius.dk`) "; - service = "ente-cast"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - - ente-accounts = { - rule = "Host(`ente-accounts.procopius.dk`) "; - service = "ente-accounts"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; - - ente-auth = { - rule = "Host(`ente-auth.procopius.dk`) "; - service = "ente-auth"; - entryPoints = ["websecure"]; - tls.certResolver = "letsencrypt"; - }; } diff --git a/nixos/hosts/traefik/configuration/photos/services.nix b/nixos/hosts/traefik/configuration/photos/services.nix index 7857a55..7bec8af 100644 --- a/nixos/hosts/traefik/configuration/photos/services.nix +++ b/nixos/hosts/traefik/configuration/photos/services.nix @@ -4,12 +4,4 @@ account.loadBalancer.servers = [{url = "http://192.168.1.226:3001";}]; minio.loadBalancer.servers = [{url = "http://192.168.1.226:3201";}]; minio-api.loadBalancer.servers = [{url = "http://192.168.1.226:3200";}]; - - ente-minio.loadBalancer.servers = [{url = "http://photos.lab:9001";}]; - ente-minio-api.loadBalancer.servers = [{url = "http://photos.lab:9000";}]; - ente-museum.loadBalancer.servers = [{url = "http://photos.lab:8080";}]; - ente-photos.loadBalancer.servers = [{url = "http://photos.lab:3000";}]; - ente-accounts.loadBalancer.servers = [{url = "http://photos.lab:3001";}]; - ente-cast.loadBalancer.servers = [{url = "http://photos.lab:3004";}]; - ente-auth.loadBalancer.servers = [{url = "http://photos.lab:3003";}]; } diff --git a/nixos/hosts/traefik/networking.nix b/nixos/hosts/traefik/networking.nix index ac4e9b2..4c09486 100644 --- a/nixos/hosts/traefik/networking.nix +++ b/nixos/hosts/traefik/networking.nix @@ -1,19 +1,13 @@ -{ - config, - lib, - pkgs, - ... -}: { +{ config, lib, pkgs, ... 
}: {
+
   networking.hostName = "traefik";
-  networking.interfaces.eth0.ipv4.addresses = [
-    {
-      address = "192.168.1.80";
-      prefixLength = 24;
-    }
-  ];
+  networking.interfaces.eth0.ipv4.addresses = [{
+    address = "192.168.1.80";
+    prefixLength = 24;
+  }];
 
-  networking.firewall.allowedTCPPorts = [80 443 8080 8082];
+  networking.firewall.allowedTCPPorts = [ 80 443 8080 8082 ];
 
-  networking.nameservers = ["192.168.1.53"];
+  networking.nameservers = [ "192.168.1.53" ];
   networking.defaultGateway = "192.168.1.1";
 }
diff --git a/nixos/hosts/traefik/traefik.nix b/nixos/hosts/traefik/traefik.nix
index 10b4e3e..139161f 100644
--- a/nixos/hosts/traefik/traefik.nix
+++ b/nixos/hosts/traefik/traefik.nix
@@ -50,41 +50,14 @@ in {
     staticConfigOptions = staticConfig;
 
-    dynamicConfigOptions = {
-      # HTTP configuration (your existing setup)
-      http = {
-        routers = allRouters;
-        services = allServices;
-        middlewares = middlewares;
-        serversTransports = {
-          insecureTransport = {
-            insecureSkipVerify = true;
-          };
-        };
-      };
+    dynamicConfigOptions.http = {
+      routers = allRouters;
+      services = allServices;
+      middlewares = middlewares;
 
-      tcp = {
-        routers = {
-          caddy-fallback = {
-            rule = "HostSNI(`*`)"; # Matches any SNI
-            service = "caddy-tls";
-            entryPoints = ["websecure"];
-            priority = 1; # Lowest priority - only if no HTTP router matches
-            tls = {
-              passthrough = true;
-            };
-          };
-        };
-        services = {
-          caddy-tls = {
-            loadBalancer = {
-              servers = [
-                {
-                  address = "sandbox.lab:443";
-                }
-              ];
-            };
-          };
+      serversTransports = {
+        insecureTransport = {
+          insecureSkipVerify = true;
         };
       };
     };
diff --git a/nixos/hosts/vpn/README.md b/nixos/hosts/vpn/README.md
new file mode 100644
index 0000000..e76e6ef
--- /dev/null
+++ b/nixos/hosts/vpn/README.md
@@ -0,0 +1,127 @@
+Notes on whether this VPN host needs Innernet or Headscale+patch, or just a small self-hosted WireGuard portal.
+
+You **don’t need** Innernet or Headscale+patch *if* you're comfortable building a small self-hosted portal that handles:
+
+* ✅ OIDC login
+* ✅ WireGuard peer key management
+* ✅ Config generation and download
+
+So let’s break it down:
+
+---
+
+## 🧩 Why *Innernet* and *Headscale+patch* Exist
+
+Those tools solve **user/device coordination**, dynamic routing, and access control *automatically*, **with batteries included**. They're great if you want a more managed, “plug-and-play” experience. Specifically:
+
+| Tool                     | Purpose                                                            |
+| ------------------------ | ------------------------------------------------------------------ |
+| **Innernet**             | Full WireGuard-based mesh + OIDC login + per-device management     |
+| **Headscale**            | Tailscale backend, enables user login + ACL + device coordination  |
+| **Headscale+OIDC patch** | Add OIDC login to Headscale (by default, only pre-shared keys)     |
+
+These tools do things like:
+
+* Assign IPs/DNS names to users
+* Regenerate keys
+* Handle coordination between peers
+* Gate access with identity
+
+---
+
+## 🛠️ Why You *Don’t* Need Them (Your Custom Portal Idea)
+
+If you're fine with:
+
+* Managing WireGuard server and IPs manually
+* Writing a small web service to issue `.conf` files after login
+* Using firewall rules for access control
+
+Then you absolutely **can** build a custom system around **WireGuard + OIDC** like this:
+
+---
+
+### 🧱 Architecture: WireGuard + OIDC + Config Generator
+
+```
+        ┌──────────────┐
+        │  Your OIDC   │
+        │  provider    │◄────── Google, Authentik, Keycloak
+        └─────┬────────┘
+              │
+         (OIDC Login)
+              │
+     ┌────────▼────────┐
+     │   Web Portal    │◄────── nginx + Go, Node.js, Flask, etc.
+     │  (self-hosted)  │
+     └──┬────────────┬─┘
+  Gen Keys │         │  Serve config
+ & register│         └─────> `client.conf` / QR code
+           ▼
+  ┌──────────────────────┐
+  │ WireGuard server     │  (NixOS)
+  │ - Handles all peers  │
+  └──────────────────────┘
+```
+
+---
+
+### 🔐 What Your Portal Needs to Do
+
+* Authenticate via OIDC (OAuth2 flow)
+* Store a list of "authorized users"
+* When a user logs in:
+
+  * Check if they already have a WireGuard peer entry
+  * If not: generate a keypair, assign IP (e.g. `10.100.0.5`)
+  * Append to server config (`/etc/wireguard/wg0.conf`) or use `wg set`
+  * Return config file or QR code for the mobile client
+
+---
+
+## ✅ Benefits of This DIY Approach
+
+* 💡 Full control: you own every step
+* 🔐 OIDC is the gatekeeper (centrally managed access)
+* ⚡ No extra daemon/software overhead (just WireGuard + your app)
+* 🧩 Easily extensible (e.g., device limits, IP lease expiration)
+
+---
+
+## ⚙️ Tools You Can Use
+
+| Purpose                | Tools                                                                       |
+| ---------------------- | --------------------------------------------------------------------------- |
+| OIDC Auth              | [Dex](https://github.com/dexidp/dex), Keycloak, Authentik, or Firebase      |
+| Web Portal Backend     | Node.js (Express), Go, Flask, FastAPI                                       |
+| Config Template Engine | Go templates, EJS, Jinja2                                                   |
+| WireGuard Integration  | `wg`, `wg-quick`, or directly edit `wg0.conf`                               |
+| NixOS Support          | Use `networking.wireguard.interfaces.wg0.peers` to declaratively add peers  |
+
+---
+
+## 🧪 Bonus Features You Could Build
+
+* 🔐 Per-user firewall rules
+* 🔁 One-time config URLs (`vpn.mydomain.com/invite/xyz`)
+* 🔐 Optional Authelia/Keycloak MFA
+* 📉 Dashboard with currently active peers (use `wg show`)
+
+---
+
+## ✨ Summary
+
+| Option            | You Need Innernet / Headscale? | Login via OIDC? | DNS Routing? | Complexity |
+| ----------------- | ------------------------------ | --------------- | ------------ | ---------- |
+| Your own portal   | ❌ No                          | ✅ Yes          | ❌ Manual    | 🟡 Medium  |
+| Innernet          | ✅ Yes                         | ✅ Yes          | ✅ Built-in  | 🟠 Medium  |
+| Headscale + patch | ✅ Yes                         | ✅ Yes          | ✅ Built-in  | 🔴 High    |
+
+The custom VPN portal is the approach chosen here.
+Remaining pieces to build out:
+
+* A **sample architecture repo**
+* A **NixOS module to support peer configs**
+* The **login + config generator backend** itself
+
+A Nix flake plus a small OIDC portal template is a reasonable starting point for the project.
diff --git a/nixos/hosts/warpgate/host.nix b/nixos/hosts/warpgate/host.nix
new file mode 100644
index 0000000..e69de29
diff --git a/nixos/hosts/warpgate/warpgate.nix b/nixos/hosts/warpgate/warpgate.nix
new file mode 100644
index 0000000..bc48f1e
--- /dev/null
+++ b/nixos/hosts/warpgate/warpgate.nix
@@ -0,0 +1,14 @@
+{
+  virtualisation.oci-containers.containers = {
+    warpgate = {
+      image = "ghcr.io/warp-tech/warpgate";
+      ports = [
+        "2222:2222"
+        "8888:8888"
+      ];
+      volumes = [
+        "/srv/warpgate/data:/data"
+      ];
+    };
+  };
+}
diff --git a/nixos/modules/node-exporter.nix b/nixos/modules/node-exporter.nix
index 11f335e..b394be6 100644
--- a/nixos/modules/node-exporter.nix
+++ b/nixos/modules/node-exporter.nix
@@ -1,20 +1,18 @@
-{
-  config,
-  pkgs,
-  ...
}: +let prometheus_exporter_port = 9100; -in { - networking.firewall.allowedTCPPorts = [prometheus_exporter_port]; +in +{ + networking.firewall.allowedTCPPorts = [ prometheus_exporter_port ]; services.prometheus = { exporters = { node = { enable = true; - enabledCollectors = ["systemd"]; + enabledCollectors = [ "systemd" ]; port = prometheus_exporter_port; # /nix/store/zgsw0yx18v10xa58psanfabmg95nl2bb-node_exporter-1.8.1/bin/node_exporter --help - extraFlags = ["--collector.ethtool" "--collector.softirqs" "--collector.tcpstat" "--collector.wifi"]; + extraFlags = [ "--collector.ethtool" "--collector.softirqs" "--collector.tcpstat" "--collector.wifi" ]; }; }; }; diff --git a/nixos/secrets/forgejo/secrets.yml b/nixos/secrets/forgejo/secrets.yml index 2b11e9e..94f993a 100644 --- a/nixos/secrets/forgejo/secrets.yml +++ b/nixos/secrets/forgejo/secrets.yml @@ -1,7 +1,6 @@ forgejo-admin-password: ENC[AES256_GCM,data:S05b/J9AK2SuIKDSWmtRf72C7V5FwMgZv/o5yxzNXRZEH2eIm18sC6+FEg==,iv:Ig/c4K9Io0S07Ywl4JQtbfxhjXJ7Rvea7+N4KhLUqjc=,tag:rx44tRuAbERBZR45QN6b9A==,type:str] forgejo-db-password: ENC[AES256_GCM,data:5YwRl6HNa1LzJgr73ArllG9s+vWCS7m/s6QQh5YUz8I0anG7GQ==,iv:5ARq3unUy2xbDcAFkucvEhjz/QYC2rYgutEo4T2bw2E=,tag:k7eHKqeA7k6XzksLVcnXRw==,type:str] forgejo-secret-key: ENC[AES256_GCM,data:iserDzOnJkM4HLP4c6rekSFANtRmEXwuCPyfMqo=,iv:3CNqN/DyS4PIl/iOO4JCpWJn3ARlb5KQSCNv5Orx2mo=,tag:q34jEpGrK2EKf0bcBznpQQ==,type:str] -forgejo-mailer-password: ENC[AES256_GCM,data:6mX8wB7RkiCj/43G4vttusOPogUifKua3Ozgch8ewz8=,iv:BxFIto7L0A8YhhmiRYwUFDy8PeXaghE2j9SQbZ1GaZQ=,tag:gB6/9lUrz0HeQUl536Vp4A==,type:str] sops: age: - recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y @@ -13,7 +12,7 @@ sops: LzBHRWZXODVDZTE2WnVZOGNQckk4KzAKdm3xnA03JnQnc07yhVVtYkVYS6654Zm1 4AcLRSCcWvWrvp26XYVE2UGqU7acfxrTsk07o0nHAQpa5LjgJ4oFKw== -----END AGE ENCRYPTED FILE----- - lastmodified: "2025-07-25T10:22:17Z" - mac: ENC[AES256_GCM,data:JiqFsbC6rxk3Pmc0vqHwElfT3kXDLJwiBZS50xo/iyOgwyWbwf5sCNdn9CMFciDsDHfd8jRp8hYfdr7VaPFwc/Iec5cwHY23+lzat1hwOkmwEDdxW7pY4IVXZEWdBaeVrFInnvdLgJAOi+KecZ2BIx0iyMEQZUKs6exxSXB2/fE=,iv:LWv0XKSBPz35+pIur98+js3ETnFDOf6aEY67L2RGpHU=,tag:VzTG6zhHVHpbVDAc2266qQ==,type:str] + lastmodified: "2025-06-06T18:38:08Z" + mac: ENC[AES256_GCM,data:BvpIz6tfVSR3m1l7g4ilUyoTKKqirt+k6tPizxCsAgjztt0IyDCio+cLTln4P1tGSy/frjvbxy1mR3tIDkWn6aDFoYz/gnsbTKHSo/K5Q77jJ3uJffoB3/Wruigojl3EBIQHALicq9xhF8rsH/RKjpWqh+TrQwO+ibbA6ff76cw=,iv:Z0ZwJ9aPpI9MtbsZnvFkW7zsFFOMj5/Gv+tF/mal+yI=,tag:knf01NC/XwgjPUHH+8RpSg==,type:str] unencrypted_suffix: _unencrypted version: 3.10.2 diff --git a/nixos/users/plasmagoat.nix b/nixos/users/plasmagoat.nix index 2ef4d70..72685be 100644 --- a/nixos/users/plasmagoat.nix +++ b/nixos/users/plasmagoat.nix @@ -1,9 +1,4 @@ -{ - config, - lib, - pkgs, - ... -}: { +{ config, lib, pkgs, ... }: { users.users.plasmagoat = { isNormalUser = true; description = "plasmagoat"; diff --git a/pkgs/default.nix b/pkgs/default.nix index 657eca0..f5abee4 100644 --- a/pkgs/default.nix +++ b/pkgs/default.nix @@ -3,5 +3,4 @@ pkgs: { # example = pkgs.callPackage ./example { }; ente-web = pkgs.callPackage ./ente-web.nix {}; - homelab-docs = pkgs.callPackage ./homelab-docs {}; } diff --git a/pkgs/ente-web.nix b/pkgs/ente-web.nix index 0be16a2..8aa0fa6 100644 --- a/pkgs/ente-web.nix +++ b/pkgs/ente-web.nix @@ -9,7 +9,7 @@ nix-update-script, extraBuildEnv ? {}, # This package contains serveral sub-applications. This specifies which of them you want to build. - enteApp ? "auth", + enteApp ? 
"photos", # Accessing some apps (such as account) directly will result in a hardcoded redirect to ente.io. # To prevent users from accidentally logging in to ente.io instead of the selfhosted instance, you # can set this parameter to override these occurrences with your own url. Must include the schema. @@ -18,7 +18,7 @@ }: stdenv.mkDerivation (finalAttrs: { pname = "ente-web-${enteApp}"; - version = "1.1.57"; + version = "1.0.4"; src = fetchFromGitHub { owner = "ente-io"; @@ -26,13 +26,13 @@ stdenv.mkDerivation (finalAttrs: { sparseCheckout = ["web"]; tag = "photos-v${finalAttrs.version}"; fetchSubmodules = true; - hash = "sha256-SCkxGm/w0kES7wDuLBsUTgwrFYNLvLD51NyioAVTLrg="; # lib.fakeHash; + hash = "sha256-M1kAZgqjbWNn6LqymtWRmAk/v0vWEGbyS50lVrsr85o="; }; sourceRoot = "${finalAttrs.src.name}/web"; offlineCache = fetchYarnDeps { yarnLock = "${finalAttrs.src}/web/yarn.lock"; - hash = "sha256-FnLMXOpIVNOhaM7VjNEDlwpew9T/5Ch5eFed9tLpDsI="; + hash = "sha256-EYhYwy6+7bgWckU/7SfL1PREWw9JUgKxWadSVtoZwXs="; }; nativeBuildInputs = [ diff --git a/pkgs/homelab-docs/default.nix b/pkgs/homelab-docs/default.nix deleted file mode 100644 index 30dc69e..0000000 --- a/pkgs/homelab-docs/default.nix +++ /dev/null @@ -1,50 +0,0 @@ -# homelab-docs.nix - Main documentation generator package -{ - lib, - stdenv, - writeShellScriptBin, - jq, - nixfmt, -}: let - # Import individual CLI generators - docsGenerator = import ./main.nix {inherit writeShellScriptBin;}; - fleetDocsGenerator = import ./fleet.nix {inherit writeShellScriptBin jq;}; - nodeDocsGenerator = import ./nodes.nix {inherit writeShellScriptBin jq;}; - serviceDocsGenerator = import ./services.nix {inherit writeShellScriptBin jq;}; - deploymentDocsGenerator = import ./deployment.nix {inherit writeShellScriptBin jq;}; - readmeGenerator = import ./readme.nix {inherit writeShellScriptBin jq;}; -in - stdenv.mkDerivation { - pname = "homelab-docs"; - version = "1.0.0"; - - dontUnpack = true; - dontBuild = true; - - installPhase = '' - mkdir -p $out/bin - - # Install all the generators - cp ${docsGenerator}/bin/homelab-generate-docs $out/bin/ - cp ${fleetDocsGenerator}/bin/homelab-docs-fleet $out/bin/ - cp ${nodeDocsGenerator}/bin/homelab-docs-nodes $out/bin/ - cp ${serviceDocsGenerator}/bin/homelab-docs-services $out/bin/ - cp ${deploymentDocsGenerator}/bin/homelab-docs-deployment $out/bin/ - cp ${readmeGenerator}/bin/homelab-docs-readme $out/bin/ - - # Make sure they're executable - chmod +x $out/bin/* - ''; - - meta = with lib; { - description = "Documentation generator for homelab colmena deployments"; - longDescription = '' - A collection of tools to generate comprehensive documentation - for homelab deployments managed with colmena. Extracts configuration - from flakes and generates markdown documentation. 
- ''; - license = licenses.mit; - maintainers = []; - platforms = platforms.all; - }; - } diff --git a/pkgs/homelab-docs/deployment.nix b/pkgs/homelab-docs/deployment.nix deleted file mode 100644 index 8d7e18a..0000000 --- a/pkgs/homelab-docs/deployment.nix +++ /dev/null @@ -1,89 +0,0 @@ -# homelab-docs-deployment.nix - Deployment documentation generator CLI -{ - writeShellScriptBin, - jq, -}: -writeShellScriptBin "homelab-docs-deployment" '' - #!/usr/bin/env bash - set -euo pipefail - - cat << EOF - # Current Deployment State - - > Current homelab deployment configuration - > - > Generated on: $(date -R) - > Working directory: $(pwd) - - ## Deployment Summary - - EOF - - # Get deployment summary - deployment_summary=$(colmena eval -E '{ nodes, pkgs, lib, ... }: - let - homelabNodes = lib.filterAttrs (name: node: node.config.homelab.enable or false) nodes; - allServices = lib.flatten (lib.mapAttrsToList (nodeName: node: - lib.attrNames (lib.filterAttrs (n: v: v.enable or false) (node.config.homelab.services or {})) - ) homelabNodes); - in { - totalNodes = lib.length (lib.attrNames nodes); - homelabEnabledNodes = lib.length (lib.attrNames homelabNodes); - uniqueServices = lib.length (lib.unique allServices); - totalServiceInstances = lib.length allServices; - nodeNames = lib.attrNames nodes; - homelabNodeNames = lib.attrNames homelabNodes; - }') - - total_nodes=$(echo "$deployment_summary" | ${jq}/bin/jq -r '.totalNodes') - homelab_nodes=$(echo "$deployment_summary" | ${jq}/bin/jq -r '.homelabEnabledNodes') - unique_services=$(echo "$deployment_summary" | ${jq}/bin/jq -r '.uniqueServices') - service_instances=$(echo "$deployment_summary" | ${jq}/bin/jq -r '.totalServiceInstances') - - echo "| Metric | Count |" - echo "|--------|-------|" - echo "| Total Nodes | $total_nodes |" - echo "| Homelab-Enabled Nodes | $homelab_nodes |" - echo "| Unique Services | $unique_services |" - echo "| Service Instances | $service_instances |" - echo - - echo "## Node Status" - echo - - # Get detailed node status - node_status=$(colmena eval -E '{ nodes, pkgs, lib, ... }: - lib.mapAttrs (name: node: { - homelabEnabled = node.config.homelab.enable or false; - environment = node.config.homelab.environment or "unknown"; - serviceCount = if (node.config.homelab.enable or false) then - lib.length (lib.attrNames (lib.filterAttrs (n: v: v.enable or false) (node.config.homelab.services or {}))) - else 0; - monitoringEnabled = if (node.config.homelab.enable or false) then - node.config.homelab.monitoring.enable or false - else false; - backupsEnabled = if (node.config.homelab.enable or false) then - node.config.homelab.backups.enable or false - else false; - proxyEnabled = if (node.config.homelab.enable or false) then - node.config.homelab.reverseProxy.enable or false - else false; - }) nodes') - - echo "| Node | Homelab | Environment | Services | Monitoring | Backups | Proxy |" - echo "|------|---------|-------------|----------|------------|---------|-------|" - - echo "$node_status" | ${jq}/bin/jq -r 'to_entries[] | - [.key, (.value.homelabEnabled | if . then "✅" else "❌" end), .value.environment, (.value.serviceCount | tostring), - (.value.monitoringEnabled | if . then "✅" else "❌" end), - (.value.backupsEnabled | if . then "✅" else "❌" end), - (.value.proxyEnabled | if . 
then "✅" else "❌" end)] | - @tsv' | while IFS=$'\t' read -r node homelab env services monitoring backups proxy; do - echo "| \`$node\` | $homelab | $env | $services | $monitoring | $backups | $proxy |" - done - - echo - echo "---" - echo - echo "*Deployment state extracted from live colmena configuration*" -'' diff --git a/pkgs/homelab-docs/fleet.nix b/pkgs/homelab-docs/fleet.nix deleted file mode 100644 index fc14680..0000000 --- a/pkgs/homelab-docs/fleet.nix +++ /dev/null @@ -1,91 +0,0 @@ -{ - writeShellScriptBin, - jq, -}: -writeShellScriptBin "homelab-docs-fleet" '' - #!/usr/bin/env bash - set -euo pipefail - - cat << EOF - # Homelab Fleet Overview - - > Auto-generated fleet overview - > - > Generated on: $(date -R) - > Source: $(pwd) - - ## Fleet Statistics - - EOF - - # Get basic fleet stats - echo "### Basic Information" - echo - - fleet_stats=$(colmena eval -E '{ nodes, pkgs, lib, ... }: { - totalNodes = lib.length (lib.attrNames nodes); - nodeNames = lib.attrNames nodes; - }') - - total_nodes=$(echo "$fleet_stats" | ${jq}/bin/jq -r '.totalNodes') - node_names=$(echo "$fleet_stats" | ${jq}/bin/jq -r '.nodeNames[]' | tr '\n' ' ') - - echo "| Metric | Value |" - echo "|--------|-------|" - echo "| Total Nodes | $total_nodes |" - echo "| Node Names | $node_names |" - echo - - # Get homelab-enabled nodes - echo "### Homelab Configuration" - echo - - homelab_info=$(colmena eval -E '{ nodes, pkgs, lib, ... }: { - homelabNodes = lib.mapAttrs (name: node: { - enabled = node.config.homelab.enable or false; - hostname = node.config.homelab.hostname or null; - environment = node.config.homelab.environment or null; - }) nodes; - }') - - echo "| Node | Homelab Enabled | Hostname | Environment |" - echo "|------|----------------|----------|-------------|" - - echo "$homelab_info" | ${jq}/bin/jq -r '.homelabNodes | to_entries[] | - [.key, (.value.enabled | tostring), (.value.hostname // "N/A"), (.value.environment // "N/A")] | - @tsv' | while IFS=$'\t' read -r node enabled hostname environment; do - enabled_icon=$(if [[ "$enabled" == "true" ]]; then echo "✅"; else echo "❌"; fi) - echo "| \`$node\` | $enabled_icon | $hostname | $environment |" - done - - echo - - # Get service distribution - echo "### Service Distribution" - echo - - service_info=$(colmena eval -E '{ nodes, pkgs, lib, ... 
}: - lib.mapAttrs (name: node: - if (node.config.homelab.enable or false) then { - serviceCount = lib.length (lib.attrNames (lib.filterAttrs (n: v: v.enable or false) (node.config.homelab.services or {}))); - serviceNames = lib.attrNames (lib.filterAttrs (n: v: v.enable or false) (node.config.homelab.services or {})); - } else { - serviceCount = 0; - serviceNames = []; - } - ) nodes') - - echo "| Node | Service Count | Services |" - echo "|------|---------------|----------|" - - echo "$service_info" | ${jq}/bin/jq -r 'to_entries[] | - [.key, (.value.serviceCount | tostring), (.value.serviceNames | join(", "))] | - @tsv' | while IFS=$'\t' read -r node count services; do - echo "| \`$node\` | $count | $services |" - done - - echo - echo "---" - echo - echo "*Fleet overview generated from colmena evaluation*" -'' diff --git a/pkgs/homelab-docs/main.nix b/pkgs/homelab-docs/main.nix deleted file mode 100644 index e3389d2..0000000 --- a/pkgs/homelab-docs/main.nix +++ /dev/null @@ -1,73 +0,0 @@ -{writeShellScriptBin}: -writeShellScriptBin "homelab-generate-docs" '' - #!/usr/bin/env bash - set -euo pipefail - - # Colors - BLUE='\033[0;34m' - GREEN='\033[0;32m' - YELLOW='\033[1;33m' - RED='\033[0;31m' - NC='\033[0m' - - info() { echo -e "''${BLUE}$1''${NC}"; } - success() { echo -e "''${GREEN}$1''${NC}"; } - warn() { echo -e "''${YELLOW}$1''${NC}"; } - error() { echo -e "''${RED}$1''${NC}"; } - - # Configuration - DOCS_DIR="''${1:-./docs}" - - info "📚 Generating homelab documentation..." - echo " Output directory: $DOCS_DIR" - echo - - # Check if we're in a directory with a flake - if [[ ! -f flake.nix ]]; then - error "No flake.nix found in current directory" - echo "Please run this command from your homelab flake directory" - exit 1 - fi - - # Check if colmena is available - if ! command -v colmena >/dev/null 2>&1; then - error "colmena command not found." - echo "Please ensure colmena is available in your environment" - exit 1 - fi - - # Create docs directory - mkdir -p "$DOCS_DIR" - - # Generate fleet overview - info " 🌐 Generating fleet overview..." - homelab-docs-fleet > "$DOCS_DIR/fleet-overview.md" - - # Generate node documentation - info " 🖥️ Generating node configurations..." - homelab-docs-nodes > "$DOCS_DIR/nodes.md" - - # Generate service documentation - info " ⚙️ Generating service configurations..." - homelab-docs-services > "$DOCS_DIR/services.md" - - # Generate current deployment - info " 🏠 Generating current deployment..." - homelab-docs-deployment > "$DOCS_DIR/current-deployment.md" - - # Generate README - info " 📋 Generating README..." - homelab-docs-readme > "$DOCS_DIR/README.md" - - success "✅ Documentation generated successfully!" - echo - echo "Generated files:" - echo " 🌐 fleet-overview.md - Fleet statistics and overview" - echo " 🖥️ nodes.md - Per-node configurations" - echo " ⚙️ services.md - Service configurations" - echo " 🏠 current-deployment.md - Current deployment state" - echo " 📋 README.md - Documentation index" - echo - echo "💡 Tip: Add these files to your repository and set up GitHub Actions" - echo " to automatically regenerate documentation on changes!" 
-'' diff --git a/pkgs/homelab-docs/nodes.nix b/pkgs/homelab-docs/nodes.nix deleted file mode 100644 index 7ea32fd..0000000 --- a/pkgs/homelab-docs/nodes.nix +++ /dev/null @@ -1,123 +0,0 @@ -{ - writeShellScriptBin, - jq, -}: -writeShellScriptBin "homelab-docs-nodes" '' - #!/usr/bin/env bash - set -euo pipefail - - cat << EOF - # Node Configurations - - > Detailed per-node configuration - > - > Generated on: $(date -R) - - EOF - - # Get all node information - node_info=$(colmena eval -E '{ nodes, pkgs, lib, ... }: - lib.mapAttrs (name: node: { - # Basic system info - nixosVersion = node.config.system.nixos.version; - hostName = node.config.networking.hostName; - system = node.config.nixpkgs.system; - - # Homelab config (safe extraction) - homelab = if (node.config.homelab.enable or false) then { - enabled = true; - hostname = node.config.homelab.hostname or null; - domain = node.config.homelab.domain or null; - externalDomain = node.config.homelab.externalDomain or null; - environment = node.config.homelab.environment or null; - location = node.config.homelab.location or null; - tags = node.config.homelab.tags or []; - } else { - enabled = false; - }; - - # Services (safe extraction) - services = if (node.config.homelab.enable or false) then - lib.mapAttrs (svcName: svc: { - enabled = svc.enable or false; - port = svc.port or null; - description = svc.description or svcName; - tags = svc.tags or []; - }) (node.config.homelab.services or {}) - else {}; - }) nodes') - - echo "$node_info" | ${jq}/bin/jq -r 'to_entries[] | .key' | while read -r node; do - echo "## Node: $node" - echo - - # Basic system information - echo "### System Information" - echo - - nixos_version=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].nixosVersion") - hostname=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].hostName") - system=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].system") - - echo "| Property | Value |" - echo "|----------|-------|" - echo "| NixOS Version | \`$nixos_version\` |" - echo "| Hostname | \`$hostname\` |" - echo "| System | \`$system\` |" - echo - - # Homelab configuration - homelab_enabled=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.enabled") - - if [[ "$homelab_enabled" == "true" ]]; then - echo "### Homelab Configuration" - echo - - hl_hostname=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.hostname // \"N/A\"") - hl_domain=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.domain // \"N/A\"") - hl_external=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.externalDomain // \"N/A\"") - hl_env=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.environment // \"N/A\"") - hl_location=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.location // \"N/A\"") - hl_tags=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].homelab.tags | join(\", \")") - - echo "| Property | Value |" - echo "|----------|-------|" - echo "| Homelab Hostname | \`$hl_hostname\` |" - echo "| Domain | \`$hl_domain\` |" - echo "| External Domain | \`$hl_external\` |" - echo "| Environment | \`$hl_env\` |" - echo "| Location | \`$hl_location\` |" - echo "| Tags | $hl_tags |" - echo - - # Services - echo "### Services" - echo - - services_data=$(echo "$node_info" | ${jq}/bin/jq -r ".[\"$node\"].services") - service_count=$(echo "$services_data" | ${jq}/bin/jq 'length') - - if [[ "$service_count" -gt 0 ]]; then - echo "| Service | Enabled | Port | Description | Tags |" - echo "|---------|---------|------|-------------|------|" - - echo 
"$services_data" | ${jq}/bin/jq -r 'to_entries[] | - [.key, (.value.enabled | tostring), (.value.port // "N/A" | tostring), (.value.description // "N/A"), (.value.tags | join(", "))] | - @tsv' | while IFS=$'\t' read -r service enabled port description tags; do - enabled_icon=$(if [[ "$enabled" == "true" ]]; then echo "✅"; else echo "❌"; fi) - echo "| \`$service\` | $enabled_icon | $port | $description | $tags |" - done - else - echo "No services configured." - fi - else - echo "### Homelab Configuration" - echo - echo "❌ Homelab is not enabled on this node." - fi - - echo - echo "---" - echo - done -'' diff --git a/pkgs/homelab-docs/readme.nix b/pkgs/homelab-docs/readme.nix deleted file mode 100644 index ea72b8f..0000000 --- a/pkgs/homelab-docs/readme.nix +++ /dev/null @@ -1,124 +0,0 @@ -# homelab-docs-readme.nix - README generator CLI -{ - writeShellScriptBin, - jq, -}: -writeShellScriptBin "homelab-docs-readme" '' - #!/usr/bin/env bash - set -euo pipefail - - cat << EOF - # Homelab Documentation - - > Auto-generated documentation for the homelab deployment - > - > Generated on: $(date -R) - > Source: $(pwd) - - ## 📚 Documentation Files - - This documentation is automatically generated from your colmena flake configuration. - - ### 📊 Overview Documents - - **[Fleet Overview](fleet-overview.md)** - High-level fleet statistics and service distribution - - **[Current Deployment](current-deployment.md)** - Current deployment state and node status - - ### 📖 Detailed Configuration - - **[Node Configurations](nodes.md)** - Per-node detailed configuration and services - - **[Service Configurations](services.md)** - Service configurations across the fleet - - ## 🚀 Quick Actions - - ### View Current Status - \`\`\`bash - # Service status across fleet (if homelab CLI is available) - homelab services --global - - # Backup status - homelab backups --global - - # Overall status - homelab status - \`\`\` - - ### Update Documentation - \`\`\`bash - # Regenerate all documentation - homelab-generate-docs ./docs - - # Generate in different directory - homelab-generate-docs /path/to/output - \`\`\` - - ## 📋 Quick Stats - - EOF - - # Add live stats - quick_stats=$(colmena eval -E '{ nodes, pkgs, lib, ... 
}: - let - homelabNodes = lib.filterAttrs (name: node: node.config.homelab.enable or false) nodes; - in { - totalNodes = lib.length (lib.attrNames nodes); - homelabNodes = lib.length (lib.attrNames homelabNodes); - }') - - total_nodes=$(echo "$quick_stats" | ${jq}/bin/jq -r '.totalNodes') - homelab_nodes=$(echo "$quick_stats" | ${jq}/bin/jq -r '.homelabNodes') - - echo "- **Total Nodes**: $total_nodes" - echo "- **Homelab-Enabled Nodes**: $homelab_nodes" - echo "- **Generated**: $(date -R)" - echo - echo "## 🛠️ Management Tools" - echo - echo "### Documentation Commands" - echo "- \`homelab-generate-docs\` - Regenerate this documentation" - echo "- \`homelab-docs-fleet\` - Generate fleet overview only" - echo "- \`homelab-docs-nodes\` - Generate node configurations only" - echo "- \`homelab-docs-services\` - Generate service configurations only" - echo "- \`homelab-docs-deployment\` - Generate deployment state only" - echo - echo "### Colmena Commands" - echo "- \`colmena eval\` - Evaluate flake expressions" - echo "- \`colmena apply\` - Deploy configuration changes" - echo "- \`colmena build\` - Build configurations without deploying" - echo - echo "## 🎯 Integration with CI/CD" - echo - echo "### GitHub Actions Example" - echo - echo "\`\`\`yaml" - echo "name: Generate Documentation" - echo "on:" - echo " push:" - echo " branches: [ main ]" - echo "" - echo "jobs:" - echo " docs:" - echo " runs-on: ubuntu-latest" - echo " steps:" - echo " - uses: actions/checkout@v4" - echo " - uses: cachix/install-nix-action@v24" - echo " - name: Generate docs" - echo " run: nix develop --command homelab-generate-docs ./docs" - echo " - name: Commit docs" - echo " run: |" - echo " git add docs/" - echo " git commit -m \"docs: update homelab documentation\" || exit 0" - echo " git push" - echo "\`\`\`" - echo - echo "### Manual Generation" - echo - echo "\`\`\`bash" - echo "# From your homelab directory" - echo "nix develop" - echo "homelab-generate-docs ./docs" - echo "git add docs/ && git commit -m \"Update docs\"" - echo "\`\`\`" - echo - echo "---" - echo - echo "*This documentation reflects the live state of your homelab deployment as evaluated by colmena.*" -'' diff --git a/pkgs/homelab-docs/services.nix b/pkgs/homelab-docs/services.nix deleted file mode 100644 index 2fed267..0000000 --- a/pkgs/homelab-docs/services.nix +++ /dev/null @@ -1,384 +0,0 @@ -{ - writeShellScriptBin, - jq, -}: -writeShellScriptBin "homelab-docs-services" '' - #!/usr/bin/env bash - set -euo pipefail - - cat << EOF - # Service Catalog - - > Complete service documentation with core options, feature integrations, and smart defaults - > - > Generated on: $(date -R) - - This document provides comprehensive documentation for homelab services, organized by: - - **Core Service Options**: The main service configuration - - **Feature Integrations**: Available monitoring, logging, and proxy features - - **Service Defaults**: What this service configures by default for each feature - - EOF - - # Extract comprehensive service information - echo "Extracting service information..." >&2 - services_catalog=$(colmena eval -E ' - { nodes, pkgs, lib, ... }: - let - # Helper to extract option information - extractOptions = path: options: - lib.flatten (lib.mapAttrsToList (name: value: - let - currentPath = path ++ [name]; - pathStr = lib.concatStringsSep "." 
currentPath; - in - if (value._type or null) == "option" then - [{ - name = pathStr; - type = value.type.description or "unknown"; - default = value.default or null; - defaultText = if value ? defaultText then value.defaultText.text or null else null; - description = value.description or "No description"; - readOnly = value.readOnly or false; - }] - else if lib.isAttrs value && !(lib.hasAttr "_type" value) then - extractOptions currentPath value - else [] - ) options); - - # Get first node for option definitions - firstNode = lib.head (lib.attrValues nodes); - homelabServices = firstNode.options.homelab.services or {}; - - # Process each service - serviceInfo = lib.mapAttrs (serviceName: serviceOptions: - let - allOptions = extractOptions [] serviceOptions; - - # Separate core options from feature options - coreOptions = lib.filter (opt: - !(lib.hasPrefix "monitoring." opt.name) && - !(lib.hasPrefix "logging." opt.name) && - !(lib.hasPrefix "proxy." opt.name) - ) allOptions; - - monitoringOptions = lib.filter (opt: lib.hasPrefix "monitoring." opt.name) allOptions; - loggingOptions = lib.filter (opt: lib.hasPrefix "logging." opt.name) allOptions; - proxyOptions = lib.filter (opt: lib.hasPrefix "proxy." opt.name) allOptions; - - # Get actual service configuration to see what defaults are set - serviceConfigs = lib.mapAttrs (nodeName: node: - let - serviceConfig = node.config.homelab.services.''${serviceName} or null; - in - if serviceConfig != null then { - exists = true; - enabled = serviceConfig.enable or false; - # Extract the computed configuration values - monitoring = serviceConfig.monitoring or {}; - logging = serviceConfig.logging or {}; - proxy = serviceConfig.proxy or {}; - # Get other core options - coreConfig = removeAttrs serviceConfig ["monitoring" "logging" "proxy"]; - } else { - exists = false; - } - ) nodes; - - # Find a node where this service exists to get default values - nodeWithService = lib.findFirst (nodeName: serviceConfigs.''${nodeName}.exists) null (lib.attrNames nodes); - exampleConfig = if nodeWithService != null then serviceConfigs.''${nodeWithService} else null; - - in { - inherit serviceName; - coreOptions = coreOptions; - features = { - monitoring = { - available = monitoringOptions != []; - options = monitoringOptions; - defaults = if exampleConfig != null then exampleConfig.monitoring else {}; - }; - logging = { - available = loggingOptions != []; - options = loggingOptions; - defaults = if exampleConfig != null then exampleConfig.logging else {}; - }; - proxy = { - available = proxyOptions != []; - options = proxyOptions; - defaults = if exampleConfig != null then exampleConfig.proxy else {}; - }; - }; - deployment = { - totalNodes = lib.length (lib.filter (cfg: cfg.exists) (lib.attrValues serviceConfigs)); - enabledNodes = lib.length (lib.filter (cfg: cfg.exists && cfg.enabled) (lib.attrValues serviceConfigs)); - }; - } - ) homelabServices; - - in { - services = serviceInfo; - totalServices = lib.length (lib.attrNames serviceInfo); - } - ') - - total_services=$(echo "$services_catalog" | ${jq}/bin/jq -r '.totalServices') - - echo "## Overview" - echo - echo "**Total Available Services:** $total_services" - echo - - # Service matrix - echo "## Service Integration Matrix" - echo - echo "| Service | Core Options | Monitoring | Logging | Proxy | Deployments |" - echo "|---------|--------------|------------|---------|-------|-------------|" - - echo "$services_catalog" | ${jq}/bin/jq -r '.services | keys[]' | sort | while read -r service; do - 
service_data=$(echo "$services_catalog" | ${jq}/bin/jq -r ".services[\"$service\"]") - - core_count=$(echo "$service_data" | ${jq}/bin/jq -r '.coreOptions | length') - has_monitoring=$(echo "$service_data" | ${jq}/bin/jq -r '.features.monitoring.available') - has_logging=$(echo "$service_data" | ${jq}/bin/jq -r '.features.logging.available') - has_proxy=$(echo "$service_data" | ${jq}/bin/jq -r '.features.proxy.available') - enabled_deployments=$(echo "$service_data" | ${jq}/bin/jq -r '.deployment.enabledNodes') - - monitoring_icon=$(if [[ "$has_monitoring" == "true" ]]; then echo "📊"; else echo "❌"; fi) - logging_icon=$(if [[ "$has_logging" == "true" ]]; then echo "📝"; else echo "❌"; fi) - proxy_icon=$(if [[ "$has_proxy" == "true" ]]; then echo "🔀"; else echo "❌"; fi) - - echo "| \`$service\` | $core_count | $monitoring_icon | $logging_icon | $proxy_icon | $enabled_deployments |" - done - - echo - echo "**Legend:** 📊📝🔀 = Feature available, ❌ = Feature not available" - echo - - echo "## Service Documentation" - echo - - # Process each service - echo "$services_catalog" | ${jq}/bin/jq -r '.services | keys[]' | sort | while read -r service; do - echo "### $service" - echo - - service_data=$(echo "$services_catalog" | ${jq}/bin/jq -r ".services[\"$service\"]") - enabled_deployments=$(echo "$service_data" | ${jq}/bin/jq -r '.deployment.enabledNodes') - total_deployments=$(echo "$service_data" | ${jq}/bin/jq -r '.deployment.totalNodes') - - if [[ "$total_deployments" -gt 0 ]]; then - echo "**Deployment Status:** $enabled_deployments/$total_deployments nodes have this service enabled" - else - echo "**Deployment Status:** Available but not configured" - fi - echo - - # Core Service Configuration - echo "#### Core Service Options" - echo - echo "The main configuration options for $service:" - echo - echo '```nix' - echo "homelab.services.$service = {" - - echo "$service_data" | ${jq}/bin/jq -r '.coreOptions[] | @base64' | while IFS= read -r option_b64; do - option=$(echo "$option_b64" | base64 -d) - - name=$(echo "$option" | ${jq}/bin/jq -r '.name') - type=$(echo "$option" | ${jq}/bin/jq -r '.type') - default_val=$(echo "$option" | ${jq}/bin/jq -r '.default') - description=$(echo "$option" | ${jq}/bin/jq -r '.description') - read_only=$(echo "$option" | ${jq}/bin/jq -r '.readOnly') - - if [[ "$read_only" == "true" ]]; then - continue - fi - - clean_description=$(echo "$description" | sed 's/"/\\"/g' | tr -d $'\n\r') - - if [[ "$default_val" == "null" ]]; then - echo " # $name = <$type>; # $clean_description" - else - echo " $name = $default_val; # $clean_description" - fi - done - - echo "};" - echo '```' - echo - - # Feature Integrations - has_monitoring=$(echo "$service_data" | ${jq}/bin/jq -r '.features.monitoring.available') - has_logging=$(echo "$service_data" | ${jq}/bin/jq -r '.features.logging.available') - has_proxy=$(echo "$service_data" | ${jq}/bin/jq -r '.features.proxy.available') - - if [[ "$has_monitoring" == "true" || "$has_logging" == "true" || "$has_proxy" == "true" ]]; then - echo "#### Feature Integrations" - echo - - # Monitoring Feature - if [[ "$has_monitoring" == "true" ]]; then - echo "##### 📊 Monitoring Integration" - echo - echo "Available monitoring options:" - echo - echo '```nix' - echo "homelab.services.$service = {" - echo " # ... core options above ..." 
- echo - - echo "$service_data" | ${jq}/bin/jq -r '.features.monitoring.options[] | @base64' | while IFS= read -r option_b64; do - option=$(echo "$option_b64" | base64 -d) - - name=$(echo "$option" | ${jq}/bin/jq -r '.name') - type=$(echo "$option" | ${jq}/bin/jq -r '.type') - default_val=$(echo "$option" | ${jq}/bin/jq -r '.default') - description=$(echo "$option" | ${jq}/bin/jq -r '.description') - read_only=$(echo "$option" | ${jq}/bin/jq -r '.readOnly') - - if [[ "$read_only" == "true" ]]; then - continue - fi - - clean_description=$(echo "$description" | sed 's/"/\\"/g' | tr -d $'\n\r') - - if [[ "$default_val" == "null" ]]; then - echo " # $name = <$type>; # $clean_description" - else - echo " $name = $default_val; # $clean_description" - fi - done - - echo "};" - echo '```' - - # Show service-specific monitoring defaults - monitoring_defaults=$(echo "$service_data" | ${jq}/bin/jq -r '.features.monitoring.defaults') - if [[ "$monitoring_defaults" != "{}" && "$monitoring_defaults" != "null" ]]; then - echo - echo "**$service sets these monitoring defaults:**" - echo '```nix' - echo "$monitoring_defaults" | ${jq}/bin/jq -r 'to_entries[] | " \(.key) = \(.value);"' - echo '```' - fi - echo - fi - - # Logging Feature - if [[ "$has_logging" == "true" ]]; then - echo "##### 📝 Logging Integration" - echo - echo "Available logging options:" - echo - echo '```nix' - echo "homelab.services.$service = {" - echo " # ... core options above ..." - echo - - echo "$service_data" | ${jq}/bin/jq -r '.features.logging.options[] | @base64' | while IFS= read -r option_b64; do - option=$(echo "$option_b64" | base64 -d) - - name=$(echo "$option" | ${jq}/bin/jq -r '.name') - type=$(echo "$option" | ${jq}/bin/jq -r '.type') - default_val=$(echo "$option" | ${jq}/bin/jq -r '.default') - description=$(echo "$option" | ${jq}/bin/jq -r '.description') - read_only=$(echo "$option" | ${jq}/bin/jq -r '.readOnly') - - if [[ "$read_only" == "true" ]]; then - continue - fi - - clean_description=$(echo "$description" | sed 's/"/\\"/g' | tr -d $'\n\r') - - if [[ "$default_val" == "null" ]]; then - echo " # $name = <$type>; # $clean_description" - else - echo " $name = $default_val; # $clean_description" - fi - done - - echo "};" - echo '```' - - # Show service-specific logging defaults - logging_defaults=$(echo "$service_data" | ${jq}/bin/jq -r '.features.logging.defaults') - if [[ "$logging_defaults" != "{}" && "$logging_defaults" != "null" ]]; then - echo - echo "**$service sets these logging defaults:**" - echo '```nix' - echo "$logging_defaults" | ${jq}/bin/jq -r 'to_entries[] | " \(.key) = \(.value);"' - echo '```' - fi - echo - fi - - # Proxy Feature - if [[ "$has_proxy" == "true" ]]; then - echo "##### 🔀 Proxy Integration" - echo - echo "Available proxy options:" - echo - echo '```nix' - echo "homelab.services.$service = {" - echo " # ... core options above ..." 
- echo - - echo "$service_data" | ${jq}/bin/jq -r '.features.proxy.options[] | @base64' | while IFS= read -r option_b64; do - option=$(echo "$option_b64" | base64 -d) - - name=$(echo "$option" | ${jq}/bin/jq -r '.name') - type=$(echo "$option" | ${jq}/bin/jq -r '.type') - default_val=$(echo "$option" | ${jq}/bin/jq -r '.default') - description=$(echo "$option" | ${jq}/bin/jq -r '.description') - read_only=$(echo "$option" | ${jq}/bin/jq -r '.readOnly') - - if [[ "$read_only" == "true" ]]; then - continue - fi - - clean_description=$(echo "$description" | sed 's/"/\\"/g' | tr -d $'\n\r') - - if [[ "$default_val" == "null" ]]; then - echo " # $name = <$type>; # $clean_description" - else - echo " $name = $default_val; # $clean_description" - fi - done - - echo "};" - echo '```' - - # Show service-specific proxy defaults - proxy_defaults=$(echo "$service_data" | ${jq}/bin/jq -r '.features.proxy.defaults') - if [[ "$proxy_defaults" != "{}" && "$proxy_defaults" != "null" ]]; then - echo - echo "**$service sets these proxy defaults:**" - echo '```nix' - echo "$proxy_defaults" | ${jq}/bin/jq -r 'to_entries[] | " \(.key) = \(.value);"' - echo '```' - fi - echo - fi - fi - - echo "---" - echo - done - - echo "## Feature Reference" - echo - echo "### Integration Features" - echo - echo "Homelab services can integrate with three main features:" - echo - echo "- **📊 Monitoring**: Prometheus metrics and health checks" - echo "- **📝 Logging**: Centralized log collection with Promtail/Loki" - echo "- **🔀 Proxy**: Reverse proxy with SSL and authentication" - echo - echo "Each service can import these features and set service-specific defaults." - echo - echo "---" - echo - echo "*This documentation is generated from actual NixOS module evaluations.*" -'' diff --git a/profiles/proxmox-vm.nix b/profiles/proxmox-vm.nix deleted file mode 100644 index b3fb3aa..0000000 --- a/profiles/proxmox-vm.nix +++ /dev/null @@ -1,43 +0,0 @@ -# profiles/proxmox-vm.nix - Proxmox VM specific profile -{ - config, - lib, - modulesPath, - ... -}: { - imports = [ - (modulesPath + "/profiles/qemu-guest.nix") - ]; - - # Proxmox VM specific configuration - services.qemuGuest.enable = true; - - # Boot configuration for Proxmox VMs - boot = { - loader.grub = { - enable = true; - devices = ["nodev"]; - }; - growPartition = true; - tmp.cleanOnBoot = true; - - # Proxmox specific kernel modules - initrd.availableKernelModules = ["ata_piix" "uhci_hcd" "virtio_pci" "sr_mod" "virtio_blk"]; - }; - - # Standard Proxmox VM filesystem - fileSystems."/" = lib.mkDefault { - device = "/dev/disk/by-label/nixos"; - autoResize = true; - fsType = "ext4"; - }; - - # Update global config with Proxmox-specific info - homelab = { - location = lib.mkDefault "proxmox-cluster"; - tags = lib.mkDefault ["proxmox-vm" "homelab"]; - }; - - # VM-specific optimizations - services.fstrim.enable = true; -} diff --git a/proxmox-infra/.gitignore b/proxmox-infra/.gitignore new file mode 100644 index 0000000..9b8ce00 --- /dev/null +++ b/proxmox-infra/.gitignore @@ -0,0 +1,7 @@ +# proxmox-infra/.gitignore +.terraform/ + +*.tfstate +.tfstate. +crash.log +*.tfvars diff --git a/proxmox-infra/.terraform.lock.hcl b/proxmox-infra/.terraform.lock.hcl new file mode 100644 index 0000000..978a610 --- /dev/null +++ b/proxmox-infra/.terraform.lock.hcl @@ -0,0 +1,24 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. 
+ +provider "registry.opentofu.org/telmate/proxmox" { + version = "3.0.2-rc01" + constraints = "3.0.2-rc01" + hashes = [ + "h1:571ROPuTMC0w5lr9hbUXi7NVLsG3SpmZxXXZx8cAT+Q=", + "zh:34d264243a4513f4e30c01fb37cc6a3e592d7823dfd182c5edfb170ac7b7de3a", + "zh:544428311ad20fbb3ad2cd854e893bbf036023cb57c3acc5093d141976dac670", + "zh:5c2396b328edee8de7ac144c15a6b7e668e81063699bc8c110d7c39fb8da70e9", + "zh:5ca8e33476ad06a0259071120a59477e8f107f30c1178ea7b9f6cafe1a461ade", + "zh:5ea56eb8275edc754a01a0180750e9c939cd997d3a50659617770211f4337da9", + "zh:9dd3482df6bbe00a4a6152be3567b6c08d35c3644a327a1f5ac30fd95ccd449f", + "zh:a76075fafadcc94a825151aff169bae4e0c05e3c7717e16dcdcf16ffa61a0780", + "zh:b1d95f97b22f671db762f7adf428b409e6736c078bcf267d8391985b8847d6e3", + "zh:cc94255cd1b18e6a341c15089015c457c8c639c25c426b07f278d5ea9850b3b5", + "zh:ce991103cb69b0b3e275127e3ab92c88bb3b6b0f4e5a2cb082aeaef70a7f7d61", + "zh:d24838bce87b38e12544a1329f5ad30e2be045968e639a3f4ddd5c84aa648e04", + "zh:e106ebd4eea8d62d62e62f261a262febc615e17466b54ac18f7e65c7e79e0008", + "zh:e254ca76c95e6e92da973b7bddc36bfa0a1e31d7c7e758ef4b01315db969388b", + "zh:f1d1d5f4c39267cacebe0ab7e9e06caf9692707f3b5369685541b65bc8b840ce", + ] +} diff --git a/proxmox-infra/main.tf b/proxmox-infra/main.tf new file mode 100644 index 0000000..e9ef2ce --- /dev/null +++ b/proxmox-infra/main.tf @@ -0,0 +1,52 @@ +# # This calls the module to define a new VM (e.g., if you were creating one) +# resource "proxmox_vm_qemu" "sandbox" { +# name = "sandbox" +# desc = "OpenTofu testing" +# target_nodes = [var.proxmox_node] +# vmid= 100 +# full_clone = true +# clone_id = 9100 +# agent = 1 +# scsihw = "virtio-scsi-single" +# ciuser = "root" +# ipconfig0 = "ip=dhcp" +# cpu { +# cores = 2 +# } +# memory = 2048 +# disks { +# virtio { +# virtio0 { +# disk { +# size = "9452M" +# storage = "local-lvm" +# } +# } +# } +# ide { +# ide2 { +# cloudinit { +# storage = "local-lvm" +# } +# } +# } +# } +# network { +# id = 0 +# bridge = "vmbr0" +# model = "virtio" +# } +# serial { +# id = 0 +# } +# } + +# output "sandbox_vmid" { +# description = "sandbox VM ID" +# value = proxmox_vm_qemu.sandbox.id +# } + +# output "sandbox_ipv4" { +# description = "sandbox public IPv4 address" +# value = proxmox_vm_qemu.sandbox.default_ipv4_address +# } diff --git a/proxmox-infra/outputs.tf b/proxmox-infra/outputs.tf new file mode 100644 index 0000000..e69de29 diff --git a/proxmox-infra/provider.tf b/proxmox-infra/provider.tf new file mode 100644 index 0000000..0d04a2a --- /dev/null +++ b/proxmox-infra/provider.tf @@ -0,0 +1,9 @@ +provider "proxmox" { + pm_tls_insecure = true + pm_api_url = var.proxmox_api_url + pm_user = var.proxmox_user + pm_password = var.proxmox_password + # Or use API token for better security: + # pm_api_token_id = var.proxmox_api_token_id + # pm_api_token_secret = var.proxmox_api_token_secret +} diff --git a/proxmox-infra/sandbox.tf b/proxmox-infra/sandbox.tf new file mode 100644 index 0000000..7932732 --- /dev/null +++ b/proxmox-infra/sandbox.tf @@ -0,0 +1,106 @@ +# proxmox_vm_qemu.sandbox: +resource "proxmox_vm_qemu" "sandbox" { + agent = 1 + bios = "seabios" + boot = " " + ciuser = "root" + cores = 0 + current_node = "proxmox-01" + define_connection_info = false + desc = " generated by NixOS" + force_create = false + full_clone = false + hotplug = "network,disk,usb" + id = "proxmox-01/qemu/100" + ipconfig0 = "ip=dhcp" + kvm = true + linked_vmid = 0 + memory = 2048 + name = "sandbox" + numa = false + onboot = true + protection = false + qemu_os = "l26" + reboot_required = 
false + scsihw = "virtio-scsi-single" + sockets = 0 + sshkeys = <<-EOT + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCljEOf8Lv7Ptgsc1+CYzXpnrctPy7LFXXOyVZTI9uN7R4HY5aEdZTKEGSsU/+p+JtXWzzI65fnrZU8pTMG/wvCK+gYyNZcEM4g/TXMVa+CWZR3y13zGky88R7dKiBl5L00U4BePDD1ci3EU3/Mjr/GVTQHtkbJfLtvhR9zkCNZzxbu+rySWDroUPWPvE3y60/iLjBsh5ZmHo59CW67lh1jgbAlZjKWZzLWo0Bc5wgbxoQPWcO4BCh17N4g8llrRxGOwJzHeaipBnXn9J1AGIm9Zls6pxT9j6MKltcCOb7tQZwc3hlPOW2ku6f7OHTrziKw37drIDM0UDublAOcnIfBjE+XuWsp5t6ojdIzIDMrzaYW2MyMA3PHuf7VESUQdP4TZ1XUwtRRzOjn5AZJi9DPoowPaxKL92apRpFG+ovaFpWZsG7s8NWXHAC79IpgMUzscEmM15OMQ36RQ5xeytGDVCmVT8DbHGrMT9HUfR5fBSWD3aDQiOOiIIhrbY35m+U65Sz/GpZMk6HlaiV3tKNB0m+xE+84MUEmm4fFzt3B/0N4kscMArnLAm/OMUblihPwbKAUAUWErGRBfP+u+zjRCi1D9/pffpl2OQ2QIuVM82g6/EPa1ZsXZP+4iHooQoJbrqVGzkfiA1EKLfcdGfkP/O4nRl+D5UgkGdqqvm20NQ== root@proxmox-01 + EOT + tablet = true + target_nodes = [ + "proxmox-01", + ] + unused_disk = [] + vcpus = 0 + vm_state = "running" + vmid = 100 + + cpu { + cores = 2 + limit = 0 + numa = false + sockets = 1 + type = "host" + units = 0 + vcores = 0 + } + + disks { + ide { + ide2 { + cloudinit { + storage = "local-lvm" + } + } + } + virtio { + virtio0 { + disk { + backup = true + discard = false + format = "raw" + id = 0 + iops_r_burst = 0 + iops_r_burst_length = 0 + iops_r_concurrent = 0 + iops_wr_burst = 0 + iops_wr_burst_length = 0 + iops_wr_concurrent = 0 + iothread = false + linked_disk_id = -1 + mbps_r_burst = 0 + mbps_r_concurrent = 0 + mbps_wr_burst = 0 + mbps_wr_concurrent = 0 + readonly = false + replicate = true + size = "9452M" + storage = "local-lvm" + } + } + } + } + + network { + bridge = "vmbr0" + firewall = true + id = 0 + link_down = false + macaddr = "bc:24:11:a7:e8:2a" + model = "virtio" + mtu = 0 + queues = 0 + rate = 0 + tag = 0 + } + + serial { + id = 0 + type = "socket" + } + + smbios { + uuid = "37cd09d5-29a5-42e2-baba-f21b691130e8" + } +} diff --git a/proxmox-infra/terraform.tfstate.backup b/proxmox-infra/terraform.tfstate.backup new file mode 100644 index 0000000..8a8181b --- /dev/null +++ b/proxmox-infra/terraform.tfstate.backup @@ -0,0 +1 @@ +{"version":4,"terraform_version":"1.9.1","serial":2,"lineage":"ecd6c5f8-5352-bf30-6117-d55763366399","outputs":{"sandbox_ipv4":{"value":"192.168.1.206","type":"string"},"sandbox_vmid":{"value":"proxmox-01/qemu/999","type":"string"}},"resources":[{"mode":"managed","type":"proxmox_vm_qemu","name":"sandbox","provider":"provider[\"registry.opentofu.org/telmate/proxmox\"]","instances":[{"schema_version":0,"attributes":{"additional_wait":5,"agent":1,"agent_timeout":90,"args":"","automatic_reboot":true,"balloon":0,"bios":"seabios","boot":" ","bootdisk":"","ci_wait":null,"cicustom":null,"cipassword":"","ciupgrade":false,"ciuser":"root","clone":null,"clone_id":9100,"clone_wait":10,"cores":0,"cpu":[{"affinity":"","cores":2,"flags":[],"limit":0,"numa":false,"sockets":1,"type":"host","units":0,"vcores":0}],"cpu_type":"","current_node":"proxmox-01","default_ipv4_address":"192.168.1.206","default_ipv6_address":"2a05:f6c7:2030:0:be24:11ff:feb9:919f","define_connection_info":true,"desc":"OpenTofu 
testing","disk":[],"disks":[{"ide":[{"ide0":[],"ide1":[],"ide2":[{"cdrom":[],"cloudinit":[{"storage":"local-lvm"}],"disk":[],"ignore":false,"passthrough":[]}],"ide3":[]}],"sata":[],"scsi":[],"virtio":[{"virtio0":[{"cdrom":[],"disk":[{"asyncio":"","backup":true,"cache":"","discard":false,"format":"raw","id":0,"iops_r_burst":0,"iops_r_burst_length":0,"iops_r_concurrent":0,"iops_wr_burst":0,"iops_wr_burst_length":0,"iops_wr_concurrent":0,"iothread":false,"linked_disk_id":-1,"mbps_r_burst":0,"mbps_r_concurrent":0,"mbps_wr_burst":0,"mbps_wr_concurrent":0,"readonly":false,"replicate":false,"serial":"","size":"9452M","storage":"local-lvm","wwn":""}],"ignore":false,"passthrough":[]}],"virtio1":[],"virtio10":[],"virtio11":[],"virtio12":[],"virtio13":[],"virtio14":[],"virtio15":[],"virtio2":[],"virtio3":[],"virtio4":[],"virtio5":[],"virtio6":[],"virtio7":[],"virtio8":[],"virtio9":[]}]}],"efidisk":[],"force_create":false,"force_recreate_on_change_of":null,"full_clone":true,"hagroup":"","hastate":"","hostpci":[],"hotplug":"network,disk,usb","id":"proxmox-01/qemu/999","ipconfig0":"ip=dhcp","ipconfig1":null,"ipconfig10":null,"ipconfig11":null,"ipconfig12":null,"ipconfig13":null,"ipconfig14":null,"ipconfig15":null,"ipconfig2":null,"ipconfig3":null,"ipconfig4":null,"ipconfig5":null,"ipconfig6":null,"ipconfig7":null,"ipconfig8":null,"ipconfig9":null,"kvm":true,"linked_vmid":0,"machine":"","memory":2048,"name":"sandbox2","nameserver":null,"network":[{"bridge":"vmbr0","firewall":false,"id":0,"link_down":false,"macaddr":"bc:24:11:b9:91:9f","model":"virtio","mtu":0,"queues":0,"rate":0,"tag":0}],"numa":false,"onboot":false,"os_network_config":null,"os_type":null,"pci":[],"pcis":[],"pool":"","protection":false,"pxe":null,"qemu_os":"l26","reboot_required":false,"scsihw":"virtio-scsi-single","searchdomain":null,"serial":[{"id":0,"type":"socket"}],"skip_ipv4":false,"skip_ipv6":false,"smbios":[{"family":"","manufacturer":"","product":"","serial":"","sku":"","uuid":"51a93ec4-4afa-428b-911a-daab70390a8c","version":""}],"sockets":0,"ssh_forward_ip":null,"ssh_host":"192.168.1.206","ssh_port":"22","ssh_private_key":null,"ssh_user":null,"sshkeys":null,"startup":"","tablet":true,"tags":"v0.0.2","target_node":null,"target_nodes":["proxmox-01"],"timeouts":null,"tpm_state":[],"unused_disk":[],"usb":[],"usbs":[],"vcpus":0,"vga":[],"vm_state":"running","vmid":999},"sensitive_attributes":[[{"type":"get_attr","value":"cipassword"}],[{"type":"get_attr","value":"ssh_private_key"}]],"private":"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWZhdWx0IjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19"}]}],"check_results":null} diff --git a/proxmox-infra/variables.tf b/proxmox-infra/variables.tf new file mode 100644 index 0000000..71653f0 --- /dev/null +++ b/proxmox-infra/variables.tf @@ -0,0 +1,30 @@ +# proxmox-infra/variables.tf + +variable "proxmox_api_url" { + description = "The URL of the Proxmox API (e.g., https://192.168.1.10:8006/api2/json)" + type = string + # No default here, so OpenTofu will prompt or expect a .tfvars file/env var +} + +variable "proxmox_user" { + description = "Proxmox user (e.g., root@pam or user@pve)" + type = string +} + +variable "proxmox_password" { + description = "Proxmox user password" + type = string + sensitive = true # Mark as sensitive to hide in logs +} + +variable "proxmox_node" { + description = "The Proxmox node name where VMs will be deployed (e.g., 'pve')" + type = string +} + +# Example 
for templates - you might have different templates +variable "nixos_template_id" { + description = "VMID of the nixos cloud-init template" + type = number + # Example: default = 100 +} diff --git a/proxmox-infra/versions.tf b/proxmox-infra/versions.tf new file mode 100644 index 0000000..3ca35cc --- /dev/null +++ b/proxmox-infra/versions.tf @@ -0,0 +1,9 @@ +# versions.tf +terraform { + required_providers { + proxmox = { + source = "Telmate/proxmox" + version = "3.0.2-rc01" + } + } +} diff --git a/scripts/config.nix b/scripts/config.nix deleted file mode 100644 index 36603b2..0000000 --- a/scripts/config.nix +++ /dev/null @@ -1,18 +0,0 @@ -{ - nodes, - lib, - ... -}: let - extractGlobal = name: node: - if node ? config.homelab.global - then { - ${name} = { - hostname = node.config.homelab.global.hostname; - monitoring = map (e: "${e.name}:${toString e.port}") node.config.homelab.global.monitoring.endpoints; - backups = map (b: "${b.name}(${b.backend})") node.config.homelab.global.backups.jobs; - proxy = map (p: "${p.subdomain}.${node.config.homelab.global.domain}") node.config.homelab.global.reverseProxy.entries; - }; - } - else {}; -in - lib.foldl (acc: name: acc // (extractGlobal name nodes.${name})) {} (builtins.attrNames nodes) diff --git a/scripts/deploy-homelab.sh b/scripts/deploy-homelab.sh deleted file mode 100755 index f5bdb17..0000000 --- a/scripts/deploy-homelab.sh +++ /dev/null @@ -1,115 +0,0 @@ -# Helper script: scripts/deploy-homelab.sh -#!/bin/bash -set -euo pipefail - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -NC='\033[0m' # No Color - -echo -e "${GREEN}=== Homelab Deployment Script ===${NC}" - -# Function to print colored output -log() { - echo -e "${GREEN}[INFO]${NC} $1" -} - -warn() { - echo -e "${YELLOW}[WARN]${NC} $1" -} - -error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Check if colmena is available -if ! command -v colmena &> /dev/null; then - error "colmena is not installed. Please install it first." - exit 1 -fi - -# Parse arguments -COMMAND=${1:-"deploy"} -TARGET=${2:-""} - -case $COMMAND in - "deploy") - if [ -n "$TARGET" ]; then - log "Deploying to specific target: $TARGET" - colmena apply --on "$TARGET" - else - log "Deploying to all targets" - colmena apply - fi - ;; - "build") - if [ -n "$TARGET" ]; then - log "Building specific target: $TARGET" - colmena build --on "$TARGET" - else - log "Building all targets" - colmena build - fi - ;; - "status") - log "Checking deployment status" - colmena apply --dry-run - ;; - "config") - log "Showing global configuration summary" - # Extract global configs from all nodes - colmena eval ./scripts/config.nix | jq . - ;; - "backup-status") - log "Checking backup status across all nodes" - if [ -n "$TARGET" ]; then - colmena exec --on "$TARGET" -- backup-status - else - colmena exec -- backup-status - fi - ;; - "monitoring") - log "Collecting monitoring endpoints" - nix eval --json .#colmena --apply 'colmena: - let - lib = (import {}).lib; - nodes = removeAttrs colmena ["meta"]; - collectEndpoints = lib.flatten ( - lib.mapAttrsToList (name: node: - if node ? config.homelab.global.monitoring.endpoints then - map (e: { - node = name; - hostname = node.config.homelab.global.hostname; - endpoint = "${e.name}:${toString e.port}${e.path}"; - job = e.jobName; - }) node.config.homelab.global.monitoring.endpoints - else [] - ) nodes - ); - in collectEndpoints - ' | jq . 
- ;; - "help") - echo "Usage: $0 [COMMAND] [TARGET]" - echo "" - echo "Commands:" - echo " deploy [TARGET] Deploy to all nodes or specific target" - echo " build [TARGET] Build configuration for all nodes or specific target" - echo " status Show deployment status (dry-run)" - echo " config Show global configuration summary" - echo " backup-status Check backup status on all nodes" - echo " monitoring List all monitoring endpoints" - echo " help Show this help message" - echo "" - echo "Examples:" - echo " $0 deploy media-server # Deploy only to media-server" - echo " $0 build # Build all configurations" - echo " $0 config # Show global config summary" - ;; - *) - error "Unknown command: $COMMAND" - echo "Run '$0 help' for usage information" - exit 1 - ;; -esac diff --git a/scripts/generate-docs.sh b/scripts/generate-docs.sh deleted file mode 100755 index 8ab1f4a..0000000 --- a/scripts/generate-docs.sh +++ /dev/null @@ -1,41 +0,0 @@ -# scripts/generate-docs.sh -#!/bin/bash - -echo "# Homelab Global Configuration Documentation" -echo -echo "This document describes the global configuration system for the NixOS homelab." -echo -echo "## Available Services" -echo - -# List all service modules -find modules/nixos/services -name "*.nix" | while read -r file; do - service=$(basename "$file" .nix) - echo "### $service" - echo - # Extract description from the module - grep -m1 "mkEnableOption" "$file" | sed 's/.*mkEnableOption "\([^"]*\)".*/\1/' || echo "Service module for $service" - echo -done - -echo "## Configuration Examples" -echo -echo "### Basic Media Server Setup" -echo '```nix' -echo 'media-server = { ... }: {' -echo ' homelab.global = {' -echo ' enable = true;' -echo ' hostname = "media-server";' -echo ' domain = "homelab.local";' -echo ' };' -echo ' services.jellyfin.enable = true;' -echo '};' -echo '```' -echo - -echo "### Monitoring Configuration" -echo '```nix' -echo 'monitoring = { nodes, ... 
}: {' -echo ' services.prometheus.scrapeConfigs = collectMonitoringEndpoints nodes;' -echo '};' -echo '```'# modules/global-config.nix diff --git a/scripts/validate-config.nix b/scripts/validate-config.nix deleted file mode 100644 index c30369d..0000000 --- a/scripts/validate-config.nix +++ /dev/null @@ -1,79 +0,0 @@ -# scripts/validate-config.nix -{ - lib, - pkgs, -}: let - inherit (lib) types mkOption; - - # Validation functions - validateBackupJob = job: let - errors = - [] - ++ ( - if job.paths == [] - then ["Backup job '${job.name}' has no paths defined"] - else [] - ) - ++ ( - if !(builtins.elem job.backend ["restic" "borg" "rclone"]) - then ["Invalid backup backend: ${job.backend}"] - else [] - ) - ++ ( - if job.schedule == "" - then ["Backup job '${job.name}' has no schedule defined"] - else [] - ); - in - errors; - - validateMonitoringEndpoint = endpoint: let - errors = - [] - ++ ( - if endpoint.port < 1 || endpoint.port > 65535 - then ["Invalid port ${toString endpoint.port} for endpoint '${endpoint.name}'"] - else [] - ) - ++ ( - if endpoint.jobName == "" - then ["Monitoring endpoint '${endpoint.name}' has no job name"] - else [] - ); - in - errors; - - validateReverseProxyEntry = entry: let - errors = - [] - ++ ( - if entry.subdomain == "" - then ["Reverse proxy entry has no subdomain defined"] - else [] - ) - ++ ( - if entry.port < 1 || entry.port > 65535 - then ["Invalid port ${toString entry.port} for subdomain '${entry.subdomain}'"] - else [] - ); - in - errors; - - validateGlobalConfig = config: let - backupErrors = lib.flatten (map validateBackupJob config.backups.jobs); - monitoringErrors = lib.flatten (map validateMonitoringEndpoint config.monitoring.endpoints); - proxyErrors = lib.flatten (map validateReverseProxyEntry config.reverseProxy.entries); - allErrors = backupErrors ++ monitoringErrors ++ proxyErrors; - in - if allErrors == [] - then { - valid = true; - errors = []; - } - else { - valid = false; - errors = allErrors; - }; -in { - inherit validateGlobalConfig validateBackupJob validateMonitoringEndpoint validateReverseProxyEntry; -} diff --git a/secrets/.gitignore b/secrets/.gitignore deleted file mode 100644 index c996e50..0000000 --- a/secrets/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.key diff --git a/secrets/default.nix b/secrets/default.nix deleted file mode 100644 index 0b1b9c4..0000000 --- a/secrets/default.nix +++ /dev/null @@ -1,42 +0,0 @@ -{ - config, - lib, - pkgs, - ... 
-}: { - # SOPS configuration - sops = { - age.keyFile = "/run/keys/age.key"; - defaultSopsFile = ./secrets.yaml; - - # Define secrets that all systems need - secrets = { - # SSH keys - # "ssh/plasmagoat_private_key" = { - # owner = "plasmagoat"; - # mode = "0600"; - # path = "/home/plasmagoat/.ssh/id_rsa"; - # }; - - # # Age key for the system - # "age/system_key" = { - # mode = "0600"; - # path = "/run/keys/age.key"; - # }; - - # # Backup credentials - # "backup/restic_password" = { - # path = "/etc/backup/restic-password"; - # mode = "0600"; - # }; - }; - }; - - # Deployment keys for colmena - deployment.keys = { - "age.key" = { - destDir = "/run/keys"; - keyFile = "/home/plasmagoat/.config/age/age.key"; # Your local age key - }; - }; -} diff --git a/secrets/secrets.yaml b/secrets/secrets.yaml index f2eaf9b..6a8d487 100644 --- a/secrets/secrets.yaml +++ b/secrets/secrets.yaml @@ -25,18 +25,6 @@ service_accounts: password: ENC[AES256_GCM,data:PpUHEhNfnR1eg7DmnO7tyNciNE4Tsx/Y4uL92gqiods=,iv:DNKQfymvgEu/iEW8t79m0ZmKTU0Ffasu+gp2KOIAK3o=,tag:lGKw5dbXqImDJNVX6p8kLg==,type:str] mail: password: ENC[AES256_GCM,data:6lfziq1zXlFxCAFWv5co3MkBgwaWixjHHX9riQXCbe0=,iv:/t4CnW3bKUDxfpE/qGf1LPs0ciivRMkfgJ1nMseruy4=,tag:TWApzLsm2HV+JMaZLG/Kig==,type:str] - ente: - password: ENC[AES256_GCM,data:bQxiCr9OgFU7oSGjkEO43iH9L2nikvvFQZsjGurtOFM=,iv:LIwzaZARQgiGdOLfpebJkKO0I71I3kX8mq8W1WC2lT4=,tag:VxK6oON6th9b1YhvC7cjjA==,type:str] -ente: - minio: - root_password: ENC[AES256_GCM,data:TIurrIEjWKdMYzIZY3dp00ert90=,iv:5kT06lUUlRC9J4DVwo7RDdxAM8zCJwwjWOF9YAZbbmk=,tag:qk7Cszn39kPijkr71ckxvg==,type:str] - root_user: ENC[AES256_GCM,data:wPj8SBzeohdG,iv:bSgCKGc+X+oofpYN0yV1aQNhAvWzcw9CTaK3FzUBKj4=,tag:ArUwveqBWXDRc5eSPZYa9g==,type:str] -cloudflare: - dns-api-token: ENC[AES256_GCM,data:/NroEdwOwqY30nwLLzO9RvEYPttDIw85A0M81fOPJSzEodtF95VCPA==,iv:BN5xZhSyvoZiXZk096KYpj59qns6hHg3PvhWC6c2sXo=,tag:3DaP3/p/JTM+beBRacGzSA==,type:str] - zone-api-token: ENC[AES256_GCM,data:FoMHKi5q+d97+pxUsyyNZAxGGgBRsZsNMePL5OeD3pcBIqtZP9MP5g==,iv:yRno4aJRlVyFTZVmat9tmFGFI1ghLw2vW+Py0+viFdE=,tag:pooTMSOsBZdbN/AE5J04MA==,type:str] -restic: - default-password: ENC[AES256_GCM,data:9gHH8V00XFveogOhVl0nLvq3olI=,iv:+wdSlZXnkTw1VKXesx3GMy5yz+kPf2FlYSPNXMB0Y0o=,tag:jHKQTfvm+G+L+Fb+3qP+rA==,type:str] - default-repository: ENC[AES256_GCM,data:znNTSZknMvL5ceINgt0iHA==,iv:taobWUuT4nKfzegk329dzFGIOdL03d6kw8JlgO1E78Y=,tag:kgM4551xZcaxzZw58AqBBQ==,type:str] sops: age: - recipient: age1n20y9kmdh324m3tkclvhmyuc7c8hk4w84zsal725adahwl8nzq0s04aq4y @@ -48,7 +36,7 @@ sops: QzNYRk5ERmR4aGtLQ3dwQ1lPeDZyaEkKJMLXqv6tBBql7VVnWDIwAh24SfQ2O6Ca CEOQTGEonbqr5doWqTsXUXrdQAS0amL45UdT6ITFtfNAjaHwCMfhZg== -----END AGE ENCRYPTED FILE----- - lastmodified: "2025-07-27T17:17:02Z" - mac: ENC[AES256_GCM,data:i0S0G7D+yPiCWaiCmI++N0EKpED0uGpsEs+3Mc1LbLaHj5kFUMAbOPPl/QGDGhq2eL99+w1PKOfmdHYe2AdtsIhkIQ0F0FUkgItSjdOKlh0hKI+Hk2OqpfA6PRLlZT5dh8r0q0WcI1JPE46egNogjN2za4i6KrkjnTRSchhrxNg=,iv:k0BZ8b+5kmMqaKi9dx6fibIGVVJLZRa3oApwa/fWVdE=,tag:DbV4ZS/ciZVSi+aE0wOZfg==,type:str] + lastmodified: "2025-07-16T15:33:06Z" + mac: ENC[AES256_GCM,data:nZm7N8qfANzHadtW+3eTJljpmYejJdKGFO44iw40CnwlGgb454us9LZbQIAYkNiS7UkivoWa5BqvgLcpObHNAn3tVi+ha0jySIrAmp43y5ilmg76fvL4znel4Nk7eRiGoF3t3xiCR39/3l7PPffx2RJ6PerEyGBpiUZ6mBcWoTE=,iv:UmhSynpMdTnY0R6jwDJts13b0rKsaRFlCizdM2oargE=,tag:Q2xh/QXFOQYqqkxKs7nujA==,type:str] unencrypted_suffix: _unencrypted version: 3.10.2 diff --git a/users/default.nix b/users/default.nix deleted file mode 100644 index b4edc6d..0000000 --- a/users/default.nix +++ /dev/null @@ -1,3 +0,0 @@ -{ - 
defaultUser = import ./plasmagoat.nix; -} diff --git a/users/plasmagoat.nix b/users/plasmagoat.nix deleted file mode 100644 index 037b202..0000000 --- a/users/plasmagoat.nix +++ /dev/null @@ -1,30 +0,0 @@ -{ - config, - lib, - pkgs, - ... -}: { - users.users.plasmagoat = { - isNormalUser = true; - description = "plasmagoat"; - extraGroups = ["wheel" "docker" "backup"]; - shell = pkgs.bash; # or pkgs.zsh, pkgs.fish - - # SSH keys managed through secrets - openssh.authorizedKeys.keys = [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeg/n/vst9KME8byhxX2FhA+FZNQ60W38kkNt45eNzK5zFqBYuwo1nDXVanJSh9unRvB13b+ygpZhrb4sHvkETGWiEioc49MiWr8czEhu6Wpo0vv5MAJkiYvGZUYPdUW52jUzWcYdw8PukG2rowrxL5G0CmsqLwHMPU2FyeCe5aByFI/JZb8R80LoEacgjUiipJcoLWUVgG2koMomHClqGu+16kB8nL5Ja3Kc9lgLfDK7L0A5R8JXhCjrlEsmXbxZmwDKuxvjDAZdE9Sl1VZmMDfWkyrRlenrt01eR3t3Fec6ziRm5ZJk9e2Iu1DPoz+PoHH9aZGVwmlvvnr/gMF3OILxcqb0qx+AYlCCnb6D6pJ9zufhZkKcPRS1Q187F6fz+v2oD1xLZWFHJ92+7ItM0WmbDOHOC29s5EA6wNm3iXZCq86OI3n6T34njDtPqh6Z7Pk2sdK4GBwnFj4KwEWXvdKZKSX1qb2EVlEBE9QI4Gf3eg4SiBu2cAFt3nOSzs8c= asol\dbs@ALPHA-DBS-P14sG2" - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+U3DWOrklcA8n8wdbLBGyli5LsJI3dpL2Zod8mx8eOdC4H127ZT1hzuk2uSmkic4c73BykPyQv8rcqwaRGW94xdMRanKmHYxnbHXo5FBiGrCkNlNNZuahthAGO49c6sUhJMq0eLhYOoFWjtf15sr5Zu7Ug2YTUL3HXB1o9PZ3c9sqYHo2rC/Il1x2j3jNAMKST/qUZYySvdfNJEeQhMbQcdoKJsShcE3oGRL6DFBoV/mjJAJ+wuDhGLDnqi79nQjYfbYja1xKcrKX+D3MfkFxFl6ZIzomR1t75AnZ+09oaWcv1J7ehZ3h9PpDBFNXvzyLwDBMNS+UYcH6SyFjkUbF David@NZXT" - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air" - ]; - }; - - # Root SSH access (for deployment) - users.users.root.openssh.authorizedKeys.keys = [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCeg/n/vst9KME8byhxX2FhA+FZNQ60W38kkNt45eNzK5zFqBYuwo1nDXVanJSh9unRvB13b+ygpZhrb4sHvkETGWiEioc49MiWr8czEhu6Wpo0vv5MAJkiYvGZUYPdUW52jUzWcYdw8PukG2rowrxL5G0CmsqLwHMPU2FyeCe5aByFI/JZb8R80LoEacgjUiipJcoLWUVgG2koMomHClqGu+16kB8nL5Ja3Kc9lgLfDK7L0A5R8JXhCjrlEsmXbxZmwDKuxvjDAZdE9Sl1VZmMDfWkyrRlenrt01eR3t3Fec6ziRm5ZJk9e2Iu1DPoz+PoHH9aZGVwmlvvnr/gMF3OILxcqb0qx+AYlCCnb6D6pJ9zufhZkKcPRS1Q187F6fz+v2oD1xLZWFHJ92+7ItM0WmbDOHOC29s5EA6wNm3iXZCq86OI3n6T34njDtPqh6Z7Pk2sdK4GBwnFj4KwEWXvdKZKSX1qb2EVlEBE9QI4Gf3eg4SiBu2cAFt3nOSzs8c= asol\\dbs@ALPHA-DBS-P14sG2" - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+U3DWOrklcA8n8wdbLBGyli5LsJI3dpL2Zod8mx8eOdC4H127ZT1hzuk2uSmkic4c73BykPyQv8rcqwaRGW94xdMRanKmHYxnbHXo5FBiGrCkNlNNZuahthAGO49c6sUhJMq0eLhYOoFWjtf15sr5Zu7Ug2YTUL3HXB1o9PZ3c9sqYHo2rC/Il1x2j3jNAMKST/qUZYySvdfNJEeQhMbQcdoKJsShcE3oGRL6DFBoV/mjJAJ+wuDhGLDnqi79nQjYfbYja1xKcrKX+D3MfkFxFl6ZIzomR1t75AnZ+09oaWcv1J7ehZ3h9PpDBFNXvzyLwDBMNS+UYcH6SyFjkUbF David@NZXT" - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICUP7m8jZJiclZGfSje8CeBYFhX10SrdtjYziuChmj1X plasmagoat@macbook-air" - ]; - - # Home directory management (optional) - # You could add home-manager here if you want -}
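
For reference, the new `proxmox-infra/variables.tf` introduced above deliberately declares `proxmox_api_url`, `proxmox_user`, `proxmox_password`, `proxmox_node`, and `nixos_template_id` without defaults, so OpenTofu will prompt for them or read them from a `.tfvars` file or `TF_VAR_*` environment variables. A minimal sketch of what a (git-ignored) `terraform.tfvars` could look like follows; every value is a placeholder assumption illustrating the shape, not a real credential or a confirmed setting of this repo.

```hcl
# terraform.tfvars — placeholder values only; keep this file out of version control
proxmox_api_url   = "https://192.168.1.10:8006/api2/json"  # example URL from the variable description
proxmox_user      = "root@pam"                              # or a dedicated user@pve account
proxmox_node      = "proxmox-01"                            # target Proxmox node name
nixos_template_id = 100                                     # VMID of the NixOS cloud-init template

# proxmox_password is marked sensitive; it can instead be supplied via the
# environment so it never lands in a file:
#   export TF_VAR_proxmox_password='...'
```

Since OpenTofu reads any `TF_VAR_<name>` environment variable as the value of the matching input variable, the sensitive password can be injected from a CI secret (e.g. in the Forgejo workflow) rather than stored in a tfvars file.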