
[{"content":" Accessing the Cluster Remotely with Netbird # To access the cluster from outside of the home network we can deploy the Netbird operator for Kubernetes. Similar to tailscale, this allows us to access all of our services through a private mesh network. The operator handles the creation of network router nodes to create secure Wireguard tunnels to our exposed services.\nPrerequisites # Using the Netbird dashboard, a DNS zone, named \u0026ldquo;homelab\u0026rdquo; in this case, must be creted along with a srevice user api key with admin permissions.\nInstallation # Helm # We can use the official Helm chart to install the Netbird Operator: { self, inputs, ... }: { flake.modules.nixos.netbird-operator-charts = { config, lib, pkgs, ... }: let netbirdOperatorChart = { name = \u0026#34;kubernetes-operator\u0026#34;; repo = \u0026#34;https://netbirdio.github.io/helms\u0026#34;; version = \u0026#34;0.3.1\u0026#34;; hash = \u0026#34;sha256-MWO1YYzbXxT+OCmjAeGchsp1bl/Dw4D0TQKXHlwIvw0=\u0026#34;; }; in { config = lib.mkIf config.netbird-operator.enable { services.k3s.autoDeployCharts = { netbird-operator = netbirdOperatorChart // { targetNamespace = \u0026#34;netbird\u0026#34;; createNamespace = true; }; }; }; }; } Preloading the Operator and Router Images # The images can be preloaded as usual with their corresponding helm values set: { self, inputs, ... }: { flake.modules.nixos.netbird-operator-images = { config, lib, pkgs, ... 
}: let operatorImage = pkgs.dockerTools.pullImage { imageName = \u0026#34;netbirdio/kubernetes-operator\u0026#34;; imageDigest = \u0026#34;sha256:57740157b4d7c0ce1356f6c1c3cc0f4c6573600eadbee642334b3070fb51899a\u0026#34;; sha256 = \u0026#34;sha256-bnpB50R8k6POBq+IuZ9UpA0qdw6qhEmIoQXx9EzYrbY=\u0026#34;; finalImageTag = \u0026#34;0.3.1\u0026#34;; arch = \u0026#34;amd64\u0026#34;; }; routerImage = pkgs.dockerTools.pullImage { imageName = \u0026#34;netbirdio/netbird\u0026#34;; imageDigest = \u0026#34;sha256:b1487a94f432aa706275ebbbbdff3605bf927b056d63855f3d43966cb68c64dc\u0026#34;; sha256 = \u0026#34;sha256-fMR/IP3PM/fQfYkl+IeoTWkp++oFY9NGu7MP/qb29W8=\u0026#34;; finalImageTag = \u0026#34;0.70.0-rootless\u0026#34;; arch = \u0026#34;amd64\u0026#34;; }; in { config = lib.mkIf config.netbird-operator.enable { services.k3s = { images = [ operatorImage routerImage ]; autoDeployCharts.netbird-operator.values = { operator.image = { repository = operatorImage.imageName; tag = operatorImage.imageTag; }; routingClientImage = \u0026#34;${routerImage.imageName}:${routerImage.imageTag}\u0026#34;; }; }; }; }; } Configuring the Network Router # Using the netbird.io api we can deploy our network routers: { self, inputs, ... }: { flake.modules.nixos.netbird-operator-router = { config, lib, pkgs, ... 
}: { config = lib.mkIf config.netbird-operator.enable { services.k3s.autoDeployCharts.netbird-operator.extraDeploy = [ { apiVersion = \u0026#34;netbird.io/v1alpha1\u0026#34;; kind = \u0026#34;NetworkRouter\u0026#34;; metadata = { name = \u0026#34;homelab\u0026#34;; namespace = \u0026#34;netbird\u0026#34;; }; spec = { dnsZoneRef.name = \u0026#34;homelab\u0026#34;; workloadOverride = { replicas = 1; podTemplate.spec = { dnsConfig.options = [ { name = \u0026#34;ndots\u0026#34;; value = \u0026#34;0\u0026#34;; } ]; resources = { requests.cpu = \u0026#34;100m\u0026#34;; requests.memory = \u0026#34;128Mi\u0026#34;; limits.cpu = \u0026#34;250m\u0026#34;; limits.memory = \u0026#34;256Mi\u0026#34;; }; }; }; }; } ]; }; }; } Adding the Netbird Secret # Using sops-nix we can add a separate manifest to deploy the required Kubernetes secrets: { self, inputs, ... }: { flake.modules.nixos.netbird-operator-secrets = { config, lib, pkgs, ... }: { config = lib.mkIf (config.netbird-operator.enable \u0026amp;\u0026amp; config.secrets.enable \u0026amp;\u0026amp; config.secrets.netbird-operator.enable) { sops = { secrets = { \u0026#34;netbird/key\u0026#34; = { }; }; templates = { netbirdMgmtApiKey = { content = builtins.toJSON { apiVersion = \u0026#34;v1\u0026#34;; kind = \u0026#34;Secret\u0026#34;; metadata = { name = \u0026#34;netbird-mgmt-api-key\u0026#34;; namespace = \u0026#34;netbird\u0026#34;; }; type = \u0026#34;Opaque\u0026#34;; immutable = true; stringData = { NB_API_KEY = config.sops.placeholder.\u0026#34;netbird/key\u0026#34;; }; }; path = \u0026#34;/var/lib/rancher/k3s/server/manifests/netbird-mgmt-api-key.json\u0026#34;; }; }; }; }; }; } Exposing Services # Once installed, we can deploy NetworkResource manifests to expose Kubernetes services: { self, inputs, ... }: { flake.modules.nixos.immich-services = { config, lib, pkgs, ... 
}: { config = lib.mkIf config.immich.enable { services.k3s.autoDeployCharts.immich = { values.service.main = { type = \u0026#34;LoadBalancer\u0026#34;; annotations = { \u0026#34;metallb.io/address-pool\u0026#34; = \u0026#34;default\u0026#34;; \u0026#34;metallb.io/allow-shared-ip\u0026#34; = \u0026#34;immich\u0026#34;; \u0026#34;metallb.io/loadBalancerIPs\u0026#34; = \u0026#34;192.168.1.205\u0026#34;; }; }; extraDeploy = [ { apiVersion = \u0026#34;v1\u0026#34;; kind = \u0026#34;Service\u0026#34;; metadata = { name = \u0026#34;immich\u0026#34;; namespace = \u0026#34;immich\u0026#34;; }; spec = { type = \u0026#34;ClusterIP\u0026#34;; selector = { \u0026#34;app.kubernetes.io/controller\u0026#34; = \u0026#34;main\u0026#34;; \u0026#34;app.kubernetes.io/instance\u0026#34; = \u0026#34;immich\u0026#34;; \u0026#34;app.kubernetes.io/name\u0026#34; = \u0026#34;server\u0026#34;; }; ports = [ { name = \u0026#34;http\u0026#34;; port = 80; targetPort = 2283; protocol = \u0026#34;TCP\u0026#34;; } ]; }; } { apiVersion = \u0026#34;netbird.io/v1alpha1\u0026#34;; kind = \u0026#34;NetworkResource\u0026#34;; metadata = { name = \u0026#34;immich\u0026#34;; namespace = \u0026#34;immich\u0026#34;; }; spec = { networkRouterRef = { name = \u0026#34;homelab\u0026#34;; namespace = \u0026#34;netbird\u0026#34;; }; serviceRef = { name = \u0026#34;immich\u0026#34;; namespace = \u0026#34;immich\u0026#34;; }; groups = [ { name = \u0026#34;All\u0026#34;; } ]; }; } ]; }; }; }; } Managing the Cluster Network in Netbird # With our services deployed, we can manage the generate resources in the \u0026ldquo;homelab\u0026rdquo; network on the Netbird dashboard: ","externalUrl":null,"permalink":"/docs/kubernetes/03-netbird-operator/","section":"Docs","summary":"","title":"Kubernetes: Netbird Operator","type":"docs"},{"content":" Deploying Helm Charts # To install a Helm chart on NixOS we can use the services.k3s.autoDeployCharts.\u0026lt;chart\u0026gt; config value to define the chart to be imported, the 
namespace it is to be deployed to as well as the values to be passed to the chart. Images can also be preloaded using services.k3s.images config value. This can be done like:\n{ inputs, ... }: { flake.modules.nixos.grafana = { config, lib, pkgs, ... }: let chart = { name = \u0026#34;grafana\u0026#34;; repo = \u0026#34;https://grafana-community.github.io/helm-charts\u0026#34;; version = \u0026#34;11.1.7\u0026#34;; hash = \u0026#34;sha256-KSHxBROOLZeaf7CeqFm6mStp58AnRgQaclWRHyJL/FU=\u0026#34;; }; image = pkgs.dockerTools.pullImage { imageName = \u0026#34;grafana/grafana\u0026#34;; imageDigest = \u0026#34;sha256:62a54c76afbeea0b8523b7afcd9e7ee1f0e39806035fd90ffc333a19e9358f2f\u0026#34;; sha256 = \u0026#34;sha256-OhTmnRsqpgJbNxOD4zNUehEaX2l28HNxKJ9Nec2XLfs=\u0026#34;; finalImageTag = \u0026#34;12.3.3\u0026#34;; arch = \u0026#34;amd64\u0026#34;; }; in { options = { monitoring.grafana.enable = lib.mkEnableOption \u0026#34;prometheus service on k3s\u0026#34;; secrets.grafana.enable = lib.mkEnableOption \u0026#34;grafana secrets\u0026#34;; }; config = lib.mkIf config.monitoring.grafana.enable { services.k3s = { images = [ image ]; autoDeployCharts.grafana = chart // { targetNamespace = \u0026#34;monitoring\u0026#34;; createNamespace = true; values = { replicas = 1; image = { repository = image.imageName; tag = image.imageTag; }; adminUser = \u0026#34;admin\u0026#34;; adminPassword = \u0026#34;changeme\u0026#34;; admin = if (config.secrets.enable \u0026amp;\u0026amp; config.secrets.grafana.enable) then { existingSecret = \u0026#34;grafana-secrets\u0026#34;; } else { }; persistence = { enabled = true; storageClassName = \u0026#34;longhorn\u0026#34;; size = \u0026#34;10Gi\u0026#34;; }; service.enable = false; resources = { requests.cpu = \u0026#34;50m\u0026#34;; requests.memory = \u0026#34;128Mi\u0026#34;; limits.cpu = \u0026#34;300m\u0026#34;; limits.memory = \u0026#34;256Mi\u0026#34;; }; datasources = { \u0026#34;datasources.yaml\u0026#34; = { apiVersion = 1; datasources = 
[ { name = \u0026#34;Prometheus\u0026#34;; type = \u0026#34;prometheus\u0026#34;; access = \u0026#34;proxy\u0026#34;; url = \u0026#34;http://192.168.1.210:9090\u0026#34;; isDefault = true; editable = false; } { name = \u0026#34;Loki\u0026#34;; type = \u0026#34;loki\u0026#34;; access = \u0026#34;proxy\u0026#34;; url = \u0026#34;http://192.168.1.210:3100\u0026#34;; editable = false; } ]; }; }; }; extraDeploy = [ { apiVersion = \u0026#34;v1\u0026#34;; kind = \u0026#34;Service\u0026#34;; metadata = { name = \u0026#34;grafana-lb\u0026#34;; namespace = \u0026#34;monitoring\u0026#34;; annotations = { \u0026#34;metallb.io/address-pool\u0026#34; = \u0026#34;default\u0026#34;; \u0026#34;metallb.io/allow-shared-ip\u0026#34; = \u0026#34;monitoring\u0026#34;; }; }; spec = { type = \u0026#34;LoadBalancer\u0026#34;; loadBalancerIP = \u0026#34;192.168.1.210\u0026#34;; selector = { \u0026#34;app.kubernetes.io/name\u0026#34; = \u0026#34;grafana\u0026#34;; \u0026#34;app.kubernetes.io/instance\u0026#34; = \u0026#34;grafana\u0026#34;; }; ports = [ { name = \u0026#34;http\u0026#34;; port = 3000; targetPort = 3000; } ]; }; } ]; }; }; } }; } Deploying Raw Manifests # To install a service using raw manifests on NixOS we can use the services.k3s.manifests.\u0026lt;service-name\u0026gt; config value to define the manifests to be deployed. Images can also be preloaded using services.k3s.images config value. This can be done like:\n{ inputs, ... }: { flake.modules.nixos.transmission = { config, lib, pkgs, ... 
}: let image = pkgs.dockerTools.pullImage { imageName = \u0026#34;linuxserver/transmission\u0026#34;; imageDigest = \u0026#34;sha256:978b9e0b06eda2cfed79c861fc8ca440b8b29e45dc9dc2522daa67c3818a0d88\u0026#34;; sha256 = \u0026#34;sha256-uQWuUyhumbEmxTgYzhWtLjg6z+67qQqlRZ2W134ZHbA=\u0026#34;; finalImageTag = \u0026#34;4.0.6\u0026#34;; arch = \u0026#34;amd64\u0026#34;; }; in { options = { media-server.transmission.enable = lib.mkEnableOption \u0026#34;transmission manifest on k3s\u0026#34;; }; config = lib.mkIf (config.media-server.enable \u0026amp;\u0026amp; config.media-server.transmission.enable) { services.k3s = { images = [ image ]; manifests.transmission.content = [ { apiVersion = \u0026#34;v1\u0026#34;; kind = \u0026#34;PersistentVolumeClaim\u0026#34;; metadata = { name = \u0026#34;transmission-config\u0026#34;; namespace = \u0026#34;media\u0026#34;; }; spec = { accessModes = [ \u0026#34;ReadWriteOnce\u0026#34; ]; storageClassName = \u0026#34;longhorn\u0026#34;; resources.requests.storage = \u0026#34;5Gi\u0026#34;; }; } { apiVersion = \u0026#34;v1\u0026#34;; kind = \u0026#34;PersistentVolumeClaim\u0026#34;; metadata = { name = \u0026#34;transmission-watch\u0026#34;; namespace = \u0026#34;media\u0026#34;; }; spec = { accessModes = [ \u0026#34;ReadWriteOnce\u0026#34; ]; storageClassName = \u0026#34;longhorn\u0026#34;; resources.requests.storage = \u0026#34;5Gi\u0026#34;; }; } { apiVersion = \u0026#34;apps/v1\u0026#34;; kind = \u0026#34;Deployment\u0026#34;; metadata = { name = \u0026#34;transmission\u0026#34;; namespace = \u0026#34;media\u0026#34;; }; spec = { replicas = 1; selector.matchLabels.app = \u0026#34;transmission\u0026#34;; template = { metadata.labels.app = \u0026#34;transmission\u0026#34;; spec = { containers = [ { name = \u0026#34;transmission\u0026#34;; image = \u0026#34;${image.imageName}:${image.imageTag}\u0026#34;; ports = [ { containerPort = 9091; } { containerPort = 51413; protocol = \u0026#34;TCP\u0026#34;; } { containerPort = 51413; protocol = 
\u0026#34;UDP\u0026#34;; } ]; env = [ { name = \u0026#34;PUID\u0026#34;; value = \u0026#34;1000\u0026#34;; } { name = \u0026#34;PGID\u0026#34;; value = \u0026#34;1000\u0026#34;; } ]; resources = { requests.cpu = \u0026#34;50m\u0026#34;; requests.memory = \u0026#34;128Mi\u0026#34;; limits.cpu = \u0026#34;300m\u0026#34;; limits.memory = \u0026#34;256Mi\u0026#34;; }; startupProbe = { httpGet = { path = \u0026#34;/transmission/web/\u0026#34;; port = 9091; }; failureThreshold = 30; periodSeconds = 5; }; readinessProbe = { httpGet = { path = \u0026#34;/transmission/web/\u0026#34;; port = 9091; }; initialDelaySeconds = 15; periodSeconds = 10; timeoutSeconds = 2; failureThreshold = 3; }; livenessProbe = { httpGet = { path = \u0026#34;/transmission/web/\u0026#34;; port = 9091; }; initialDelaySeconds = 30; periodSeconds = 20; timeoutSeconds = 2; failureThreshold = 3; }; volumeMounts = [ { name = \u0026#34;config\u0026#34;; mountPath = \u0026#34;/config\u0026#34;; } { name = \u0026#34;watch\u0026#34;; mountPath = \u0026#34;/watch\u0026#34;; } { name = \u0026#34;downloads\u0026#34;; mountPath = \u0026#34;/downloads\u0026#34;; } ]; } ]; volumes = [ { name = \u0026#34;config\u0026#34;; persistentVolumeClaim.claimName = \u0026#34;transmission-config\u0026#34;; } { name = \u0026#34;watch\u0026#34;; persistentVolumeClaim.claimName = \u0026#34;transmission-watch\u0026#34;; } { name = \u0026#34;downloads\u0026#34;; persistentVolumeClaim.claimName = \u0026#34;downloads\u0026#34;; } ]; dnsConfig.options = [ { name = \u0026#34;ndots\u0026#34;; value = \u0026#34;0\u0026#34;; } ]; }; }; }; } { apiVersion = \u0026#34;v1\u0026#34;; kind = \u0026#34;Service\u0026#34;; metadata = { name = \u0026#34;transmission-lb\u0026#34;; namespace = \u0026#34;media\u0026#34;; annotations = { \u0026#34;metallb.io/address-pool\u0026#34; = \u0026#34;default\u0026#34;; \u0026#34;metallb.io/allow-shared-ip\u0026#34; = \u0026#34;media\u0026#34;; }; }; spec = { type = \u0026#34;LoadBalancer\u0026#34;; 
loadBalancerIP = \u0026#34;192.168.1.202\u0026#34;; selector = { \u0026#34;app\u0026#34; = \u0026#34;transmission\u0026#34;; }; ports = [ { name = \u0026#34;http\u0026#34;; port = 9091; targetPort = 9091; } { name = \u0026#34;peer\u0026#34;; port = 51413; targetPort = 51413; protocol = \u0026#34;TCP\u0026#34;; } { name = \u0026#34;peer-udp\u0026#34;; port = 51413; targetPort = 51413; protocol = \u0026#34;UDP\u0026#34;; } ]; }; } ]; }; }; }; } ","externalUrl":null,"permalink":"/docs/kubernetes/02-deploying-services/","section":"Docs","summary":"","title":"Kubernetes: Deploying Services","type":"docs"},{"content":" Introduction # This documentation follows on from my NixOS documentation and describes the creation of a single node k3s cluster using nix. A mixture of helm and raw manifests will be used but all will be defined in pure nix as part of my nix-config flake.\nFeatures # ⚖️ MetalLB 🗃️ Longhorn 🛢️ CloudnativePG ⛽ Valkey 🐦 Netbird 🏠 Homepage 📊 Grafana 🪣 Forgejo ☁️ Nextcloud 📼 Jellyfin 🏴‍☠️ Servarr Extra Reading # Defining k3s in Pure Nix\n","externalUrl":null,"permalink":"/docs/kubernetes/01-introduction/","section":"Docs","summary":"","title":"Kubernetes: Introduction","type":"docs"},{"content":" Introduction # The Dendritic Pattern is a simple, yet powerful approach to defining complex Nix projects containing a mix of nixos and home-manager modules without the need for spaghetti code or complex wiring functions. This is accomplished using the Flake Parts module to define every aspect as a top level function, all of which may be merged at evaluation time. 
Each module can be imported at the flake level using the handy Import-Tree module.\nDefining a dendritic flake # The flake.nix of a dendritic nixos configuration should contain only three elements:\nA description of the flake A list of imports for the flake A single output making use of the flake parts mkFlake function A simple flake to create a nixos-configuration with modules defined in the modules subdirectory looks like:\n{ description = \u0026#34;Robbie\u0026#39;s NixOS flake\u0026#34;; inputs = { nixpkgs = { url = \u0026#34;github:NixOS/nixpkgs/nixos-25.11\u0026#34;; }; }; outputs = inputs: inputs.flake-parts.lib.mkFlake { inherit inputs; } (inputs.import-tree ./modules); } Defining a module for basic settings # A single module can be defined for re-used nix and home-manager settings to reduce code duplication across configurations: { self, inputs, ... }: { flake.modules.nixos.settings = { config, lib, pkgs, ... }: { imports = [ inputs.home-manager.nixosModules.home-manager inputs.impermanence.nixosModules.impermanence inputs.sops-nix.nixosModules.sops inputs.disko.nixosModules.disko inputs.stylix.nixosModules.stylix ]; nix.settings = { auto-optimise-store = true; experimental-features = [ \u0026#34;nix-command\u0026#34; \u0026#34;flakes\u0026#34; ]; }; nixpkgs = { config.allowUnfree = true; overlays = [ self.overlays.unstable-packages self.overlays.additional-packages ]; }; home-manager = { useGlobalPkgs = true; useUserPackages = true; backupFileExtension = \u0026#34;backup\u0026#34;; sharedModules = [ { imports = [ inputs.sops-nix.homeManagerModules.sops inputs.stylix.homeModules.stylix ]; home.stateVersion = \u0026#34;25.11\u0026#34;; } ]; }; stylix.homeManagerIntegration.autoImport = false; users.mutableUsers = false; system.stateVersion = \u0026#34;25.11\u0026#34;; }; } Defining a feature module # Though modules are composable, each module file should handle a single responsibility. 
For example, a desktop module may contain many individual feature modules to cover the desktop environments, audio interfaces, etc\u0026hellip;\\\nSingle feature # An \u0026ldquo;audio\u0026rdquo; module may look like: { inputs, ... }: { flake.modules.nixos.audio = { config, lib, pkgs, ... }: { options = { audio.enable = lib.mkEnableOption \u0026#34;audio using pipewire\u0026#34;; }; config = lib.mkIf config.audio.enable { services.pipewire = { enable = true; pulse.enable = lib.mkDefault true; }; }; }; } Multi-feature # This may be imported into a high-level \u0026ldquo;desktop\u0026rdquo; module like: { inputs, ... }: { flake.modules.nixos.desktop = { config, lib, pkgs, ... }: { imports = [ inputs.self.modules.nixos.audio inputs.self.modules.nixos.bluetooth inputs.self.modules.nixos.cosmic-desktop inputs.self.modules.nixos.kde-plasma inputs.self.modules.nixos.kde-connect inputs.self.modules.nixos.printing inputs.self.modules.nixos.scanning inputs.self.modules.nixos.steam inputs.self.modules.nixos.virtualisation inputs.self.modules.nixos.qmk ]; options = { desktopEnvironment = lib.mkOption { type = lib.types.enum [ \u0026#34;plasma\u0026#34; \u0026#34;cosmic\u0026#34; ]; default = \u0026#34;plasma\u0026#34;; description = \u0026#34;Select desktop environment: Plasma or COSMIC.\u0026#34;; }; }; config = { services.flatpak.enable = true; bootloader.pretty = lib.mkDefault true; audio.enable = lib.mkDefault true; bluetooth.enable = lib.mkDefault true; cosmic-desktop.enable = if config.desktopEnvironment == \u0026#34;cosmic\u0026#34; then true else false; kde-plasma.enable = if config.desktopEnvironment == \u0026#34;plasma\u0026#34; then true else false; kde-connect.enable = lib.mkDefault true; printing.enable = lib.mkDefault true; scanning.enable = lib.mkDefault true; steam.enable = lib.mkDefault true; virtualisation.enable = lib.mkDefault true; qmk.enable = lib.mkDefault true; home-manager.sharedModules = [ { imports = [ 
inputs.nix-flatpak.homeManagerModules.nix-flatpak inputs.plasma-manager.homeModules.plasma-manager inputs.cosmic-manager.homeManagerModules.cosmic-manager ]; } ]; assertions = [ { assertion = !(config.cosmic-desktop.enable \u0026amp;\u0026amp; config.kde-plasma.enable); message = \u0026#34;Cannot enable both COSMIC and Plasma at the same time.\u0026#34;; } ]; }; }; } The factory method # Adding a named \u0026ldquo;factory\u0026rdquo; flake module with an unspecified attribute list as its type will allow for the creation of factory modules. These modules look similar to typical nixos or home-manager modules with the exception that they take an additional attribute set as the initial argument and can then be re-used to instantiate multiple modules using the same logic.\nThe factory module should look like: { lib, ... }: { options.flake.factory = lib.mkOption { type = lib.types.attrsOf lib.types.unspecified; default = { }; }; } This flake module can then be used to instantiate a desktop-user nixos module like: { self, inputs, ... }: { config.flake.factory.desktop-user = { username, isAdmin, }: { config, lib, pkgs, ... 
}: { config = lib.mkMerge [ { users.users.\u0026#34;${username}\u0026#34; = { initialPassword = lib.mkDefault \u0026#34;password\u0026#34;; isNormalUser = true; home = \u0026#34;/home/${username}\u0026#34;; extraGroups = [ \u0026#34;networkmanager\u0026#34; \u0026#34;docker\u0026#34; \u0026#34;libvirtd\u0026#34; ] ++ lib.optionals isAdmin [ \u0026#34;wheel\u0026#34; ]; }; home-manager.users.\u0026#34;${username}\u0026#34; = { imports = [ inputs.self.modules.homeManager.development inputs.self.modules.homeManager.utilities inputs.self.modules.homeManager.web inputs.self.modules.homeManager.gaming inputs.self.modules.homeManager.editing inputs.self.modules.homeManager.backup ]; }; } (lib.mkIf (config.secrets.enable \u0026amp;\u0026amp; config.secrets.passwords.enable) { sops.secrets.\u0026#34;passwords/${username}\u0026#34;.neededForUsers = true; users.users.${username} = { initialPassword = null; hashedPasswordFile = config.sops.secrets.\u0026#34;passwords/${username}\u0026#34;.path; }; }) (lib.mkIf config.impermanence.enable { environment.persistence.\u0026#34;/persist\u0026#34;.users.${username} = { directories = [ \u0026#34;Desktop\u0026#34; \u0026#34;Documents\u0026#34; \u0026#34;Downloads\u0026#34; \u0026#34;Music\u0026#34; \u0026#34;Pictures\u0026#34; \u0026#34;Videos\u0026#34; \u0026#34;Games\u0026#34; \u0026#34;Books\u0026#34; \u0026#34;nix-config\u0026#34; { directory = \u0026#34;.ssh\u0026#34;; mode = \u0026#34;0700\u0026#34;; } { directory = \u0026#34;.local/share/keyrings\u0026#34;; mode = \u0026#34;0700\u0026#34;; } { directory = \u0026#34;.local/share/kwalletd\u0026#34;; mode = \u0026#34;0700\u0026#34;; } \u0026#34;.local/share/flatpak\u0026#34; \u0026#34;.local/share/Steam\u0026#34; \u0026#34;.local/share/PrismLauncher\u0026#34; \u0026#34;.local/state/cosmic\u0026#34; \u0026#34;.local/state/cosmic-comp\u0026#34; \u0026#34;.config\u0026#34; \u0026#34;.var\u0026#34; \u0026#34;.vscode-oss/extensions\u0026#34; ]; files = [ 
\u0026#34;.bash_history\u0026#34; \u0026#34;.zsh_history\u0026#34; ]; }; }) ]; }; } Creating a system configuration # Bringing everything together, we can create a complete NixOS system configuration using the flake.nixosConfigurations.\u0026lt;hostname\u0026gt; function. A laptop system module may look like: { self, inputs, lib, ... }: { flake.modules.nixos.robbie-laptop = lib.mkMerge [ (self.factory.desktop-user { username = \u0026#34;robbie\u0026#34;; isAdmin = true; }) { home-manager.users.robbie = { programs.git = { enable = true; settings.user = { name = \u0026#34;robbiejennings\u0026#34;; email = \u0026#34;robbie.jennings97@gmail.com\u0026#34;; }; }; theme = { image = { url = \u0026#34;https://raw.githubusercontent.com/AngelJumbo/gruvbox-wallpapers/refs/heads/main/wallpapers/photography/forest-2.jpg\u0026#34;; hash = \u0026#34;sha256-RqzCCnn4b5kU7EYgaPF19Gr9I5cZrkEdsTu+wGaaMFI=\u0026#34;; }; base16Scheme = \u0026#34;gruvbox-material-dark-hard\u0026#34;; }; secrets = { enable = true; vuescan.enable = true; rclone.enable = true; restic.enable = true; }; }; } ]; flake.nixosConfigurations.xps15 = inputs.nixpkgs.lib.nixosSystem { modules = [ inputs.self.modules.nixos.settings inputs.self.modules.nixos.xps15 inputs.self.modules.nixos.core inputs.self.modules.nixos.desktop inputs.self.modules.nixos.robbie-laptop { networking.hostName = \u0026#34;xps15\u0026#34;; desktopEnvironment = \u0026#34;cosmic\u0026#34;; secrets = { enable = true; passwords.enable = true; }; impermanence.enable = true; environment.persistence.\u0026#34;/persist\u0026#34; = { hideMounts = true; directories = [ \u0026#34;/var/log\u0026#34; \u0026#34;/var/lib/bluetooth\u0026#34; \u0026#34;/var/lib/nixos\u0026#34; \u0026#34;/var/lib/systemd/coredump\u0026#34; \u0026#34;/var/lib/libvirt\u0026#34; \u0026#34;/var/lib/netbird\u0026#34; \u0026#34;/etc/NetworkManager/system-connections\u0026#34; \u0026#34;/etc/nixos\u0026#34; \u0026#34;/root/.ssh\u0026#34; ]; }; } ]; }; } 
","externalUrl":null,"permalink":"/docs/nixos/03-dendritic-pattern/","section":"Docs","summary":"","title":"NixOS: The Dendritic Pattern","type":"docs"},{"content":" Installation # Provision Disks using Disko # Disks must first be prepared for installation. Thankfully, this can be fully automated in Nix using the handy Disko tool. Be careful to take full backups as all data on the host will be erased upon formatting. If installing a system with LUKS encryption, you will be prompted to add a secure password.\n# Provision disks sudo nix run --experimental-features \u0026#34;nix-command flakes\u0026#34; github:nix-community/disko/latest -- --mode destroy,format,mount --flake github:robbiejennings/nix-config#\u0026lt;system\u0026gt; Install a NixOS system # Once disko is finished formatting disks and initialising the required filesystems we can move onto the NixOS installation. Again, this is a simple one-line command where you will be asked for a secure root password.\n# Install NixOS sudo nixos-install --flake github:robbiejennings/nix-config#\u0026lt;system\u0026gt; ","externalUrl":null,"permalink":"/docs/nixos/02-installation/","section":"Docs","summary":"","title":"NixOS: Installation","type":"docs"},{"content":" Introduction # This documentation covers my NixOS configuration used to deploy my desktop and homelab systems. Specifically, the patterns adopted and unique features are described with links to source-code where applicable.\nFeatures # 🔒 Secrets 💾 Impermanence 💽 Disko 🪝 Git-hooks 🔃 Automatic updates 📦 Flatpak installation 🖌️ Stylix Theming 🗄️ Restic backup Extra Reading # My Nix Config\nDendritic Pattern\nDendritic Pattern - Extra Documentation\n","externalUrl":null,"permalink":"/docs/nixos/01-introduction/","section":"Docs","summary":"","title":"NixOS: Introduction","type":"docs"},{"content":"By far the biggest limiting factor to setting up a darkroom at home is space. 
This is why I set out to make the most out of Ilford\u0026rsquo;s pop-up darkroom tent to make the smallest darkroom capable of making large (16\u0026quot;x20\u0026quot;) prints possible.\nA 60\u0026quot;x120\u0026quot; tabletop with cheap workbench and DIY sliding shelves made from ikea wardrobe furniture make up the basic structure. On top sits a Durst M605 colour enlarger, AP safelight, Patterson timer and focus finder and Beard adjustable masking easel. A pair of flexible lamps allow for easy inspection of developed prints without the need to open the lightproof tent.\nThe next tier consists of a sliding \u0026ldquo;wet section\u0026rdquo; which contains three 8\u0026quot;x10\u0026quot; development trays colour coded for development (red), stop bath (white) and fixer (grey). A fourth tray used for washing test prints sits above the fixer tray for easy transfer with minimal risk for cross contamination. Larger prints can be made using the JOBO 2850 processing drum and a set of rollers.\nThe third tier is a shelf for additional printing papers and easels of various sizes along with a Patterson contact printer. 
Below is a larger shelf for drum, measuring cylinder and chemistry storage along with an ikea storage box for accessories such as squeegees, scissors, gloves and film hanging clips.\nAll of this fits snugly in my garden shed taking up less than 1.5sqm of floor space and without the need for blacking out the whole room!\n","date":"11 May 2026","externalUrl":null,"permalink":"/posts/darkroom/","section":"Posts","summary":"","title":"Building my Darkroom Shed","type":"posts"},{"content":"","date":"11 May 2026","externalUrl":null,"permalink":"/categories/","section":"Categories","summary":"","title":"Categories","type":"categories"},{"content":"","date":"11 May 2026","externalUrl":null,"permalink":"/tags/darkroom/","section":"Tags","summary":"","title":"Darkroom","type":"tags"},{"content":"","date":"11 May 2026","externalUrl":null,"permalink":"/categories/photography/","section":"Categories","summary":"","title":"Photography","type":"categories"},{"content":"","date":"11 May 2026","externalUrl":null,"permalink":"/posts/","section":"Posts","summary":"","title":"Posts","type":"posts"},{"content":"","date":"11 May 2026","externalUrl":null,"permalink":"/","section":"Robbie Jennings","summary":"","title":"Robbie Jennings","type":"page"},{"content":"","date":"11 May 2026","externalUrl":null,"permalink":"/tags/","section":"Tags","summary":"","title":"Tags","type":"tags"},{"content":"In December of 2025 I made my first ever trip to the US to visit my girlfriend\u0026rsquo;s parents and see the city of Houston where she grew up. As part of this trip, we ventured out for a three-day trip to New Orleans, the birthplace of jazz and the city of Voodoo.\nThe day prior Nola was hit with tragedy as a terrorist attack took the lives of fifteen innocent people celebrating the new year. I remember a sense of unease and serious questioning as to whether or not it would be safe for us to go that morning. 
Upon our arrival however I was touched by the tenacity and solidarity shown by the people of the Big Easy.\nWreaths and memorials were laid out at the site of the incident, not five minutes from my hotel. What was more striking though was the upbeat music that was played, the artists exhibiting their fine works, the poets clacking away at typewriters on folding tables. People here were deeply hurt by the tragedy, but they were in no way going to let it divide them or dim the rich culture that lights up their streets.\nLive bands were at every corner to treat the ears to jazz, blues and more than a few renditions of \u0026ldquo;House of the Rising Sun\u0026rdquo;. Without question this was the most musical place I had ever been and I loved every second of it. A quiet night at a jazz bar with drinks and songs dispersed with light-hearted stories told by the biggest piano man I had ever seen topped the experience off perfectly.\nMy girlfriend and her mother both got Voodoo readings while I had my first ever po\u0026rsquo;boy sandwich. I don\u0026rsquo;t know about the whole fortune-telling thing but the food in Louisiana is enough to want to come back for more.\n","date":"9 May 2026","externalUrl":null,"permalink":"/posts/new-orleans/","section":"Posts","summary":"","title":"My Trip to New Orleans","type":"posts"},{"content":"In November 2024 I visited Vienna for a weekend getaway as well as to see my girlfriend\u0026rsquo;s favourite band \u0026ldquo;Cigarettes After Sex\u0026rdquo; perform. An incredibly picturesque city, I couldn\u0026rsquo;t help but feel they had things just that little bit more figured out than the rest of us. Little noise, clean streets and pretty and affordable housing make for quite the calming environment in contrast to Dublin town.\nArt galleries and music halls are the most notorious attractions and though I didn\u0026rsquo;t get to hear any Mozart, I did get to see a limited-time exhibition of the works of Rembrandt. 
I am by no means an art aficionado but wow, did those paintings pop out of the frame. Honestly, I was surprised by just how much I enjoyed my day there.\nUnfortunately, we were just a tad too early to see the Christmas markets but at least we have a good excuse to return. I fancy another pint of Stiegl.\n","date":"8 May 2026","externalUrl":null,"permalink":"/posts/vienna/","section":"Posts","summary":"","title":"My Trip to Vienna","type":"posts"},{"content":" Phoenix Park Deer Bray Head Tree Bray Head Trunk Bray Head Moon Charlie Headshot Charlie Leaf Millie Papal Cross Seafront Kiosk New Orleans Vienna ","date":"1 January 2000","externalUrl":null,"permalink":"/gallery/","section":"Robbie Jennings","summary":"","title":"Gallery","type":"page"},{"content":"Welcome to my blog!\nA space for me to post my photography, document my code and share my travels and thoughts. I run an old-school black \u0026amp; white darkroom in my garden shed and a new-school Kubernetes and NixOS-based homelab in my office.\nPreviously, I have worked professionally as a backend engineer at Murex and Aerlytix with experience in building responsive APIs for modern web services as well as database management.\nMy camera collection includes a Nikon FE, Rolleiflex Automat and Bronica SQ-A. My darkroom consists of a Durst M605 Colour enlarger and Ilford pop-up darkroom tent. I also digitise my prints and negatives using an Epson v550 flatbed scanner and Vuescan software.\nMy homelab serves all kinds of useful applications including Nextcloud, Forgejo, Immich, Jellyfin and the suite of Servarr applications. All of this is accessible anywhere in the world using my Netbird mesh network.\nIf any of this sounds interesting to you then you\u0026rsquo;re in the right place! 
I hope you enjoy my projects and stories.\n","externalUrl":null,"permalink":"/about/","section":"Robbie Jennings","summary":"","title":"About Me","type":"page"},{"content":"","externalUrl":null,"permalink":"/authors/","section":"Authors","summary":"","title":"Authors","type":"authors"},{"content":"","externalUrl":null,"permalink":"/categories/development/","section":"Categories","summary":"","title":"Development","type":"categories"},{"content":"","externalUrl":null,"permalink":"/docs/","section":"Docs","summary":"","title":"Docs","type":"docs"},{"content":"","externalUrl":null,"permalink":"/categories/homelab/","section":"Categories","summary":"","title":"Homelab","type":"categories"},{"content":"","externalUrl":null,"permalink":"/series/kubernetes/","section":"Series","summary":"","title":"Kubernetes","type":"series"},{"content":"","externalUrl":null,"permalink":"/tags/kubernetes/","section":"Tags","summary":"","title":"Kubernetes","type":"tags"},{"content":"","externalUrl":null,"permalink":"/series/nixos/","section":"Series","summary":"","title":"NixOS","type":"series"},{"content":"","externalUrl":null,"permalink":"/tags/nixos/","section":"Tags","summary":"","title":"NixOS","type":"tags"},{"content":"","externalUrl":null,"permalink":"/series/","section":"Series","summary":"","title":"Series","type":"series"}]