re-format with nixfmt
checks / check-links (pull_request) Successful in 21s
checks / checks-impure (pull_request) Successful in 1m50s
checks / checks (pull_request) Successful in 4m23s

Jörg Thalheim 2024-03-17 19:48:49 +01:00
parent 916e4dff84
commit e296a3019d
87 changed files with 2122 additions and 1650 deletions
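
The reformatting below was produced with nixfmt in the RFC style (packaged as nixfmt-rfc-style); the repository's treefmt configuration (see the formatter.nix hunk further down) runs deadnix first and then nixfmt-rfc-style over every *.nix file. A minimal sketch of reproducing the same pass locally, assuming the flake exposes the treefmt wrapper as its formatter (treefmt-nix's flake module does this by default); the file name below is only a placeholder:

    # Sketch, not necessarily the exact commands used for this commit:
    nix fmt                                              # run the flake's configured treefmt formatter over the tree

    # roughly what the treefmt nix formatter does per file:
    nix run nixpkgs#deadnix -- --edit some-module.nix    # remove dead code in place
    nix run nixpkgs#nixfmt-rfc-style -- some-module.nix  # reformat in place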

@ -14,21 +14,27 @@ let
};
in
{
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_backup_client; };
flake.nixosConfigurations = {
inherit (clan.nixosConfigurations) test_backup_client;
};
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_backup_server = { ... }: {
imports = [
self.clanModules.borgbackup
];
services.sshd.enable = true;
services.borgbackup.repos.testrepo = {
authorizedKeys = [
(builtins.readFile ../lib/ssh/pubkey)
];
test_backup_server =
{ ... }:
{
imports = [ self.clanModules.borgbackup ];
services.sshd.enable = true;
services.borgbackup.repos.testrepo = {
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
};
};
};
test_backup_client = { pkgs, lib, config, ... }:
test_backup_client =
{
pkgs,
lib,
config,
...
}:
let
dependencies = [
self
@ -38,14 +44,10 @@ in
closureInfo = pkgs.closureInfo { rootPaths = dependencies; };
in
{
imports = [
self.clanModules.borgbackup
];
imports = [ self.clanModules.borgbackup ];
networking.hostName = "client";
services.sshd.enable = true;
users.users.root.openssh.authorizedKeys.keyFiles = [
../lib/ssh/pubkey
];
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup.ssh" = {
@ -78,65 +80,64 @@ in
clan.borgbackup.destinations.test_backup_server.repo = "borg@server:.";
};
};
perSystem = { nodes, pkgs, ... }: {
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
test-backups =
(import ../lib/test-base.nix)
{
name = "test-backups";
nodes.server = {
imports = [
self.nixosModules.test_backup_server
self.nixosModules.clanCore
{
clanCore.machineName = "server";
clanCore.clanDir = ../..;
}
];
};
nodes.client = {
imports = [
self.nixosModules.test_backup_client
self.nixosModules.clanCore
{
clanCore.machineName = "client";
clanCore.clanDir = ../..;
}
];
};
perSystem =
{ nodes, pkgs, ... }:
{
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
test-backups = (import ../lib/test-base.nix) {
name = "test-backups";
nodes.server = {
imports = [
self.nixosModules.test_backup_server
self.nixosModules.clanCore
{
clanCore.machineName = "server";
clanCore.clanDir = ../..;
}
];
};
nodes.client = {
imports = [
self.nixosModules.test_backup_client
self.nixosModules.clanCore
{
clanCore.machineName = "client";
clanCore.clanDir = ../..;
}
];
};
testScript = ''
import json
start_all()
testScript = ''
import json
start_all()
# setup
client.succeed("mkdir -m 700 /root/.ssh")
client.succeed(
"cat ${../lib/ssh/privkey} > /root/.ssh/id_ed25519"
)
client.succeed("chmod 600 /root/.ssh/id_ed25519")
client.wait_for_unit("sshd", timeout=30)
client.succeed("ssh -o StrictHostKeyChecking=accept-new root@client hostname")
# setup
client.succeed("mkdir -m 700 /root/.ssh")
client.succeed(
"cat ${../lib/ssh/privkey} > /root/.ssh/id_ed25519"
)
client.succeed("chmod 600 /root/.ssh/id_ed25519")
client.wait_for_unit("sshd", timeout=30)
client.succeed("ssh -o StrictHostKeyChecking=accept-new root@client hostname")
# dummy data
client.succeed("mkdir /var/test-backups")
client.succeed("echo testing > /var/test-backups/somefile")
# dummy data
client.succeed("mkdir /var/test-backups")
client.succeed("echo testing > /var/test-backups/somefile")
# create
client.succeed("clan --debug --flake ${../..} backups create test_backup_client")
client.wait_until_succeeds("! systemctl is-active borgbackup-job-test_backup_server")
# create
client.succeed("clan --debug --flake ${../..} backups create test_backup_client")
client.wait_until_succeeds("! systemctl is-active borgbackup-job-test_backup_server")
# list
backup_id = json.loads(client.succeed("borg-job-test_backup_server list --json"))["archives"][0]["archive"]
assert(backup_id in client.succeed("clan --debug --flake ${../..} backups list test_backup_client"))
# list
backup_id = json.loads(client.succeed("borg-job-test_backup_server list --json"))["archives"][0]["archive"]
assert(backup_id in client.succeed("clan --debug --flake ${../..} backups list test_backup_client"))
# restore
client.succeed("rm -f /var/test-backups/somefile")
client.succeed(f"clan --debug --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
assert(client.succeed("cat /var/test-backups/somefile").strip() == "testing")
'';
}
{ inherit pkgs self; };
# restore
client.succeed("rm -f /var/test-backups/somefile")
client.succeed(f"clan --debug --flake ${../..} backups restore test_backup_client borgbackup {backup_id}")
assert(client.succeed("cat /var/test-backups/somefile").strip() == "testing")
'';
} { inherit pkgs self; };
};
};
};
}

@ -1,48 +1,51 @@
(import ../lib/test-base.nix) ({ ... }: {
name = "borgbackup";
(import ../lib/test-base.nix) (
{ ... }:
{
name = "borgbackup";
nodes.machine = { self, pkgs, ... }: {
imports = [
self.clanModules.borgbackup
self.nixosModules.clanCore
nodes.machine =
{ self, pkgs, ... }:
{
services.openssh.enable = true;
services.borgbackup.repos.testrepo = {
authorizedKeys = [
(builtins.readFile ../lib/ssh/pubkey)
];
};
}
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
clanCore.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
imports = [
self.clanModules.borgbackup
self.nixosModules.clanCore
{
services.openssh.enable = true;
services.borgbackup.repos.testrepo = {
authorizedKeys = [ (builtins.readFile ../lib/ssh/pubkey) ];
};
};
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
}
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
clanCore.state.testState.folders = [ "/etc/state" ];
environment.etc.state.text = "hello world";
systemd.tmpfiles.settings."vmsecrets" = {
"/etc/secrets/borgbackup.ssh" = {
C.argument = "${../lib/ssh/privkey}";
z = {
mode = "0400";
user = "root";
};
};
"/etc/secrets/borgbackup.repokey" = {
C.argument = builtins.toString (pkgs.writeText "repokey" "repokey12345");
z = {
mode = "0400";
user = "root";
};
};
};
};
};
clanCore.secretStore = "vm";
clanCore.secretStore = "vm";
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
}
];
};
testScript = ''
start_all()
machine.systemctl("start --wait borgbackup-job-test.service")
assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
'';
})
clan.borgbackup.destinations.test.repo = "borg@localhost:.";
}
];
};
testScript = ''
start_all()
machine.systemctl("start --wait borgbackup-job-test.service")
assert "machine-test" in machine.succeed("BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes /run/current-system/sw/bin/borg-job-test list")
'';
}
)

@ -1,14 +1,19 @@
(import ../lib/container-test.nix) ({ ... }: {
name = "secrets";
(import ../lib/container-test.nix) (
{ ... }:
{
name = "secrets";
nodes.machine = { ... }: {
networking.hostName = "machine";
services.openssh.enable = true;
services.openssh.startWhenNeeded = false;
};
testScript = ''
start_all()
machine.succeed("systemctl status sshd")
machine.wait_for_unit("sshd")
'';
})
nodes.machine =
{ ... }:
{
networking.hostName = "machine";
services.openssh.enable = true;
services.openssh.startWhenNeeded = false;
};
testScript = ''
start_all()
machine.succeed("systemctl status sshd")
machine.wait_for_unit("sshd")
'';
}
)

@ -1,24 +1,29 @@
(import ../lib/container-test.nix) ({ pkgs, ... }: {
name = "secrets";
(import ../lib/container-test.nix) (
{ pkgs, ... }:
{
name = "secrets";
nodes.machine = { self, ... }: {
imports = [
self.clanModules.deltachat
self.nixosModules.clanCore
nodes.machine =
{ self, ... }:
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
}
];
};
testScript = ''
start_all()
machine.wait_for_unit("maddy")
# imap
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 143")
# smtp submission
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 587")
# smtp
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 25")
'';
})
imports = [
self.clanModules.deltachat
self.nixosModules.clanCore
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
}
];
};
testScript = ''
start_all()
machine.wait_for_unit("maddy")
# imap
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 143")
# smtp submission
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 587")
# smtp
machine.succeed("${pkgs.netcat}/bin/nc -z -v ::1 25")
'';
}
)

@ -1,41 +1,20 @@
{ self, ... }: {
{ self, ... }:
{
imports = [
./impure/flake-module.nix
./backups/flake-module.nix
./installation/flake-module.nix
./flash/flake-module.nix
];
perSystem = { pkgs, lib, self', ... }: {
checks =
let
nixosTestArgs = {
# reference to nixpkgs for the current system
inherit pkgs;
# this gives us a reference to our flake but also all flake inputs
inherit self;
};
nixosTests = lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
container = import ./container nixosTestArgs;
deltachat = import ./deltachat nixosTestArgs;
zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
borgbackup = import ./borgbackup nixosTestArgs;
syncthing = import ./syncthing nixosTestArgs;
wayland-proxy-virtwl = import ./wayland-proxy-virtwl nixosTestArgs;
};
schemaTests = pkgs.callPackages ./schemas.nix {
inherit self;
};
flakeOutputs = lib.mapAttrs' (name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel) self.nixosConfigurations
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (self'.legacyPackages.homeConfigurations or { });
in
nixosTests // schemaTests // flakeOutputs;
legacyPackages = {
nixosTests =
perSystem =
{
pkgs,
lib,
self',
...
}:
{
checks =
let
nixosTestArgs = {
# reference to nixpkgs for the current system
@ -43,12 +22,44 @@
# this gives us a reference to our flake but also all flake inputs
inherit self;
};
nixosTests = lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
container = import ./container nixosTestArgs;
deltachat = import ./deltachat nixosTestArgs;
zt-tcp-relay = import ./zt-tcp-relay nixosTestArgs;
borgbackup = import ./borgbackup nixosTestArgs;
syncthing = import ./syncthing nixosTestArgs;
wayland-proxy-virtwl = import ./wayland-proxy-virtwl nixosTestArgs;
};
schemaTests = pkgs.callPackages ./schemas.nix { inherit self; };
flakeOutputs =
lib.mapAttrs' (
name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
) self.nixosConfigurations
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (
self'.legacyPackages.homeConfigurations or { }
);
in
lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
container = import ./container nixosTestArgs;
};
nixosTests // schemaTests // flakeOutputs;
legacyPackages = {
nixosTests =
let
nixosTestArgs = {
# reference to nixpkgs for the current system
inherit pkgs;
# this gives us a reference to our flake but also all flake inputs
inherit self;
};
in
lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
container = import ./container nixosTestArgs;
};
};
};
};
}

@ -1,6 +1,12 @@
{ self, ... }:
{
perSystem = { nodes, pkgs, lib, ... }:
perSystem =
{
nodes,
pkgs,
lib,
...
}:
let
dependencies = [
self
@ -14,33 +20,30 @@
in
{
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
flash =
(import ../lib/test-base.nix)
{
name = "flash";
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
virtualisation.memorySize = 3000;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
flash = (import ../lib/test-base.nix) {
name = "flash";
nodes.target = {
virtualisation.emptyDiskImages = [ 4096 ];
virtualisation.memorySize = 3000;
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
};
testScript = ''
start_all()
machine.succeed("clan --flake ${../..} flash --debug --yes --disk main /dev/vdb test_install_machine")
'';
}
{ inherit pkgs self; };
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
};
testScript = ''
start_all()
machine.succeed("clan --flake ${../..} flash --debug --yes --disk main /dev/vdb test_install_machine")
'';
} { inherit pkgs self; };
};
};
}

@ -1,18 +1,22 @@
{
perSystem = { pkgs, lib, ... }: {
# a script that executes all other checks
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
perSystem =
{ pkgs, lib, ... }:
{
# a script that executes all other checks
packages.impure-checks = pkgs.writeShellScriptBin "impure-checks" ''
#!${pkgs.bash}/bin/bash
set -euo pipefail
export PATH="${lib.makeBinPath [
pkgs.gitMinimal
pkgs.nix
pkgs.rsync # needed to have rsync installed on the dummy ssh server
]}"
ROOT=$(git rev-parse --show-toplevel)
cd "$ROOT/pkgs/clan-cli"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -s -m impure ./tests $@"
'';
};
export PATH="${
lib.makeBinPath [
pkgs.gitMinimal
pkgs.nix
pkgs.rsync # needed to have rsync installed on the dummy ssh server
]
}"
ROOT=$(git rev-parse --show-toplevel)
cd "$ROOT/pkgs/clan-cli"
nix develop "$ROOT#clan-cli" -c bash -c "TMPDIR=/tmp python -m pytest -s -m impure ./tests $@"
'';
};
}

@ -12,26 +12,34 @@ let
};
in
{
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) test_install_machine; };
flake.nixosConfigurations = {
inherit (clan.nixosConfigurations) test_install_machine;
};
flake.clanInternals = clan.clanInternals;
flake.nixosModules = {
test_install_machine = { lib, modulesPath, ... }: {
imports = [
self.clanModules.diskLayouts
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
(modulesPath + "/profiles/qemu-guest.nix")
];
clan.diskLayouts.singleDiskExt4.device = "/dev/vdb";
test_install_machine =
{ lib, modulesPath, ... }:
{
imports = [
self.clanModules.diskLayouts
(modulesPath + "/testing/test-instrumentation.nix") # we need these 2 modules always to be able to run the tests
(modulesPath + "/profiles/qemu-guest.nix")
];
clan.diskLayouts.singleDiskExt4.device = "/dev/vdb";
environment.etc."install-successful".text = "ok";
environment.etc."install-successful".text = "ok";
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [
"boot.shell_on_fail"
];
};
boot.consoleLogLevel = lib.mkForce 100;
boot.kernelParams = [ "boot.shell_on_fail" ];
};
};
perSystem = { nodes, pkgs, lib, ... }:
perSystem =
{
nodes,
pkgs,
lib,
...
}:
let
dependencies = [
self
@ -45,74 +53,69 @@ in
in
{
checks = pkgs.lib.mkIf (pkgs.stdenv.isLinux) {
test-installation =
(import ../lib/test-base.nix)
{
name = "test-installation";
nodes.target = {
services.openssh.enable = true;
users.users.root.openssh.authorizedKeys.keyFiles = [
../lib/ssh/pubkey
];
system.nixos.variant_id = "installer";
virtualisation.emptyDiskImages = [ 4096 ];
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
};
nodes.client = {
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
virtualisation.memorySize = 2048;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
system.extraDependencies = dependencies;
};
test-installation = (import ../lib/test-base.nix) {
name = "test-installation";
nodes.target = {
services.openssh.enable = true;
users.users.root.openssh.authorizedKeys.keyFiles = [ ../lib/ssh/pubkey ];
system.nixos.variant_id = "installer";
virtualisation.emptyDiskImages = [ 4096 ];
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
};
nodes.client = {
environment.systemPackages = [ self.packages.${pkgs.system}.clan-cli ];
environment.etc."install-closure".source = "${closureInfo}/store-paths";
virtualisation.memorySize = 2048;
nix.settings = {
substituters = lib.mkForce [ ];
hashed-mirrors = null;
connect-timeout = lib.mkForce 3;
flake-registry = pkgs.writeText "flake-registry" ''{"flakes":[],"version":2}'';
experimental-features = [
"nix-command"
"flakes"
];
};
system.extraDependencies = dependencies;
};
testScript = ''
def create_test_machine(oldmachine=None, args={}): # taken from <nixpkgs/nixos/tests/installer.nix>
startCommand = "${pkgs.qemu_test}/bin/qemu-kvm"
startCommand += " -cpu max -m 1024 -virtfs local,path=/nix/store,security_model=none,mount_tag=nix-store"
startCommand += f' -drive file={oldmachine.state_dir}/empty0.qcow2,id=drive1,if=none,index=1,werror=report'
startCommand += ' -device virtio-blk-pci,drive=drive1'
machine = create_machine({
"startCommand": startCommand,
} | args)
driver.machines.append(machine)
return machine
testScript = ''
def create_test_machine(oldmachine=None, args={}): # taken from <nixpkgs/nixos/tests/installer.nix>
startCommand = "${pkgs.qemu_test}/bin/qemu-kvm"
startCommand += " -cpu max -m 1024 -virtfs local,path=/nix/store,security_model=none,mount_tag=nix-store"
startCommand += f' -drive file={oldmachine.state_dir}/empty0.qcow2,id=drive1,if=none,index=1,werror=report'
startCommand += ' -device virtio-blk-pci,drive=drive1'
machine = create_machine({
"startCommand": startCommand,
} | args)
driver.machines.append(machine)
return machine
start_all()
start_all()
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
client.succeed("${pkgs.coreutils}/bin/install -Dm 600 ${../lib/ssh/privkey} /root/.ssh/id_ed25519")
client.wait_until_succeeds("ssh -o StrictHostKeyChecking=accept-new -v root@target hostname")
client.succeed("clan --debug --flake ${../..} machines install --yes test_install_machine root@target >&2")
try:
target.shutdown()
except BrokenPipeError:
# qemu has already exited
pass
client.succeed("clan --debug --flake ${../..} machines install --yes test_install_machine root@target >&2")
try:
target.shutdown()
except BrokenPipeError:
# qemu has already exited
pass
new_machine = create_test_machine(oldmachine=target, args={ "name": "new_machine" })
assert(new_machine.succeed("cat /etc/install-successful").strip() == "ok")
'';
}
{ inherit pkgs self; };
new_machine = create_test_machine(oldmachine=target, args={ "name": "new_machine" })
assert(new_machine.succeed("cat /etc/install-successful").strip() == "ok")
'';
} { inherit pkgs self; };
};
};
}

@ -1,17 +1,23 @@
{ hostPkgs, lib, config, ... }:
{
hostPkgs,
lib,
config,
...
}:
let
testDriver = hostPkgs.python3.pkgs.callPackage ./package.nix {
inherit (config) extraPythonPackages;
inherit (hostPkgs.pkgs) util-linux systemd;
};
containers = map (m: m.system.build.toplevel) (lib.attrValues config.nodes);
pythonizeName = name:
pythonizeName =
name:
let
head = lib.substring 0 1 name;
tail = lib.substring 1 (-1) name;
in
(if builtins.match "[A-z_]" head == null then "_" else head) +
lib.stringAsChars (c: if builtins.match "[A-z0-9_]" c == null then "_" else c) tail;
(if builtins.match "[A-z_]" head == null then "_" else head)
+ lib.stringAsChars (c: if builtins.match "[A-z0-9_]" c == null then "_" else c) tail;
nodeHostNames =
let
nodesList = map (c: c.system.name) (lib.attrValues config.nodes);
@ -21,68 +27,72 @@ let
pythonizedNames = map pythonizeName nodeHostNames;
in
{
driver = lib.mkForce (hostPkgs.runCommand "nixos-test-driver-${config.name}"
{
nativeBuildInputs = [
hostPkgs.makeWrapper
] ++ lib.optionals (!config.skipTypeCheck) [ hostPkgs.mypy ];
buildInputs = [ testDriver ];
testScript = config.testScriptString;
preferLocalBuild = true;
passthru = config.passthru;
meta = config.meta // {
mainProgram = "nixos-test-driver";
driver = lib.mkForce (
hostPkgs.runCommand "nixos-test-driver-${config.name}"
{
nativeBuildInputs = [
hostPkgs.makeWrapper
] ++ lib.optionals (!config.skipTypeCheck) [ hostPkgs.mypy ];
buildInputs = [ testDriver ];
testScript = config.testScriptString;
preferLocalBuild = true;
passthru = config.passthru;
meta = config.meta // {
mainProgram = "nixos-test-driver";
};
}
''
mkdir -p $out/bin
containers=(${toString containers})
${lib.optionalString (!config.skipTypeCheck) ''
# prepend type hints so the test script can be type checked with mypy
cat "${./test-script-prepend.py}" >> testScriptWithTypes
echo "${builtins.toString machineNames}" >> testScriptWithTypes
echo -n "$testScript" >> testScriptWithTypes
echo "Running type check (enable/disable: config.skipTypeCheck)"
echo "See https://nixos.org/manual/nixos/stable/#test-opt-skipTypeCheck"
mypy --no-implicit-optional \
--pretty \
--no-color-output \
testScriptWithTypes
''}
echo -n "$testScript" >> $out/test-script
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
wrapProgram $out/bin/nixos-test-driver \
${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
--add-flags "--test-script '$out/test-script'"
''
);
test = lib.mkForce (
lib.lazyDerivation {
# lazyDerivation improves performance when only passthru items and/or meta are used.
derivation = hostPkgs.stdenv.mkDerivation {
name = "vm-test-run-${config.name}";
requiredSystemFeatures = [ "uid-range" ];
buildCommand = ''
mkdir -p $out
# effectively mute the XMLLogger
export LOGFILE=/dev/null
${config.driver}/bin/nixos-test-driver -o $out
'';
passthru = config.passthru;
meta = config.meta;
};
inherit (config) passthru meta;
}
''
mkdir -p $out/bin
containers=(${toString containers})
${lib.optionalString (!config.skipTypeCheck) ''
# prepend type hints so the test script can be type checked with mypy
cat "${./test-script-prepend.py}" >> testScriptWithTypes
echo "${builtins.toString machineNames}" >> testScriptWithTypes
echo -n "$testScript" >> testScriptWithTypes
echo "Running type check (enable/disable: config.skipTypeCheck)"
echo "See https://nixos.org/manual/nixos/stable/#test-opt-skipTypeCheck"
mypy --no-implicit-optional \
--pretty \
--no-color-output \
testScriptWithTypes
''}
echo -n "$testScript" >> $out/test-script
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
wrapProgram $out/bin/nixos-test-driver \
${lib.concatStringsSep " " (map (name: "--add-flags '--container ${name}'") containers)} \
--add-flags "--test-script '$out/test-script'"
'');
test = lib.mkForce (lib.lazyDerivation {
# lazyDerivation improves performance when only passthru items and/or meta are used.
derivation = hostPkgs.stdenv.mkDerivation {
name = "vm-test-run-${config.name}";
requiredSystemFeatures = [ "uid-range" ];
buildCommand = ''
mkdir -p $out
# effectively mute the XMLLogger
export LOGFILE=/dev/null
${config.driver}/bin/nixos-test-driver -o $out
'';
passthru = config.passthru;
meta = config.meta;
};
inherit (config) passthru meta;
});
);
}

@ -1,8 +1,18 @@
{ extraPythonPackages, python3Packages, buildPythonApplication, setuptools, util-linux, systemd }:
{
extraPythonPackages,
python3Packages,
buildPythonApplication,
setuptools,
util-linux,
systemd,
}:
buildPythonApplication {
pname = "test-driver";
version = "0.0.1";
propagatedBuildInputs = [ util-linux systemd ] ++ extraPythonPackages python3Packages;
propagatedBuildInputs = [
util-linux
systemd
] ++ extraPythonPackages python3Packages;
nativeBuildInputs = [ setuptools ];
format = "pyproject";
src = ./.;

@ -1,33 +1,33 @@
test:
{ pkgs
, self
, ...
}:
{ pkgs, self, ... }:
let
inherit (pkgs) lib;
nixos-lib = import (pkgs.path + "/nixos/lib") { };
in
(nixos-lib.runTest ({ hostPkgs, ... }: {
hostPkgs = pkgs;
# speed-up evaluation
defaults = {
documentation.enable = lib.mkDefault false;
boot.isContainer = true;
(nixos-lib.runTest (
{ hostPkgs, ... }:
{
hostPkgs = pkgs;
# speed-up evaluation
defaults = {
documentation.enable = lib.mkDefault false;
boot.isContainer = true;
# undo qemu stuff
system.build.initialRamdisk = "";
virtualisation.sharedDirectories = lib.mkForce { };
networking.useDHCP = false;
# undo qemu stuff
system.build.initialRamdisk = "";
virtualisation.sharedDirectories = lib.mkForce { };
networking.useDHCP = false;
# we have not private networking so far
networking.interfaces = lib.mkForce { };
#networking.primaryIPAddress = lib.mkForce null;
systemd.services.backdoor.enable = false;
};
# to accept external dependencies such as disko
node.specialArgs.self = self;
imports = [
test
./container-driver/module.nix
];
})).config.result
# we have not private networking so far
networking.interfaces = lib.mkForce { };
#networking.primaryIPAddress = lib.mkForce null;
systemd.services.backdoor.enable = false;
};
# to accept external dependencies such as disko
node.specialArgs.self = self;
imports = [
test
./container-driver/module.nix
];
}
)).config.result

@ -1,8 +1,5 @@
test:
{ pkgs
, self
, ...
}:
{ pkgs, self, ... }:
let
inherit (pkgs) lib;
nixos-lib = import (pkgs.path + "/nixos/lib") { };

@ -1,35 +1,48 @@
{ self, runCommand, check-jsonschema, pkgs, lib, ... }:
{
self,
runCommand,
check-jsonschema,
pkgs,
lib,
...
}:
let
clanModules.clanCore = self.nixosModules.clanCore;
baseModule = {
imports =
(import (pkgs.path + "/nixos/modules/module-list.nix"))
++ [{
imports = (import (pkgs.path + "/nixos/modules/module-list.nix")) ++ [
{
nixpkgs.hostPlatform = "x86_64-linux";
clanCore.clanName = "dummy";
}];
}
];
};
optionsFromModule = module:
optionsFromModule =
module:
let
evaled = lib.evalModules {
modules = [ module baseModule ];
modules = [
module
baseModule
];
};
in
evaled.options.clan;
clanModuleSchemas = lib.mapAttrs (_: module: self.lib.jsonschema.parseOptions (optionsFromModule module)) clanModules;
clanModuleSchemas = lib.mapAttrs (
_: module: self.lib.jsonschema.parseOptions (optionsFromModule module)
) clanModules;
mkTest = name: schema: runCommand "schema-${name}" { } ''
${check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${builtins.toFile "schema-${name}" (builtins.toJSON schema)}
touch $out
'';
mkTest =
name: schema:
runCommand "schema-${name}" { } ''
${check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${builtins.toFile "schema-${name}" (builtins.toJSON schema)}
touch $out
'';
in
lib.mapAttrs'
(name: schema: {
name = "schema-${name}";
value = mkTest name schema;
})
clanModuleSchemas
lib.mapAttrs' (name: schema: {
name = "schema-${name}";
value = mkTest name schema;
}) clanModuleSchemas

@ -1,19 +1,19 @@
(import ../lib/test-base.nix) {
name = "secrets";
nodes.machine = { self, config, ... }: {
imports = [
(self.nixosModules.clanCore)
];
environment.etc."secret".source = config.sops.secrets.secret.path;
environment.etc."group-secret".source = config.sops.secrets.group-secret.path;
sops.age.keyFile = ./key.age;
nodes.machine =
{ self, config, ... }:
{
imports = [ (self.nixosModules.clanCore) ];
environment.etc."secret".source = config.sops.secrets.secret.path;
environment.etc."group-secret".source = config.sops.secrets.group-secret.path;
sops.age.keyFile = ./key.age;
clanCore.clanDir = "${./.}";
clanCore.machineName = "machine";
clanCore.clanDir = "${./.}";
clanCore.machineName = "machine";
networking.hostName = "machine";
};
networking.hostName = "machine";
};
testScript = ''
machine.succeed("cat /etc/secret >&2")
machine.succeed("cat /etc/group-secret >&2")

@ -1,25 +1,35 @@
import ../lib/test-base.nix ({ config, pkgs, lib, ... }: {
name = "wayland-proxy-virtwl";
import ../lib/test-base.nix (
{
config,
pkgs,
lib,
...
}:
{
name = "wayland-proxy-virtwl";
nodes.machine = { self, ... }: {
imports = [
self.nixosModules.clanCore
nodes.machine =
{ self, ... }:
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
}
];
services.wayland-proxy-virtwl.enable = true;
imports = [
self.nixosModules.clanCore
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
}
];
services.wayland-proxy-virtwl.enable = true;
virtualisation.qemu.options = [
"-vga none -device virtio-gpu-rutabaga,cross-domain=on,hostmem=4G,wsi=headless"
];
virtualisation.qemu.options = [
"-vga none -device virtio-gpu-rutabaga,cross-domain=on,hostmem=4G,wsi=headless"
];
virtualisation.qemu.package = lib.mkForce pkgs.qemu_kvm;
};
testScript = ''
start_all()
# use machinectl
machine.succeed("machinectl shell .host ${config.nodes.machine.systemd.package}/bin/systemctl --user start wayland-proxy-virtwl >&2")
'';
})
virtualisation.qemu.package = lib.mkForce pkgs.qemu_kvm;
};
testScript = ''
start_all()
# use machinectl
machine.succeed("machinectl shell .host ${config.nodes.machine.systemd.package}/bin/systemctl --user start wayland-proxy-virtwl >&2")
'';
}
)

@ -1,20 +1,25 @@
(import ../lib/container-test.nix) ({ pkgs, ... }: {
name = "zt-tcp-relay";
(import ../lib/container-test.nix) (
{ pkgs, ... }:
{
name = "zt-tcp-relay";
nodes.machine = { self, ... }: {
imports = [
self.nixosModules.clanCore
self.clanModules.zt-tcp-relay
nodes.machine =
{ self, ... }:
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
}
];
};
testScript = ''
start_all()
machine.wait_for_unit("zt-tcp-relay.service")
out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
print(out)
'';
})
imports = [
self.nixosModules.clanCore
self.clanModules.zt-tcp-relay
{
clanCore.machineName = "machine";
clanCore.clanDir = ./.;
}
];
};
testScript = ''
start_all()
machine.wait_for_unit("zt-tcp-relay.service")
out = machine.succeed("${pkgs.netcat}/bin/nc -z -v localhost 4443")
print(out)
'';
}
)

@ -1,69 +1,88 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.clan.borgbackup;
in
{
options.clan.borgbackup.destinations = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "the name of the backup job";
};
repo = lib.mkOption {
type = lib.types.str;
description = "the borgbackup repository to backup to";
};
rsh = lib.mkOption {
type = lib.types.str;
default = "ssh -i ${config.clanCore.secrets.borgbackup.secrets."borgbackup.ssh".path} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
}));
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = "the name of the backup job";
};
repo = lib.mkOption {
type = lib.types.str;
description = "the borgbackup repository to backup to";
};
rsh = lib.mkOption {
type = lib.types.str;
default = "ssh -i ${
config.clanCore.secrets.borgbackup.secrets."borgbackup.ssh".path
} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null";
description = "the rsh to use for the backup";
};
};
}
)
);
default = { };
description = ''
destinations where the machine should be backuped to
'';
};
imports = [ (lib.mkRemovedOptionModule [ "clan" "borgbackup" "enable" ] "Just define clan.borgbackup.destinations to enable it") ];
imports = [
(lib.mkRemovedOptionModule [
"clan"
"borgbackup"
"enable"
] "Just define clan.borgbackup.destinations to enable it")
];
config = lib.mkIf (cfg.destinations != { }) {
services.borgbackup.jobs = lib.mapAttrs
(_: dest: {
paths = lib.flatten (map (state: state.folders) (lib.attrValues config.clanCore.state));
exclude = [ "*.pyc" ];
repo = dest.repo;
environment.BORG_RSH = dest.rsh;
compression = "auto,zstd";
startAt = "*-*-* 01:00:00";
persistentTimer = true;
preHook = ''
set -x
'';
services.borgbackup.jobs = lib.mapAttrs (_: dest: {
paths = lib.flatten (map (state: state.folders) (lib.attrValues config.clanCore.state));
exclude = [ "*.pyc" ];
repo = dest.repo;
environment.BORG_RSH = dest.rsh;
compression = "auto,zstd";
startAt = "*-*-* 01:00:00";
persistentTimer = true;
preHook = ''
set -x
'';
encryption = {
mode = "repokey";
passCommand = "cat ${config.clanCore.secrets.borgbackup.secrets."borgbackup.repokey".path}";
};
encryption = {
mode = "repokey";
passCommand = "cat ${config.clanCore.secrets.borgbackup.secrets."borgbackup.repokey".path}";
};
prune.keep = {
within = "1d"; # Keep all archives from the last day
daily = 7;
weekly = 4;
monthly = 0;
};
})
cfg.destinations;
prune.keep = {
within = "1d"; # Keep all archives from the last day
daily = 7;
weekly = 4;
monthly = 0;
};
}) cfg.destinations;
clanCore.secrets.borgbackup = {
facts."borgbackup.ssh.pub" = { };
secrets."borgbackup.ssh" = { };
secrets."borgbackup.repokey" = { };
generator.path = [ pkgs.openssh pkgs.coreutils pkgs.xkcdpass ];
generator.path = [
pkgs.openssh
pkgs.coreutils
pkgs.xkcdpass
];
generator.script = ''
ssh-keygen -t ed25519 -N "" -f "$secrets"/borgbackup.ssh
mv "$secrets"/borgbackup.ssh.pub "$facts"/borgbackup.ssh.pub
@ -75,8 +94,9 @@ in
# TODO list needs to run locally or on the remote machine
list = ''
# we need yes here to skip the changed url verification
${lib.concatMapStringsSep "\n" (dest: ''yes y | borg-job-${dest.name} list --json | jq -r '. + {"job-name": "${dest.name}"}' '')
(lib.attrValues cfg.destinations)}
${lib.concatMapStringsSep "\n" (
dest: ''yes y | borg-job-${dest.name} list --json | jq -r '. + {"job-name": "${dest.name}"}' ''
) (lib.attrValues cfg.destinations)}
'';
create = ''
${lib.concatMapStringsSep "\n" (dest: ''

@ -1,4 +1,5 @@
{ config, pkgs, ... }: {
{ config, pkgs, ... }:
{
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 25 ]; # smtp with other hosts
environment.systemPackages = [ pkgs.deltachat-desktop ];
@ -134,9 +135,7 @@
storage &local_mailboxes
}
'';
ensureAccounts = [
"user@${domain}"
];
ensureAccounts = [ "user@${domain}" ];
ensureCredentials = {
"user@${domain}".passwordFile = pkgs.writeText "dummy" "foobar";
};

@ -41,4 +41,3 @@
};
};
}

@ -1,4 +1,5 @@
{ inputs, ... }: {
{ inputs, ... }:
{
flake.clanModules = {
diskLayouts = {
imports = [

@ -1,4 +1 @@
_:
{
fonts.enableDefaultPackages = true;
}
_: { fonts.enableDefaultPackages = true; }

@ -1,7 +1,8 @@
{ config
, pkgs
, lib
, ...
{
config,
pkgs,
lib,
...
}:
{
# Integration can be improved, if the following issues get implemented:

@ -1,4 +1,5 @@
{ pkgs, ... }: {
{ pkgs, ... }:
{
hardware.opengl.enable = true;
environment.systemPackages = [ pkgs.moonlight-qt ];
}

@ -1,15 +1,21 @@
{ config, pkgs, ... }: {
{ config, pkgs, ... }:
{
services.openssh.enable = true;
services.openssh.hostKeys = [{
path = config.clanCore.secrets.openssh.secrets."ssh.id_ed25519".path;
type = "ed25519";
}];
services.openssh.hostKeys = [
{
path = config.clanCore.secrets.openssh.secrets."ssh.id_ed25519".path;
type = "ed25519";
}
];
clanCore.secrets.openssh = {
secrets."ssh.id_ed25519" = { };
facts."ssh.id_ed25519.pub" = { };
generator.path = [ pkgs.coreutils pkgs.openssh ];
generator.path = [
pkgs.coreutils
pkgs.openssh
];
generator.script = ''
ssh-keygen -t ed25519 -N "" -f $secrets/ssh.id_ed25519
mv $secrets/ssh.id_ed25519.pub $facts/ssh.id_ed25519.pub

@ -1,7 +1,7 @@
{ pkgs, options, ... }:
let
apps = pkgs.writeText "apps.json" (builtins.toJSON
{
apps = pkgs.writeText "apps.json" (
builtins.toJSON {
env = {
PATH = "$(PATH):$(HOME)/.local/bin:/run/current-system/sw/bin";
};
@ -22,13 +22,12 @@ let
}
{
name = "Steam Big Picture";
detached = [
"setsid steam steam://open/bigpicture"
];
detached = [ "setsid steam steam://open/bigpicture" ];
image-path = "steam.png";
}
];
});
}
);
sunshineConfiguration = pkgs.writeText "sunshine.conf" ''
address_family = both
channels = 5
@ -78,11 +77,9 @@ in
environment.systemPackages = [
pkgs.sunshine
(pkgs.writers.writeDashBin "sun" ''
${pkgs.sunshine}/bin/sunshine -1 ${
pkgs.writeText "sunshine.conf" ''
address_family = both
''
} "$@"
${pkgs.sunshine}/bin/sunshine -1 ${pkgs.writeText "sunshine.conf" ''
address_family = both
''} "$@"
'')
# Create a dummy account, for easier setup,
# don't use this account in actual production yet.
@ -113,11 +110,7 @@ in
};
};
systemd.tmpfiles.rules = [
"d '/var/lib/sunshine' 0770 'user' 'users' - -"
];
systemd.tmpfiles.rules = [ "d '/var/lib/sunshine' 0770 'user' 'users' - -" ];
systemd.user.services.sunshine = {
enable = true;
@ -128,9 +121,7 @@ in
serviceConfig = {
Restart = "on-failure";
RestartSec = "5s";
ReadWritePaths = [
"/var/lib/sunshine"
];
ReadWritePaths = [ "/var/lib/sunshine" ];
};
wantedBy = [ "graphical-session.target" ];
};

@ -1,7 +1,8 @@
{ config
, pkgs
, lib
, ...
{
config,
pkgs,
lib,
...
}:
{
options.clan.syncthing = {
@ -53,9 +54,9 @@
assertions = [
{
assertion =
lib.all (attr: builtins.hasAttr attr config.services.syncthing.settings.folders)
config.clan.syncthing.autoShares;
assertion = lib.all (
attr: builtins.hasAttr attr config.services.syncthing.settings.folders
) config.clan.syncthing.autoShares;
message = ''
Syncthing: If you want to AutoShare a folder, you need to have it configured on the sharing device.
'';
@ -80,12 +81,8 @@
group = "syncthing";
key =
lib.mkDefault
config.clan.secrets.syncthing.secrets."syncthing.key".path or null;
cert =
lib.mkDefault
config.clan.secrets.syncthing.secrets."syncthing.cert".path or null;
key = lib.mkDefault config.clan.secrets.syncthing.secrets."syncthing.key".path or null;
cert = lib.mkDefault config.clan.secrets.syncthing.secrets."syncthing.cert".path or null;
settings = {
options = {
@ -127,47 +124,33 @@
set -x
# query pending deviceID's
APIKEY=$(cat ${apiKey})
PENDING=$(${
lib.getExe pkgs.curl
} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${getPendingDevices})
PENDING=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${getPendingDevices})
PENDING=$(echo $PENDING | ${lib.getExe pkgs.jq} keys[])
# accept pending deviceID's
for ID in $PENDING;do
${
lib.getExe pkgs.curl
} -X POST -d "{\"deviceId\": $ID}" -H "Content-Type: application/json" -H "X-API-Key: $APIKEY" ${baseAddress}${postNewDevice}
${lib.getExe pkgs.curl} -X POST -d "{\"deviceId\": $ID}" -H "Content-Type: application/json" -H "X-API-Key: $APIKEY" ${baseAddress}${postNewDevice}
# get all shared folders by their ID
for folder in ${builtins.toString config.clan.syncthing.autoShares}; do
SHARED_IDS=$(${
lib.getExe pkgs.curl
} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" | ${
lib.getExe pkgs.jq
} ."devices")
PATCHED_IDS=$(echo $SHARED_IDS | ${
lib.getExe pkgs.jq
} ".+= [{\"deviceID\": $ID, \"introducedBy\": \"\", \"encryptionPassword\": \"\"}]")
${
lib.getExe pkgs.curl
} -X PATCH -d "{\"devices\": $PATCHED_IDS}" -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder"
SHARED_IDS=$(${lib.getExe pkgs.curl} -X GET -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder" | ${lib.getExe pkgs.jq} ."devices")
PATCHED_IDS=$(echo $SHARED_IDS | ${lib.getExe pkgs.jq} ".+= [{\"deviceID\": $ID, \"introducedBy\": \"\", \"encryptionPassword\": \"\"}]")
${lib.getExe pkgs.curl} -X PATCH -d "{\"devices\": $PATCHED_IDS}" -H "X-API-Key: $APIKEY" ${baseAddress}${SharedFolderById}"$folder"
done
done
'';
};
systemd.timers.syncthing-auto-accept =
lib.mkIf config.clan.syncthing.autoAcceptDevices
{
description = "Syncthing Auto Accept";
systemd.timers.syncthing-auto-accept = lib.mkIf config.clan.syncthing.autoAcceptDevices {
description = "Syncthing Auto Accept";
wantedBy = [ "syncthing-auto-accept.service" ];
wantedBy = [ "syncthing-auto-accept.service" ];
timerConfig = {
OnActiveSec = lib.mkDefault 60;
OnUnitActiveSec = lib.mkDefault 60;
};
};
timerConfig = {
OnActiveSec = lib.mkDefault 60;
OnUnitActiveSec = lib.mkDefault 60;
};
};
systemd.services.syncthing-init-api-key =
let
@ -182,9 +165,7 @@
set -efu pipefail
APIKEY=$(cat ${apiKey})
${
lib.getExe pkgs.gnused
} -i "s/<apikey>.*<\/apikey>/<apikey>$APIKEY<\/apikey>/" /var/lib/syncthing/config.xml
${lib.getExe pkgs.gnused} -i "s/<apikey>.*<\/apikey>/<apikey>$APIKEY<\/apikey>/" /var/lib/syncthing/config.xml
# sudo systemctl restart syncthing.service
systemctl restart syncthing.service
'';

@ -1,7 +1,8 @@
{ pkgs
, lib
, config
, ...
{
pkgs,
lib,
config,
...
}:
{
options.clan.services.waypipe = {
@ -49,7 +50,10 @@
isNormalUser = true;
uid = 1000;
password = "";
extraGroups = [ "wheel" "video" ];
extraGroups = [
"wheel"
"video"
];
shell = "/run/current-system/sw/bin/bash";
};

@ -1,4 +1,10 @@
{ pkgs, lib, config, ... }: {
{
pkgs,
lib,
config,
...
}:
{
options.clan.zt-tcp-relay = {
port = lib.mkOption {
type = lib.types.port;
@ -13,7 +19,9 @@
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${pkgs.callPackage ../pkgs/zt-tcp-relay {}}/bin/zt-tcp-relay --listen [::]:${builtins.toString config.clan.zt-tcp-relay.port}";
ExecStart = "${
pkgs.callPackage ../pkgs/zt-tcp-relay { }
}/bin/zt-tcp-relay --listen [::]:${builtins.toString config.clan.zt-tcp-relay.port}";
Restart = "always";
RestartSec = "5";
dynamicUsers = true;

@ -1,9 +1,10 @@
{
perSystem =
{ pkgs
, self'
, lib
, ...
{
pkgs,
self',
lib,
...
}:
let
python3 = pkgs.python3;
@ -20,9 +21,7 @@
ps.pygobject3
]
);
linuxOnlyPackages = lib.optionals pkgs.stdenv.isLinux [
pkgs.xdg-utils
];
linuxOnlyPackages = lib.optionals pkgs.stdenv.isLinux [ pkgs.xdg-utils ];
in
{
devShells.python = pkgs.mkShell {

@ -1,9 +1,10 @@
{
perSystem =
{ pkgs
, self'
, config
, ...
{
pkgs,
self',
config,
...
}:
let
writers = pkgs.callPackage ./pkgs/builders/script-writers.nix { };
@ -16,10 +17,9 @@
# A python program to switch between dev-shells
# usage: select-shell shell-name
# the currently enabled dev-shell gets stored in ./.direnv/selected-shell
select-shell = writers.writePython3Bin "select-shell"
{
flakeIgnore = [ "E501" ];
} ./pkgs/scripts/select-shell.py;
select-shell = writers.writePython3Bin "select-shell" {
flakeIgnore = [ "E501" ];
} ./pkgs/scripts/select-shell.py;
in
{
devShells.default = pkgs.mkShell {

@ -2,7 +2,9 @@
description = "clan.lol base operating system";
nixConfig.extra-substituters = [ "https://cache.clan.lol" ];
nixConfig.extra-trusted-public-keys = [ "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28=" ];
nixConfig.extra-trusted-public-keys = [
"cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28="
];
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small";
@ -20,44 +22,42 @@
treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = inputs @ { flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } ({ lib, ... }: {
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
];
imports = [
./checks/flake-module.nix
./devShell.nix
./devShell-python.nix
./formatter.nix
./templates/flake-module.nix
./clanModules/flake-module.nix
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } (
{ lib, ... }:
{
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
];
imports = [
./checks/flake-module.nix
./devShell.nix
./devShell-python.nix
./formatter.nix
./templates/flake-module.nix
./clanModules/flake-module.nix
./pkgs/flake-module.nix
./pkgs/flake-module.nix
./lib/flake-module.nix
./nixosModules/flake-module.nix
{
options.flake = flake-parts.lib.mkSubmoduleOptions {
clanInternals = lib.mkOption {
type = lib.types.submodule {
options = {
all-machines-json = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
};
machines = lib.mkOption {
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
};
machinesFunc = lib.mkOption {
type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified);
./lib/flake-module.nix
./nixosModules/flake-module.nix
{
options.flake = flake-parts.lib.mkSubmoduleOptions {
clanInternals = lib.mkOption {
type = lib.types.submodule {
options = {
all-machines-json = lib.mkOption { type = lib.types.attrsOf lib.types.str; };
machines = lib.mkOption { type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified); };
machinesFunc = lib.mkOption { type = lib.types.attrsOf (lib.types.attrsOf lib.types.unspecified); };
};
};
};
};
};
}
];
});
}
];
}
);
}

@ -1,49 +1,47 @@
{ lib
, inputs
, ...
}: {
imports = [
inputs.treefmt-nix.flakeModule
];
perSystem = { self', pkgs, ... }: {
treefmt.projectRootFile = "flake.nix";
treefmt.programs.shellcheck.enable = true;
{ lib, inputs, ... }:
{
imports = [ inputs.treefmt-nix.flakeModule ];
perSystem =
{ self', pkgs, ... }:
{
treefmt.projectRootFile = "flake.nix";
treefmt.programs.shellcheck.enable = true;
treefmt.programs.mypy.enable = true;
treefmt.programs.mypy.directories = {
"pkgs/clan-cli".extraPythonPackages = self'.packages.clan-cli.pytestDependencies;
"pkgs/clan-vm-manager".extraPythonPackages = self'.packages.clan-vm-manager.propagatedBuildInputs;
};
treefmt.programs.mypy.enable = true;
treefmt.programs.mypy.directories = {
"pkgs/clan-cli".extraPythonPackages = self'.packages.clan-cli.pytestDependencies;
"pkgs/clan-vm-manager".extraPythonPackages = self'.packages.clan-vm-manager.propagatedBuildInputs;
};
treefmt.settings.formatter.nix = {
command = "sh";
options = [
"-eucx"
''
# First deadnix
${lib.getExe pkgs.deadnix} --edit "$@"
# Then nixpkgs-fmt
${lib.getExe pkgs.nixfmt-rfc-style} "$@"
''
"--" # this argument is ignored by bash
];
includes = [ "*.nix" ];
excludes = [
# Was copied from nixpkgs. Keep diff minimal to simplify upstreaming.
"pkgs/builders/script-writers.nix"
];
treefmt.settings.formatter.nix = {
command = "sh";
options = [
"-eucx"
''
# First deadnix
${lib.getExe pkgs.deadnix} --edit "$@"
# Then nixpkgs-fmt
${lib.getExe pkgs.nixfmt-rfc-style} "$@"
''
"--" # this argument is ignored by bash
];
includes = [ "*.nix" ];
excludes = [
# Was copied from nixpkgs. Keep diff minimal to simplify upstreaming.
"pkgs/builders/script-writers.nix"
];
};
treefmt.settings.formatter.python = {
command = "sh";
options = [
"-eucx"
''
${lib.getExe pkgs.ruff} --fix "$@"
${lib.getExe pkgs.ruff} format "$@"
''
"--" # this argument is ignored by bash
];
includes = [ "*.py" ];
};
};
treefmt.settings.formatter.python = {
command = "sh";
options = [
"-eucx"
''
${lib.getExe pkgs.ruff} --fix "$@"
${lib.getExe pkgs.ruff} format "$@"
''
"--" # this argument is ignored by bash
];
includes = [ "*.py" ];
};
};
}

@ -1,66 +1,80 @@
{ clan-core, nixpkgs, lib }:
{ directory # The directory containing the machines subdirectory
, specialArgs ? { } # Extra arguments to pass to nixosSystem i.e. useful to make self available
, machines ? { } # allows to include machine-specific modules i.e. machines.${name} = { ... }
, clanName # Needs to be (globally) unique, as this determines the folder name where the flake gets downloaded to.
, clanIcon ? null # A path to an icon to be used for the clan, should be the same for all machines
, pkgsForSystem ? (_system: null) # A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.
# This improves performance, but all nipxkgs.* options will be ignored.
{
clan-core,
nixpkgs,
lib,
}:
{
directory, # The directory containing the machines subdirectory
specialArgs ? { }, # Extra arguments to pass to nixosSystem i.e. useful to make self available
machines ? { }, # allows to include machine-specific modules i.e. machines.${name} = { ... }
clanName, # Needs to be (globally) unique, as this determines the folder name where the flake gets downloaded to.
clanIcon ? null, # A path to an icon to be used for the clan, should be the same for all machines
pkgsForSystem ? (_system: null), # A map from arch to pkgs, if specified this nixpkgs will be only imported once for each system.
# This improves performance, but all nipxkgs.* options will be ignored.
}:
let
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (builtins.readDir (directory + /machines));
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (
builtins.readDir (directory + /machines)
);
machineSettings = machineName:
machineSettings =
machineName:
# CLAN_MACHINE_SETTINGS_FILE allows to override the settings file temporarily
# This is useful for doing a dry-run before writing changes into the settings.json
# Using CLAN_MACHINE_SETTINGS_FILE requires passing --impure to nix eval
if builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE" != ""
then builtins.fromJSON (builtins.readFile (builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE"))
if builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE" != "" then
builtins.fromJSON (builtins.readFile (builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE"))
else
lib.optionalAttrs (builtins.pathExists "${directory}/machines/${machineName}/settings.json")
(builtins.fromJSON
(builtins.readFile (directory + /machines/${machineName}/settings.json)));
lib.optionalAttrs (builtins.pathExists "${directory}/machines/${machineName}/settings.json") (
builtins.fromJSON (builtins.readFile (directory + /machines/${machineName}/settings.json))
);
# Read additional imports specified via a config option in settings.json
# This is not an infinite recursion, because the imports are discovered here
# before calling evalModules.
# It is still useful to have the imports as an option, as this allows for type
# checking and easy integration with the config frontend(s)
machineImports = machineSettings:
map
(module: clan-core.clanModules.${module})
(machineSettings.clanImports or [ ]);
machineImports =
machineSettings: map (module: clan-core.clanModules.${module}) (machineSettings.clanImports or [ ]);
# TODO: remove default system once we have a hardware-config mechanism
nixosConfiguration = { system ? "x86_64-linux", name, pkgs ? null, extraConfig ? { } }: nixpkgs.lib.nixosSystem {
modules =
let
settings = machineSettings name;
in
(machineImports settings)
++ [
settings
clan-core.nixosModules.clanCore
extraConfig
(machines.${name} or { })
({
clanCore.clanName = clanName;
clanCore.clanIcon = clanIcon;
clanCore.clanDir = directory;
clanCore.machineName = name;
nixpkgs.hostPlatform = lib.mkDefault system;
nixosConfiguration =
{
system ? "x86_64-linux",
name,
pkgs ? null,
extraConfig ? { },
}:
nixpkgs.lib.nixosSystem {
modules =
let
settings = machineSettings name;
in
(machineImports settings)
++ [
settings
clan-core.nixosModules.clanCore
extraConfig
(machines.${name} or { })
(
{
clanCore.clanName = clanName;
clanCore.clanIcon = clanIcon;
clanCore.clanDir = directory;
clanCore.machineName = name;
nixpkgs.hostPlatform = lib.mkDefault system;
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
nix.registry.nixpkgs.to = {
type = "path";
path = lib.mkDefault nixpkgs;
};
} // lib.optionalAttrs (pkgs != null) {
nixpkgs.pkgs = lib.mkForce pkgs;
})
];
inherit specialArgs;
};
# speeds up nix commands by using the nixpkgs from the host system (especially useful in VMs)
nix.registry.nixpkgs.to = {
type = "path";
path = lib.mkDefault nixpkgs;
};
}
// lib.optionalAttrs (pkgs != null) { nixpkgs.pkgs = lib.mkForce pkgs; }
)
];
inherit specialArgs;
};
allMachines = machinesDirs // machines;
@ -77,27 +91,38 @@ let
# This instantiates nixos for each system that we support:
# configPerSystem = <system>.<machine>.nixosConfiguration
# We need this to build nixos secret generators for each system
configsPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs
(name: _: nixosConfiguration {
configsPerSystem = builtins.listToAttrs (
builtins.map (
system:
lib.nameValuePair system (
lib.mapAttrs (
name: _:
nixosConfiguration {
inherit name system;
pkgs = pkgsForSystem system;
})
allMachines))
supportedSystems);
}
) allMachines
)
) supportedSystems
);
configsFuncPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs
(name: _: args: nixosConfiguration (args // {
inherit name system;
pkgs = pkgsForSystem system;
}))
allMachines))
supportedSystems);
configsFuncPerSystem = builtins.listToAttrs (
builtins.map (
system:
lib.nameValuePair system (
lib.mapAttrs (
name: _: args:
nixosConfiguration (
args
// {
inherit name system;
pkgs = pkgsForSystem system;
}
)
) allMachines
)
) supportedSystems
);
in
{
inherit nixosConfigurations;
@ -105,8 +130,11 @@ in
clanInternals = {
machines = configsPerSystem;
machinesFunc = configsFuncPerSystem;
all-machines-json = lib.mapAttrs
(system: configs: nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs))
configsPerSystem;
all-machines-json = lib.mapAttrs (
system: configs:
nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (
lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs
)
) configsPerSystem;
};
}

@ -1,4 +1,9 @@
{ lib, clan-core, nixpkgs, ... }:
{
lib,
clan-core,
nixpkgs,
...
}:
{
jsonschema = import ./jsonschema { inherit lib; };

@ -1,11 +1,11 @@
{ lib
, inputs
, self
, ...
}: {
imports = [
./jsonschema/flake-module.nix
];
{
lib,
inputs,
self,
...
}:
{
imports = [ ./jsonschema/flake-module.nix ];
flake.lib = import ./default.nix {
inherit lib;
inherit (inputs) nixpkgs;

@ -1,243 +1,290 @@
{ lib ? import <nixpkgs/lib>
, excludedTypes ? [
{
lib ? import <nixpkgs/lib>,
excludedTypes ? [
"functionTo"
"package"
]
],
}:
let
# remove _module attribute from options
clean = opts: builtins.removeAttrs opts [ "_module" ];
# throw error if option type is not supported
notSupported = option: lib.trace option throw ''
option type '${option.type.name}' ('${option.type.description}') not supported by jsonschema converter
location: ${lib.concatStringsSep "." option.loc}
'';
notSupported =
option:
lib.trace option throw ''
option type '${option.type.name}' ('${option.type.description}') not supported by jsonschema converter
location: ${lib.concatStringsSep "." option.loc}
'';
isExcludedOption = option: (lib.elem (option.type.name or null) excludedTypes);
filterExcluded = lib.filter (opt: ! isExcludedOption opt);
filterExcluded = lib.filter (opt: !isExcludedOption opt);
filterExcludedAttrs = lib.filterAttrs (_name: opt: ! isExcludedOption opt);
allBasicTypes =
[ "boolean" "integer" "number" "string" "array" "object" "null" ];
filterExcludedAttrs = lib.filterAttrs (_name: opt: !isExcludedOption opt);
allBasicTypes = [
"boolean"
"integer"
"number"
"string"
"array"
"object"
"null"
];
in
rec {
# parses a nixos module to a jsonschema
parseModule = module:
parseModule =
module:
let
evaled = lib.evalModules {
modules = [ module ];
};
evaled = lib.evalModules { modules = [ module ]; };
in
parseOptions evaled.options;
# parses a set of evaluated nixos options to a jsonschema
parseOptions = options':
parseOptions =
options':
let
options = filterExcludedAttrs (clean options');
# parse options to jsonschema properties
properties = lib.mapAttrs (_name: option: parseOption option) options;
# TODO: figure out how to handle if prop.anyOf is used
isRequired = prop: ! (prop ? default || prop.type or null == "object");
isRequired = prop: !(prop ? default || prop.type or null == "object");
requiredProps = lib.filterAttrs (_: prop: isRequired prop) properties;
required = lib.optionalAttrs (requiredProps != { }) {
required = lib.attrNames requiredProps;
};
required = lib.optionalAttrs (requiredProps != { }) { required = lib.attrNames requiredProps; };
in
# return jsonschema
required // {
required
// {
type = "object";
inherit properties;
};
# parses and evaluated nixos option to a jsonschema property definition
parseOption = option:
parseOption =
option:
let
default = lib.optionalAttrs (option ? default) {
inherit (option) default;
};
default = lib.optionalAttrs (option ? default) { inherit (option) default; };
description = lib.optionalAttrs (option ? description) {
description = option.description.text or option.description;
};
in
# either type
# TODO: if all nested options are excluded, the parent should be excluded too
if option.type.name or null == "either"
# TODO: if all nested options are excluded, the parent should be excluded too
if
option.type.name or null == "either"
# return jsonschema property definition for either
then
let
optionsList' = [
{ type = option.type.nestedTypes.left; _type = "option"; loc = option.loc; }
{ type = option.type.nestedTypes.right; _type = "option"; loc = option.loc; }
{
type = option.type.nestedTypes.left;
_type = "option";
loc = option.loc;
}
{
type = option.type.nestedTypes.right;
_type = "option";
loc = option.loc;
}
];
optionsList = filterExcluded optionsList';
in
default // description // {
anyOf = map parseOption optionsList;
}
default // description // { anyOf = map parseOption optionsList; }
# handle nested options (not a submodule)
else if ! option ? _type
then parseOptions option
else if !option ? _type then
parseOptions option
# throw if not an option
else if option._type != "option" && option._type != "option-type"
then throw "parseOption: not an option"
else if option._type != "option" && option._type != "option-type" then
throw "parseOption: not an option"
# parse nullOr
else if option.type.name == "nullOr"
else if
option.type.name == "nullOr"
# return jsonschema property definition for nullOr
then
let
nestedOption =
{ type = option.type.nestedTypes.elemType; _type = "option"; loc = option.loc; };
nestedOption = {
type = option.type.nestedTypes.elemType;
_type = "option";
loc = option.loc;
};
in
default // description // {
anyOf =
[{ type = "null"; }]
++ (
lib.optional (! isExcludedOption nestedOption)
(parseOption nestedOption)
);
default
// description
// {
anyOf = [
{ type = "null"; }
] ++ (lib.optional (!isExcludedOption nestedOption) (parseOption nestedOption));
}
# parse bool
else if option.type.name == "bool"
else if
option.type.name == "bool"
# return jsonschema property definition for bool
then default // description // {
type = "boolean";
}
then
default // description // { type = "boolean"; }
# parse float
else if option.type.name == "float"
else if
option.type.name == "float"
# return jsonschema property definition for float
then default // description // {
type = "number";
}
then
default // description // { type = "number"; }
# parse int
else if (option.type.name == "int" || option.type.name == "positiveInt")
else if
(option.type.name == "int" || option.type.name == "positiveInt")
# return jsonschema property definition for int
then default // description // {
type = "integer";
}
then
default // description // { type = "integer"; }
# parse string
else if option.type.name == "str"
else if
option.type.name == "str"
# return jsonschema property definition for string
then default // description // {
type = "string";
}
then
default // description // { type = "string"; }
# parse string
else if option.type.name == "path"
else if
option.type.name == "path"
# return jsonschema property definition for path
then default // description // {
type = "string";
}
then
default // description // { type = "string"; }
# parse anything
else if option.type.name == "anything"
else if
option.type.name == "anything"
# return jsonschema property definition for anything
then default // description // {
type = allBasicTypes;
}
then
default // description // { type = allBasicTypes; }
# parse unspecified
else if option.type.name == "unspecified"
else if
option.type.name == "unspecified"
# return jsonschema property definition for unspecified
then default // description // {
type = allBasicTypes;
}
then
default // description // { type = allBasicTypes; }
# parse raw
else if option.type.name == "raw"
else if
option.type.name == "raw"
# return jsonschema property definition for raw
then default // description // {
type = allBasicTypes;
}
then
default // description // { type = allBasicTypes; }
# parse enum
else if option.type.name == "enum"
else if
option.type.name == "enum"
# return jsonschema property definition for enum
then default // description // {
enum = option.type.functor.payload;
}
then
default // description // { enum = option.type.functor.payload; }
# parse listOf submodule
else if option.type.name == "listOf" && option.type.functor.wrapped.name == "submodule"
else if
option.type.name == "listOf" && option.type.functor.wrapped.name == "submodule"
# return jsonschema property definition for listOf submodule
then default // description // {
type = "array";
items = parseOptions (option.type.functor.wrapped.getSubOptions option.loc);
}
then
default
// description
// {
type = "array";
items = parseOptions (option.type.functor.wrapped.getSubOptions option.loc);
}
# parse list
else if (option.type.name == "listOf")
else if
(option.type.name == "listOf")
# return jsonschema property definition for list
then
let
nestedOption = { type = option.type.functor.wrapped; _type = "option"; loc = option.loc; };
nestedOption = {
type = option.type.functor.wrapped;
_type = "option";
loc = option.loc;
};
in
default // description // {
default
// description
// {
type = "array";
}
// (lib.optionalAttrs (! isExcludedOption nestedOption) {
items = parseOption nestedOption;
})
// (lib.optionalAttrs (!isExcludedOption nestedOption) { items = parseOption nestedOption; })
# parse list of unspecified
else if
(option.type.name == "listOf")
&& (option.type.functor.wrapped.name == "unspecified")
(option.type.name == "listOf") && (option.type.functor.wrapped.name == "unspecified")
# return jsonschema property definition for list
then default // description // {
type = "array";
}
then
default // description // { type = "array"; }
# parse attrsOf submodule
else if option.type.name == "attrsOf" && option.type.nestedTypes.elemType.name == "submodule"
else if
option.type.name == "attrsOf" && option.type.nestedTypes.elemType.name == "submodule"
# return jsonschema property definition for attrsOf submodule
then default // description // {
type = "object";
additionalProperties = parseOptions (option.type.nestedTypes.elemType.getSubOptions option.loc);
}
then
default
// description
// {
type = "object";
additionalProperties = parseOptions (option.type.nestedTypes.elemType.getSubOptions option.loc);
}
# parse attrs
else if option.type.name == "attrs"
else if
option.type.name == "attrs"
# return jsonschema property definition for attrs
then default // description // {
type = "object";
additionalProperties = true;
}
then
default
// description
// {
type = "object";
additionalProperties = true;
}
# parse attrsOf
# TODO: if nested option is excluded, the parent should be excluded too
else if option.type.name == "attrsOf" || option.type.name == "lazyAttrsOf"
else if
option.type.name == "attrsOf" || option.type.name == "lazyAttrsOf"
# return jsonschema property definition for attrs
then
let
nestedOption = { type = option.type.nestedTypes.elemType; _type = "option"; loc = option.loc; };
nestedOption = {
type = option.type.nestedTypes.elemType;
_type = "option";
loc = option.loc;
};
in
default // description // {
default
// description
// {
type = "object";
additionalProperties =
if ! isExcludedOption nestedOption
then parseOption { type = option.type.nestedTypes.elemType; _type = "option"; loc = option.loc; }
else false;
if !isExcludedOption nestedOption then
parseOption {
type = option.type.nestedTypes.elemType;
_type = "option";
loc = option.loc;
}
else
false;
}
# parse submodule
else if option.type.name == "submodule"
else if
option.type.name == "submodule"
# return jsonschema property definition for submodule
# then (lib.attrNames (option.type.getSubOptions option.loc).opt)
then parseOptions (option.type.getSubOptions option.loc)
then
parseOptions (option.type.getSubOptions option.loc)
# throw error if option type is not supported
else notSupported option;
else
notSupported option;
}
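
As an illustrative aside (not part of this diff), the converter can be exercised the same way the test suite below does; the option name `hostname` and its values are made up:

let
  lib = import <nixpkgs/lib>;
  slib = import ./. { inherit lib; };
  evaled = lib.evalModules {
    modules = [
      {
        options.hostname = lib.mkOption {
          type = lib.types.str;
          default = "machine1";
          description = "Host name";
        };
      }
    ];
  };
in
slib.parseOption evaled.options.hostname
# roughly: { type = "string"; default = "machine1"; description = "Host name"; }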

View File

@ -1,7 +1,6 @@
/*
An example nixos module declaring an interface.
*/
{ lib, ... }: {
# An example nixos module declaring an interface.
{ lib, ... }:
{
options = {
# str
name = lib.mkOption {
@ -44,7 +43,11 @@
# list of str
kernelModules = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "nvme" "xhci_pci" "ahci" ];
default = [
"nvme"
"xhci_pci"
"ahci"
];
description = "A list of enabled kernel modules";
};
};

View File

@ -1,29 +1,31 @@
{
perSystem = { pkgs, ... }: {
checks = {
perSystem =
{ pkgs, ... }:
{
checks = {
# check if the `clan config` example jsonschema and data is valid
lib-jsonschema-example-valid = pkgs.runCommand "lib-jsonschema-example-valid" { } ''
echo "Checking that example-schema.json is valid"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${./.}/example-schema.json
# check if the `clan config` example jsonschema and data is valid
lib-jsonschema-example-valid = pkgs.runCommand "lib-jsonschema-example-valid" { } ''
echo "Checking that example-schema.json is valid"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${./.}/example-schema.json
echo "Checking that example-data.json is valid according to example-schema.json"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--schemafile ${./.}/example-schema.json \
${./.}/example-data.json
echo "Checking that example-data.json is valid according to example-schema.json"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--schemafile ${./.}/example-schema.json \
${./.}/example-data.json
touch $out
'';
touch $out
'';
# check if the `clan config` nix jsonschema converter unit tests succeed
lib-jsonschema-nix-unit-tests = pkgs.runCommand "lib-jsonschema-nix-unit-tests" { } ''
export NIX_PATH=nixpkgs=${pkgs.path}
${pkgs.nix-unit}/bin/nix-unit \
${./.}/test.nix \
--eval-store $(realpath .)
touch $out
'';
# check if the `clan config` nix jsonschema converter unit tests succeed
lib-jsonschema-nix-unit-tests = pkgs.runCommand "lib-jsonschema-nix-unit-tests" { } ''
export NIX_PATH=nixpkgs=${pkgs.path}
${pkgs.nix-unit}/bin/nix-unit \
${./.}/test.nix \
--eval-store $(realpath .)
touch $out
'';
};
};
};
}

View File

@ -1,6 +1,7 @@
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
{
lib ? (import <nixpkgs> { }).lib,
slib ? import ./. { inherit lib; },
}:
{
parseOption = import ./test_parseOption.nix { inherit lib slib; };

View File

@ -1,21 +1,25 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
{
lib ? (import <nixpkgs> { }).lib,
slib ? import ./. { inherit lib; },
}:
let
description = "Test Description";
evalType = type: default:
evalType =
type: default:
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
inherit type;
inherit default;
inherit description;
};
}];
modules = [
{
options.opt = lib.mkOption {
inherit type;
inherit default;
inherit description;
};
}
];
};
in
evaledConfig.options.opt;
@ -25,11 +29,7 @@ in
testNoDefaultNoDescription =
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
type = lib.types.bool;
};
}];
modules = [ { options.opt = lib.mkOption { type = lib.types.bool; }; } ];
};
in
{
@ -42,15 +42,17 @@ in
testDescriptionIsAttrs =
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
type = lib.types.bool;
description = {
_type = "mdDoc";
text = description;
modules = [
{
options.opt = lib.mkOption {
type = lib.types.bool;
description = {
_type = "mdDoc";
text = description;
};
};
};
}];
}
];
};
in
{
@ -112,7 +114,11 @@ in
testEnum =
let
default = "foo";
values = [ "foo" "bar" "baz" ];
values = [
"foo"
"bar"
"baz"
];
in
{
expr = slib.parseOption (evalType (lib.types.enum values) default);
@ -124,7 +130,11 @@ in
testListOfInt =
let
default = [ 1 2 3 ];
default = [
1
2
3
];
in
{
expr = slib.parseOption (evalType (lib.types.listOf lib.types.int) default);
@ -139,14 +149,26 @@ in
testListOfUnspecified =
let
default = [ 1 2 3 ];
default = [
1
2
3
];
in
{
expr = slib.parseOption (evalType (lib.types.listOf lib.types.unspecified) default);
expected = {
type = "array";
items = {
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
type = [
"boolean"
"integer"
"number"
"string"
"array"
"object"
"null"
];
};
inherit default description;
};
@ -154,7 +176,11 @@ in
testAttrs =
let
default = { foo = 1; bar = 2; baz = 3; };
default = {
foo = 1;
bar = 2;
baz = 3;
};
in
{
expr = slib.parseOption (evalType (lib.types.attrs) default);
@ -167,7 +193,11 @@ in
testAttrsOfInt =
let
default = { foo = 1; bar = 2; baz = 3; };
default = {
foo = 1;
bar = 2;
baz = 3;
};
in
{
expr = slib.parseOption (evalType (lib.types.attrsOf lib.types.int) default);
@ -182,7 +212,11 @@ in
testLazyAttrsOfInt =
let
default = { foo = 1; bar = 2; baz = 3; };
default = {
foo = 1;
bar = 2;
baz = 3;
};
in
{
expr = slib.parseOption (evalType (lib.types.lazyAttrsOf lib.types.int) default);
@ -286,7 +320,10 @@ in
inherit description;
};
};
default = { foo.opt = false; bar.opt = true; };
default = {
foo.opt = false;
bar.opt = true;
};
in
{
expr = slib.parseOption (evalType (lib.types.attrsOf (lib.types.submodule subModule)) default);
@ -315,7 +352,10 @@ in
inherit description;
};
};
default = [{ opt = false; } { opt = true; }];
default = [
{ opt = false; }
{ opt = true; }
];
in
{
expr = slib.parseOption (evalType (lib.types.listOf (lib.types.submodule subModule)) default);
@ -358,7 +398,15 @@ in
expr = slib.parseOption (evalType lib.types.anything default);
expected = {
inherit default description;
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
type = [
"boolean"
"integer"
"number"
"string"
"array"
"object"
"null"
];
};
};
@ -370,7 +418,15 @@ in
expr = slib.parseOption (evalType lib.types.unspecified default);
expected = {
inherit default description;
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
type = [
"boolean"
"integer"
"number"
"string"
"array"
"object"
"null"
];
};
};
@ -382,7 +438,15 @@ in
expr = slib.parseOption (evalType lib.types.raw default);
expected = {
inherit default description;
type = [ "boolean" "integer" "number" "string" "array" "object" "null" ];
type = [
"boolean"
"integer"
"number"
"string"
"array"
"object"
"null"
];
};
};
}

View File

@ -1,14 +1,13 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
{
lib ? (import <nixpkgs> { }).lib,
slib ? import ./. { inherit lib; },
}:
let
evaledOptions =
let
evaledConfig = lib.evalModules {
modules = [ ./example-interface.nix ];
};
evaledConfig = lib.evalModules { modules = [ ./example-interface.nix ]; };
in
evaledConfig.options;
in
@ -21,11 +20,7 @@ in
testParseNestedOptions =
let
evaled = lib.evalModules {
modules = [{
options.foo.bar = lib.mkOption {
type = lib.types.bool;
};
}];
modules = [ { options.foo.bar = lib.mkOption { type = lib.types.bool; }; } ];
};
in
{
@ -34,7 +29,9 @@ in
properties = {
foo = {
properties = {
bar = { type = "boolean"; };
bar = {
type = "boolean";
};
};
required = [ "bar" ];
type = "object";

View File

@ -1,45 +1,48 @@
{ lib, ... }:
{
imports = [
./state.nix
];
imports = [ ./state.nix ];
options.clanCore.backups = {
providers = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = ''
Name of the backup provider
'';
};
list = lib.mkOption {
type = lib.types.str;
description = ''
script to list backups
'';
};
restore = lib.mkOption {
type = lib.types.str;
description = ''
script to restore a backup
should take an optional service name as argument
gets ARCHIVE_ID, LOCATION, JOB and FOLDERS as environment variables
ARCHIVE_ID is the id of the backup
LOCATION is the remote identifier of the backup
JOB is the job name of the backup
FOLDERS is a colon separated list of folders to restore
'';
};
create = lib.mkOption {
type = lib.types.str;
description = ''
script to start a backup
'';
};
};
}));
type = lib.types.attrsOf (
lib.types.submodule (
{ name, ... }:
{
options = {
name = lib.mkOption {
type = lib.types.str;
default = name;
description = ''
Name of the backup provider
'';
};
list = lib.mkOption {
type = lib.types.str;
description = ''
script to list backups
'';
};
restore = lib.mkOption {
type = lib.types.str;
description = ''
script to restore a backup
should take an optional service name as argument
gets ARCHIVE_ID, LOCATION, JOB and FOLDERS as environment variables
ARCHIVE_ID is the id of the backup
LOCATION is the remote identifier of the backup
JOB is the job name of the backup
FOLDERS is a colon separated list of folders to restore
'';
};
create = lib.mkOption {
type = lib.types.str;
description = ''
script to start a backup
'';
};
};
}
)
);
default = { };
description = ''
Configured backup providers which are used by this machine

View File
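For orientation only (this is not part of the commit), a provider that satisfies the interface above could look like the following; the provider name and commands are placeholders, and only the restore script relies on the documented ARCHIVE_ID, LOCATION, JOB and FOLDERS variables:

clanCore.backups.providers.example = {
  list = ''
    echo "listing archives"
  '';
  create = ''
    echo "creating a new backup"
  '';
  restore = ''
    echo "restoring archive $ARCHIVE_ID from $LOCATION (job $JOB) into $FOLDERS"
  '';
};
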

@ -1,6 +1,5 @@
{ lib
, ...
}: {
{ lib, ... }:
{
/*
Declaring imports inside the module system does not trigger an infinite
recursion in this case because buildClan generates the imports from the

View File

@ -1 +1,4 @@
{ pkgs, ... }: { documentation.nixos.enable = pkgs.lib.mkDefault false; }
{ pkgs, ... }:
{
documentation.nixos.enable = pkgs.lib.mkDefault false;
}

View File

@ -1,4 +1,5 @@
{ lib, pkgs, ... }: {
{ lib, pkgs, ... }:
{
options.clanCore = {
clanName = lib.mkOption {
type = lib.types.str;

View File

@ -49,7 +49,18 @@
};
imports = [
(lib.mkRenamedOptionModule [ "clan" "networking" "deploymentAddress" ] [ "clan" "networking" "targetHost" ])
(lib.mkRenamedOptionModule
[
"clan"
"networking"
"deploymentAddress"
]
[
"clan"
"networking"
"targetHost"
]
)
];
config = {
# conflicts with systemd-resolved
@ -64,16 +75,18 @@
systemd.network.wait-online.enable = false;
# Provide a default network configuration but don't compete with network-manager or dhcpcd
systemd.network.networks."50-uplink" = lib.mkIf (!(config.networking.networkmanager.enable || config.networking.dhcpcd.enable)) {
matchConfig.Type = "ether";
networkConfig = {
DHCP = "yes";
LLDP = "yes";
LLMNR = "yes";
MulticastDNS = "yes";
IPv6AcceptRA = "yes";
};
};
systemd.network.networks."50-uplink" =
lib.mkIf (!(config.networking.networkmanager.enable || config.networking.dhcpcd.enable))
{
matchConfig.Type = "ether";
networkConfig = {
DHCP = "yes";
LLDP = "yes";
LLMNR = "yes";
MulticastDNS = "yes";
IPv6AcceptRA = "yes";
};
};
# Use networkd instead of the pile of shell scripts
networking.useNetworkd = lib.mkDefault true;

View File

@ -1,4 +1,10 @@
{ pkgs, options, lib, ... }: {
{
pkgs,
options,
lib,
...
}:
{
options.clanCore.optionsNix = lib.mkOption {
type = lib.types.raw;
internal = true;

View File

@ -1,4 +1,10 @@
{ config, lib, pkgs, ... }: {
{
config,
lib,
pkgs,
...
}:
{
# TODO: factor these out into a separate interface.nix.
# Also think about moving these options out of `system.clan`.
# Maybe we should not re-use the already polluted config.system namespace
@ -90,6 +96,8 @@
inherit (config.clan.deployment) requireExplicitUpdate;
inherit (config.clanCore) secretsUploadDirectory;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (builtins.toJSON config.system.clan.deployment.data);
system.clan.deployment.file = pkgs.writeText "deployment.json" (
builtins.toJSON config.system.clan.deployment.data
);
};
}

View File

@ -1,4 +1,5 @@
{ pkgs, ... }: {
{ pkgs, ... }:
{
# essential debugging tools for networked services
environment.systemPackages = [
pkgs.dnsutils

View File

@ -1,7 +1,17 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
{
options.clanCore.secretStore = lib.mkOption {
type = lib.types.enum [ "sops" "password-store" "vm" "custom" ];
type = lib.types.enum [
"sops"
"password-store"
"vm"
"custom"
];
default = "sops";
description = ''
method to store secrets
@ -34,8 +44,8 @@
options.clanCore.secrets = lib.mkOption {
default = { };
type = lib.types.attrsOf
(lib.types.submodule (service: {
type = lib.types.attrsOf (
lib.types.submodule (service: {
options = {
name = lib.mkOption {
type = lib.types.str;
@ -45,55 +55,60 @@
'';
};
generator = lib.mkOption {
type = lib.types.submodule ({ config, ... }: {
options = {
path = lib.mkOption {
type = lib.types.listOf (lib.types.either lib.types.path lib.types.package);
default = [ ];
description = ''
Extra paths to add to the PATH environment variable when running the generator.
'';
};
prompt = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
prompt text to ask for a value.
This value will be passed to the script as the environment variable $prompt_value.
'';
};
script = lib.mkOption {
type = lib.types.str;
description = ''
Script to generate the secret.
The script will be called with the following variables:
- facts: path to a directory where facts can be stored
- secrets: path to a directory where secrets can be stored
The script is expected to generate all secrets and facts defined in the module.
'';
};
finalScript = lib.mkOption {
type = lib.types.str;
readOnly = true;
internal = true;
default = ''
set -eu -o pipefail
type = lib.types.submodule (
{ config, ... }:
{
options = {
path = lib.mkOption {
type = lib.types.listOf (lib.types.either lib.types.path lib.types.package);
default = [ ];
description = ''
Extra paths to add to the PATH environment variable when running the generator.
'';
};
prompt = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
prompt text to ask for a value.
This value will be passed to the script as the environment variable $prompt_value.
'';
};
script = lib.mkOption {
type = lib.types.str;
description = ''
Script to generate the secret.
The script will be called with the following variables:
- facts: path to a directory where facts can be stored
- secrets: path to a directory where secrets can be stored
The script is expected to generate all secrets and facts defined in the module.
'';
};
finalScript = lib.mkOption {
type = lib.types.str;
readOnly = true;
internal = true;
default = ''
set -eu -o pipefail
export PATH="${lib.makeBinPath config.path}:${pkgs.coreutils}/bin"
export PATH="${lib.makeBinPath config.path}:${pkgs.coreutils}/bin"
# prepare sandbox user
mkdir -p /etc
cp ${pkgs.runCommand "fake-etc" {} ''
export PATH="${pkgs.coreutils}/bin"
mkdir -p $out
cp /etc/* $out/
''}/* /etc/
# prepare sandbox user
mkdir -p /etc
cp ${
pkgs.runCommand "fake-etc" { } ''
export PATH="${pkgs.coreutils}/bin"
mkdir -p $out
cp /etc/* $out/
''
}/* /etc/
${config.script}
'';
${config.script}
'';
};
};
};
});
}
);
};
secrets =
let
@ -101,68 +116,77 @@
in
lib.mkOption {
default = { };
type = lib.types.attrsOf (lib.types.submodule ({ config, name, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the secret
'';
default = name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a secret which is generated by the generator
'';
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
};
} // lib.optionalAttrs (config'.clanCore.secretStore == "sops") {
groups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = config'.clanCore.sops.defaultGroups;
description = ''
Groups to decrypt the secret for. By default we always use the user's key.
'';
};
};
}));
type = lib.types.attrsOf (
lib.types.submodule (
{ config, name, ... }:
{
options =
{
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the secret
'';
default = name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a secret which is generated by the generator
'';
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
};
}
// lib.optionalAttrs (config'.clanCore.secretStore == "sops") {
groups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = config'.clanCore.sops.defaultGroups;
description = ''
Groups to decrypt the secret for. By default we always use the user's key.
'';
};
};
}
)
);
description = ''
path where the secret is located in the filesystem
'';
};
facts = lib.mkOption {
default = { };
type = lib.types.attrsOf (lib.types.submodule (fact: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the fact
'';
default = fact.config._module.args.name;
type = lib.types.attrsOf (
lib.types.submodule (fact: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the fact
'';
default = fact.config._module.args.name;
};
path = lib.mkOption {
type = lib.types.path;
description = ''
path to a fact which is generated by the generator
'';
default =
config.clanCore.clanDir
+ "/machines/${config.clanCore.machineName}/facts/${fact.config._module.args.name}";
};
value = lib.mkOption {
defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
type = lib.types.nullOr lib.types.str;
default =
if builtins.pathExists fact.config.path then lib.strings.fileContents fact.config.path else null;
};
};
path = lib.mkOption {
type = lib.types.path;
description = ''
path to a fact which is generated by the generator
'';
default = config.clanCore.clanDir + "/machines/${config.clanCore.machineName}/facts/${fact.config._module.args.name}";
};
value = lib.mkOption {
defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
type = lib.types.nullOr lib.types.str;
default =
if builtins.pathExists fact.config.path then
lib.strings.fileContents fact.config.path
else
null;
};
};
}));
})
);
};
};
}));
})
);
};
imports = [
./sops.nix

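A hedged sketch (not part of this commit) of how a module consumes these options; the zerotier module further down in this diff does the same for real, and the service name `myservice` is hypothetical:

clanCore.secrets.myservice = {
  facts.myservice-public = { };
  secrets.myservice-key = { };
  generator.path = [ pkgs.coreutils ];
  generator.script = ''
    echo "public value" > "$facts"/myservice-public
    echo "secret value" > "$secrets"/myservice-key
  '';
};
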
View File

@ -13,4 +13,3 @@
system.clan.secretsModule = "clan_cli.secrets.modules.password_store";
};
}

View File

@ -1,22 +1,33 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
secretsDir = config.clanCore.clanDir + "/sops/secrets";
groupsDir = config.clanCore.clanDir + "/sops/groups";
# My symlink is detected as a directory in the nixos module, although it works in the repl. Is this because of pure evaluation?
containsSymlink = path:
builtins.pathExists path && (builtins.readFileType path == "directory" || builtins.readFileType path == "symlink");
containsSymlink =
path:
builtins.pathExists path
&& (builtins.readFileType path == "directory" || builtins.readFileType path == "symlink");
containsMachine = parent: name: type:
containsMachine =
parent: name: type:
type == "directory" && containsSymlink "${parent}/${name}/machines/${config.clanCore.machineName}";
containsMachineOrGroups = name: type:
(containsMachine secretsDir name type) || lib.any (group: type == "directory" && containsSymlink "${secretsDir}/${name}/groups/${group}") groups;
containsMachineOrGroups =
name: type:
(containsMachine secretsDir name type)
|| lib.any (
group: type == "directory" && containsSymlink "${secretsDir}/${name}/groups/${group}"
) groups;
filterDir = filter: dir:
lib.optionalAttrs (builtins.pathExists dir)
(lib.filterAttrs filter (builtins.readDir dir));
filterDir =
filter: dir:
lib.optionalAttrs (builtins.pathExists dir) (lib.filterAttrs filter (builtins.readDir dir));
groups = builtins.attrNames (filterDir (containsMachine groupsDir) groupsDir);
secrets = filterDir containsMachineOrGroups secretsDir;
@ -34,17 +45,18 @@ in
clanCore.secretsDirectory = "/run/secrets";
clanCore.secretsPrefix = config.clanCore.machineName + "-";
system.clan.secretsModule = "clan_cli.secrets.modules.sops";
sops.secrets = builtins.mapAttrs
(name: _: {
sopsFile = config.clanCore.clanDir + "/sops/secrets/${name}/secret";
format = "binary";
})
secrets;
sops.secrets = builtins.mapAttrs (name: _: {
sopsFile = config.clanCore.clanDir + "/sops/secrets/${name}/secret";
format = "binary";
}) secrets;
# To get proper error messages about missing secrets we need a dummy secret file that is always present
sops.defaultSopsFile = lib.mkIf config.sops.validateSopsFiles (lib.mkDefault (builtins.toString (pkgs.writeText "dummy.yaml" "")));
sops.defaultSopsFile = lib.mkIf config.sops.validateSopsFiles (
lib.mkDefault (builtins.toString (pkgs.writeText "dummy.yaml" ""))
);
sops.age.keyFile = lib.mkIf (builtins.pathExists (config.clanCore.clanDir + "/sops/secrets/${config.clanCore.machineName}-age.key/secret"))
(lib.mkDefault "/var/lib/sops-nix/key.txt");
sops.age.keyFile = lib.mkIf (builtins.pathExists (
config.clanCore.clanDir + "/sops/secrets/${config.clanCore.machineName}-age.key/secret"
)) (lib.mkDefault "/var/lib/sops-nix/key.txt");
clanCore.secretsUploadDirectory = lib.mkDefault "/var/lib/sops-nix";
};
}

View File

@ -7,4 +7,3 @@
system.clan.factsModule = "clan_cli.facts.modules.vm";
};
}

View File

@ -1,41 +1,43 @@
{ lib, ... }:
{
# defaults
config.clanCore.state.HOME.folders = [
"/home"
];
config.clanCore.state.HOME.folders = [ "/home" ];
# interface
options.clanCore.state = lib.mkOption {
default = { };
type = lib.types.attrsOf
(lib.types.submodule ({ ... }: {
options = {
folders = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
Folders where the state resides
'';
};
preRestoreScript = lib.mkOption {
type = lib.types.str;
default = ":";
description = ''
script to run before restoring the state dir from a backup
type = lib.types.attrsOf (
lib.types.submodule (
{ ... }:
{
options = {
folders = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
Folders where the state resides
'';
};
preRestoreScript = lib.mkOption {
type = lib.types.str;
default = ":";
description = ''
script to run before restoring the state dir from a backup
Utilize this to stop services which currently access these folders
'';
};
postRestoreScript = lib.mkOption {
type = lib.types.str;
default = ":";
description = ''
script to restore the service after the state dir was restored from a backup
Utilize this to stop services which currently access these folders
'';
};
postRestoreScript = lib.mkOption {
type = lib.types.str;
default = ":";
description = ''
script to restore the service after the state dir was restored from a backup
Utilize this to start services which were previously stopped
'';
Utilize this to start services which were previously stopped
'';
};
};
};
}));
}
)
);
};
}
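
A usage sketch (illustrative, not part of this diff); the service name and folder are placeholders:

clanCore.state.myservice = {
  folders = [ "/var/lib/myservice" ];
  preRestoreScript = "systemctl stop myservice";
  postRestoreScript = "systemctl start myservice";
};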

View File

@ -1,12 +1,15 @@
{ lib, config, pkgs, options, extendModules, modulesPath, ... }:
{
lib,
config,
pkgs,
options,
extendModules,
modulesPath,
...
}:
let
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList
(_item: attrs: attrs.folders)
config.clanCore.state
);
stateFolders = lib.flatten (lib.mapAttrsToList (_item: attrs: attrs.folders) config.clanCore.state);
vmModule = {
imports = [
@ -32,7 +35,10 @@ let
# currently needed for system.etc.overlay.enable
boot.kernelPackages = pkgs.linuxPackages_latest;
boot.initrd.systemd.storePaths = [ pkgs.util-linux pkgs.e2fsprogs ];
boot.initrd.systemd.storePaths = [
pkgs.util-linux
pkgs.e2fsprogs
];
boot.initrd.systemd.emergencyAccess = true;
# sysusers is faster than nixos's perl scripts
@ -43,50 +49,72 @@ let
boot.initrd.kernelModules = [ "virtiofs" ];
virtualisation.writableStore = false;
virtualisation.fileSystems = lib.mkForce ({
"/nix/store" = {
device = "nix-store";
options = [ "x-systemd.requires=systemd-modules-load.service" "ro" ];
fsType = "virtiofs";
};
virtualisation.fileSystems = lib.mkForce (
{
"/nix/store" = {
device = "nix-store";
options = [
"x-systemd.requires=systemd-modules-load.service"
"ro"
];
fsType = "virtiofs";
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [ "defaults" "x-systemd.makefs" "nobarrier" "noatime" "nodiratime" "data=writeback" "discard" ];
};
"/" = {
device = "/dev/vda";
fsType = "ext4";
options = [
"defaults"
"x-systemd.makefs"
"nobarrier"
"noatime"
"nodiratime"
"data=writeback"
"discard"
];
};
"/vmstate" = {
device = "/dev/vdb";
options = [ "x-systemd.makefs" "noatime" "nodiratime" "discard" ];
noCheck = true;
fsType = "ext4";
};
"/vmstate" = {
device = "/dev/vdb";
options = [
"x-systemd.makefs"
"noatime"
"nodiratime"
"discard"
];
noCheck = true;
fsType = "ext4";
};
${config.clanCore.secretsUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
} // lib.listToAttrs (map
(folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
})
stateFolders));
${config.clanCore.secretsUploadDirectory} = {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [
"trans=virtio"
"version=9p2000.L"
"cache=loose"
];
};
}
// lib.listToAttrs (
map (
folder:
lib.nameValuePair folder {
device = "/vmstate${folder}";
fsType = "none";
options = [ "bind" ];
}
) stateFolders
)
);
};
# We cannot simply merge the VM config into the current system config, because
# it is not necessarily a VM.
# Instead we use extendModules to create a second instance of the current
# system configuration, and then merge the VM config into that.
vmConfig = extendModules {
modules = [ vmModule ];
};
vmConfig = extendModules { modules = [ vmModule ]; };
in
{
options = {
@ -210,12 +238,14 @@ in
};
# for clan vm create
system.clan.vm = {
create = pkgs.writeText "vm.json" (builtins.toJSON {
initrd = "${vmConfig.config.system.build.initialRamdisk}/${vmConfig.config.system.boot.loader.initrdFile}";
toplevel = vmConfig.config.system.build.toplevel;
regInfo = (pkgs.closureInfo { rootPaths = vmConfig.config.virtualisation.additionalPaths; });
inherit (config.clan.virtualisation) memorySize cores graphics;
});
create = pkgs.writeText "vm.json" (
builtins.toJSON {
initrd = "${vmConfig.config.system.build.initialRamdisk}/${vmConfig.config.system.boot.loader.initrdFile}";
toplevel = vmConfig.config.system.build.toplevel;
regInfo = (pkgs.closureInfo { rootPaths = vmConfig.config.virtualisation.additionalPaths; });
inherit (config.clan.virtualisation) memorySize cores graphics;
}
);
};
virtualisation = lib.optionalAttrs (options.virtualisation ? cores) {

View File

@ -1,4 +1,9 @@
{ pkgs, config, lib, ... }:
{
pkgs,
config,
lib,
...
}:
{
options = {
# maybe upstream this?

View File

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.clan.networking.zerotier;
facts = config.clanCore.secrets.zerotier.facts or { };
@ -76,16 +81,18 @@ in
};
settings = lib.mkOption {
description = lib.mdDoc "override the network config in /var/lib/zerotier/bla/$network.json";
type = lib.types.submodule {
freeformType = (pkgs.formats.json { }).type;
};
type = lib.types.submodule { freeformType = (pkgs.formats.json { }).type; };
};
};
config = lib.mkMerge [
({
# Override license so that we can build zerotierone without
# having to re-import nixpkgs.
services.zerotierone.package = lib.mkDefault (pkgs.zerotierone.overrideAttrs (_old: { meta = { }; }));
services.zerotierone.package = lib.mkDefault (
pkgs.zerotierone.overrideAttrs (_old: {
meta = { };
})
);
})
(lib.mkIf ((facts.zerotier-ip.value or null) != null) {
environment.etc."zerotier/ip".text = facts.zerotier-ip.value;
@ -104,29 +111,33 @@ in
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
"+${pkgs.writeShellScript "init-zerotier" ''
cp ${config.clanCore.secrets.zerotier.secrets.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
zerotier-idtool getpublic /var/lib/zerotier-one/identity.secret > /var/lib/zerotier-one/identity.public
cp ${config.clanCore.secrets.zerotier.secrets.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
zerotier-idtool getpublic /var/lib/zerotier-one/identity.secret > /var/lib/zerotier-one/identity.public
${lib.optionalString (cfg.controller.enable) ''
mkdir -p /var/lib/zerotier-one/controller.d/network
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON cfg.settings)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
''}
${lib.optionalString (cfg.moon.stableEndpoints != []) ''
if [[ ! -f /var/lib/zerotier-one/moon.json ]]; then
zerotier-idtool initmoon /var/lib/zerotier-one/identity.public > /var/lib/zerotier-one/moon.json
fi
${genMoonScript}/bin/genmoon /var/lib/zerotier-one/moon.json ${builtins.toFile "moon.json" (builtins.toJSON cfg.moon.stableEndpoints)} /var/lib/zerotier-one/moons.d
''}
${lib.optionalString (cfg.controller.enable) ''
mkdir -p /var/lib/zerotier-one/controller.d/network
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON cfg.settings)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
''}
${lib.optionalString (cfg.moon.stableEndpoints != [ ]) ''
if [[ ! -f /var/lib/zerotier-one/moon.json ]]; then
zerotier-idtool initmoon /var/lib/zerotier-one/identity.public > /var/lib/zerotier-one/moon.json
fi
${genMoonScript}/bin/genmoon /var/lib/zerotier-one/moon.json ${builtins.toFile "moon.json" (builtins.toJSON cfg.moon.stableEndpoints)} /var/lib/zerotier-one/moons.d
''}
# cleanup old networks
if [[ -d /var/lib/zerotier-one/networks.d ]]; then
find /var/lib/zerotier-one/networks.d \
-type f \
-name "*.conf" \
-not \( ${lib.concatMapStringsSep " -o " (netId: ''-name "${netId}.conf"'') config.services.zerotierone.joinNetworks} \) \
-delete
fi
''}"
# cleanup old networks
if [[ -d /var/lib/zerotier-one/networks.d ]]; then
find /var/lib/zerotier-one/networks.d \
-type f \
-name "*.conf" \
-not \( ${
lib.concatMapStringsSep " -o " (
netId: ''-name "${netId}.conf"''
) config.services.zerotierone.joinNetworks
} \) \
-delete
fi
''}"
];
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "configure-interface" ''
@ -145,7 +156,7 @@ in
${lib.concatMapStringsSep "\n" (moon: ''
zerotier-cli orbit ${moon} ${moon}
'') cfg.moon.orbitMoons}
''}"
''}"
];
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 5353 ]; # mdns
@ -172,7 +183,11 @@ in
facts.zerotier-ip = { };
facts.zerotier-network-id = { };
secrets.zerotier-identity-secret = { };
generator.path = [ config.services.zerotierone.package pkgs.fakeroot pkgs.python3 ];
generator.path = [
config.services.zerotierone.package
pkgs.fakeroot
pkgs.python3
];
generator.script = ''
python3 ${./generate.py} --mode network \
--ip "$facts/zerotier-ip" \
@ -188,7 +203,10 @@ in
clanCore.secrets.zerotier = {
facts.zerotier-ip = { };
secrets.zerotier-identity-secret = { };
generator.path = [ config.services.zerotierone.package pkgs.python3 ];
generator.path = [
config.services.zerotierone.package
pkgs.python3
];
generator.script = ''
python3 ${./generate.py} --mode identity \
--ip "$facts/zerotier-ip" \
@ -200,9 +218,7 @@ in
(lib.mkIf (cfg.controller.enable && (facts.zerotier-network-id.value or null) != null) {
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
clan.networking.zerotier.settings = {
authTokens = [
null
];
authTokens = [ null ];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
@ -242,7 +258,9 @@ in
environment.etc."zerotier/network-id".text = facts.zerotier-network-id.value;
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "whitelist-controller" ''
${config.clanCore.clanPkgs.zerotier-members}/bin/zerotier-members allow ${builtins.substring 0 10 cfg.networkId}
${config.clanCore.clanPkgs.zerotier-members}/bin/zerotier-members allow ${
builtins.substring 0 10 cfg.networkId
}
''}"
];
})

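In practice one machine enables the controller and the others join its network id, as the test flake later in this diff does; `82b44b162ec6c013` is the id used there:

# on the controller machine
clan.networking.zerotier.controller.enable = true;
# on every other machine
clan.networking.zerotier.networkId = "82b44b162ec6c013";
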
View File

@ -1,4 +1,5 @@
{ inputs, self, ... }: {
{ inputs, self, ... }:
{
flake.nixosModules = {
hidden-ssh-announce.imports = [ ./hidden-ssh-announce.nix ];
installer.imports = [
@ -10,9 +11,12 @@
inputs.sops-nix.nixosModules.sops
./clanCore
./iso
({ pkgs, lib, ... }: {
clanCore.clanPkgs = lib.mkDefault self.packages.${pkgs.hostPlatform.system};
})
(
{ pkgs, lib, ... }:
{
clanCore.clanPkgs = lib.mkDefault self.packages.${pkgs.hostPlatform.system};
}
)
];
};
}

View File

@ -1,8 +1,10 @@
{ config
, lib
, pkgs
, ...
}: {
{
config,
lib,
pkgs,
...
}:
{
options.hidden-ssh-announce = {
enable = lib.mkEnableOption "hidden-ssh-announce";
script = lib.mkOption {
@ -32,8 +34,14 @@
};
systemd.services.hidden-ssh-announce = {
description = "announce hidden ssh";
after = [ "tor.service" "network-online.target" ];
wants = [ "tor.service" "network-online.target" ];
after = [
"tor.service"
"network-online.target"
];
wants = [
"tor.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
# ${pkgs.tor}/bin/torify

View File

@ -1,11 +1,11 @@
{ lib
, pkgs
, modulesPath
, ...
}: {
systemd.tmpfiles.rules = [
"d /var/shared 0777 root root - -"
];
{
lib,
pkgs,
modulesPath,
...
}:
{
systemd.tmpfiles.rules = [ "d /var/shared 0777 root root - -" ];
imports = [
(modulesPath + "/profiles/installation-device.nix")
(modulesPath + "/profiles/all-hardware.nix")
@ -21,7 +21,17 @@
enable = true;
script = pkgs.writeShellScript "write-hostname" ''
set -efu
export PATH=${lib.makeBinPath (with pkgs; [ iproute2 coreutils jq qrencode ])}
export PATH=${
lib.makeBinPath (
with pkgs;
[
iproute2
coreutils
jq
qrencode
]
)
}
mkdir -p /var/shared
echo "$1" > /var/shared/onion-hostname

View File

@ -1,4 +1,10 @@
{ config, extendModules, lib, pkgs, ... }:
{
config,
extendModules,
lib,
pkgs,
...
}:
let
# Generates a fileSystems entry for bind mounting a given state folder path
# It binds directories from /var/clanstate/{some-path} to /{some-path}.
@ -13,54 +19,47 @@ let
};
# Flatten the list of state folders into a single list
stateFolders = lib.flatten (
lib.mapAttrsToList
(_item: attrs: attrs.folders)
config.clanCore.state
);
stateFolders = lib.flatten (lib.mapAttrsToList (_item: attrs: attrs.folders) config.clanCore.state);
# A module setting up bind mounts for all state folders
stateMounts = {
fileSystems =
lib.listToAttrs
(map mkBindMount stateFolders);
fileSystems = lib.listToAttrs (map mkBindMount stateFolders);
};
isoModule = { config, ... }: {
imports = [
stateMounts
];
options.clan.iso.disko = lib.mkOption {
type = lib.types.submodule {
freeformType = (pkgs.formats.json { }).type;
};
default = {
disk = {
iso = {
type = "disk";
imageSize = "10G"; # TODO add auto image size in disko
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
isoModule =
{ config, ... }:
{
imports = [ stateMounts ];
options.clan.iso.disko = lib.mkOption {
type = lib.types.submodule { freeformType = (pkgs.formats.json { }).type; };
default = {
disk = {
iso = {
type = "disk";
imageSize = "10G"; # TODO add auto image size in disko
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
@ -68,19 +67,16 @@ let
};
};
};
config = {
disko.devices = lib.mkOverride 51 config.clan.iso.disko;
boot.loader.grub.enable = true;
boot.loader.grub.efiSupport = true;
boot.loader.grub.device = lib.mkForce "/dev/vda";
boot.loader.grub.efiInstallAsRemovable = true;
};
};
config = {
disko.devices = lib.mkOverride 51 config.clan.iso.disko;
boot.loader.grub.enable = true;
boot.loader.grub.efiSupport = true;
boot.loader.grub.device = lib.mkForce "/dev/vda";
boot.loader.grub.efiInstallAsRemovable = true;
};
};
isoConfig = extendModules {
modules = [ isoModule ];
};
isoConfig = extendModules { modules = [ isoModule ]; };
in
{
config = {

View File

@ -1,36 +1,37 @@
{ age
, lib
, argcomplete
, installShellFiles
, nix
, openssh
, pytest
, pytest-cov
, pytest-xdist
, pytest-subprocess
, pytest-timeout
, remote-pdb
, ipdb
, python3
, runCommand
, setuptools
, sops
, stdenv
, wheel
, fakeroot
, rsync
, bash
, sshpass
, zbar
, tor
, git
, nixpkgs
, qemu
, gnupg
, e2fsprogs
, mypy
, rope
, clan-core-path
{
age,
lib,
argcomplete,
installShellFiles,
nix,
openssh,
pytest,
pytest-cov,
pytest-xdist,
pytest-subprocess,
pytest-timeout,
remote-pdb,
ipdb,
python3,
runCommand,
setuptools,
sops,
stdenv,
wheel,
fakeroot,
rsync,
bash,
sshpass,
zbar,
tor,
git,
nixpkgs,
qemu,
gnupg,
e2fsprogs,
mypy,
rope,
clan-core-path,
}:
let
@ -38,19 +39,22 @@ let
argcomplete # optional dependency: if not enabled, shell completion will not work
];
pytestDependencies = runtimeDependencies ++ dependencies ++ [
pytest
pytest-cov
pytest-subprocess
pytest-xdist
pytest-timeout
remote-pdb
ipdb
openssh
git
gnupg
stdenv.cc
];
pytestDependencies =
runtimeDependencies
++ dependencies
++ [
pytest
pytest-cov
pytest-subprocess
pytest-xdist
pytest-timeout
remote-pdb
ipdb
openssh
git
gnupg
stdenv.cc
];
# Optional dependencies for clan cli, we re-expose them here to make sure they all build.
runtimeDependencies = [
@ -70,7 +74,9 @@ let
e2fsprogs
];
runtimeDependenciesAsSet = builtins.listToAttrs (builtins.map (p: lib.nameValuePair (lib.getName p.name) p) runtimeDependencies);
runtimeDependenciesAsSet = builtins.listToAttrs (
builtins.map (p: lib.nameValuePair (lib.getName p.name) p) runtimeDependencies
);
checkPython = python3.withPackages (_ps: pytestDependencies);
@ -121,42 +127,48 @@ python3.pkgs.buildPythonApplication {
propagatedBuildInputs = dependencies;
# also re-expose dependencies so we test them in CI
passthru.tests = (lib.mapAttrs' (n: lib.nameValuePair "clan-dep-${n}") runtimeDependenciesAsSet) // rec {
clan-pytest-without-core = runCommand "clan-pytest-without-core" { nativeBuildInputs = [ checkPython ] ++ pytestDependencies; } ''
cp -r ${source} ./src
chmod +w -R ./src
cd ./src
passthru.tests =
(lib.mapAttrs' (n: lib.nameValuePair "clan-dep-${n}") runtimeDependenciesAsSet)
// rec {
clan-pytest-without-core =
runCommand "clan-pytest-without-core" { nativeBuildInputs = [ checkPython ] ++ pytestDependencies; }
''
cp -r ${source} ./src
chmod +w -R ./src
cd ./src
export NIX_STATE_DIR=$TMPDIR/nix IN_NIX_SANDBOX=1
${checkPython}/bin/python -m pytest -m "not impure and not with_core" ./tests
touch $out
'';
# separate the tests that can never be cached
clan-pytest-with-core = runCommand "clan-pytest-with-core" { nativeBuildInputs = [ checkPython ] ++ pytestDependencies; } ''
cp -r ${source} ./src
chmod +w -R ./src
cd ./src
export NIX_STATE_DIR=$TMPDIR/nix IN_NIX_SANDBOX=1
${checkPython}/bin/python -m pytest -m "not impure and not with_core" ./tests
touch $out
'';
# separate the tests that can never be cached
clan-pytest-with-core =
runCommand "clan-pytest-with-core" { nativeBuildInputs = [ checkPython ] ++ pytestDependencies; }
''
cp -r ${source} ./src
chmod +w -R ./src
cd ./src
export CLAN_CORE=${clan-core-path}
export NIX_STATE_DIR=$TMPDIR/nix IN_NIX_SANDBOX=1
${checkPython}/bin/python -m pytest -m "not impure and with_core" ./tests
touch $out
'';
export CLAN_CORE=${clan-core-path}
export NIX_STATE_DIR=$TMPDIR/nix IN_NIX_SANDBOX=1
${checkPython}/bin/python -m pytest -m "not impure and with_core" ./tests
touch $out
'';
clan-pytest = runCommand "clan-pytest" { } ''
echo ${clan-pytest-without-core}
echo ${clan-pytest-with-core}
touch $out
'';
check-for-breakpoints = runCommand "breakpoints" { } ''
if grep --include \*.py -Rq "breakpoint()" ${source}; then
echo "breakpoint() found in ${source}:"
grep --include \*.py -Rn "breakpoint()" ${source}
exit 1
fi
touch $out
'';
};
clan-pytest = runCommand "clan-pytest" { } ''
echo ${clan-pytest-without-core}
echo ${clan-pytest-with-core}
touch $out
'';
check-for-breakpoints = runCommand "breakpoints" { } ''
if grep --include \*.py -Rq "breakpoint()" ${source}; then
echo "breakpoint() found in ${source}:"
grep --include \*.py -Rn "breakpoint()" ${source}
exit 1
fi
touch $out
'';
};
passthru.nixpkgs = nixpkgs';
passthru.checkPython = checkPython;

View File

@ -1,37 +1,44 @@
{ inputs, self, lib, ... }:
{
perSystem = { self', pkgs, ... }:
inputs,
self,
lib,
...
}:
{
perSystem =
{ self', pkgs, ... }:
let
flakeLock = lib.importJSON (self + /flake.lock);
flakeInputs = (builtins.removeAttrs inputs [ "self" ]);
flakeLockVendoredDeps = flakeLock // {
nodes = flakeLock.nodes // (
lib.flip lib.mapAttrs flakeInputs (name: _: flakeLock.nodes.${name} // {
locked = {
inherit (flakeLock.nodes.${name}.locked) narHash;
lastModified =
# lol, nixpkgs has a different timestamp on the fs???
if name == "nixpkgs"
then 0
else 1;
path = "${inputs.${name}}";
type = "path";
};
})
);
nodes =
flakeLock.nodes
// (lib.flip lib.mapAttrs flakeInputs (
name: _:
flakeLock.nodes.${name}
// {
locked = {
inherit (flakeLock.nodes.${name}.locked) narHash;
lastModified =
# lol, nixpkgs has a different timestamp on the fs???
if name == "nixpkgs" then 0 else 1;
path = "${inputs.${name}}";
type = "path";
};
}
));
};
flakeLockFile = builtins.toFile "clan-core-flake.lock"
(builtins.toJSON flakeLockVendoredDeps);
clanCoreWithVendoredDeps = lib.trace flakeLockFile pkgs.runCommand "clan-core-with-vendored-deps" { } ''
cp -r ${self} $out
chmod +w -R $out
cp ${flakeLockFile} $out/flake.lock
'';
flakeLockFile = builtins.toFile "clan-core-flake.lock" (builtins.toJSON flakeLockVendoredDeps);
clanCoreWithVendoredDeps =
lib.trace flakeLockFile pkgs.runCommand "clan-core-with-vendored-deps" { }
''
cp -r ${self} $out
chmod +w -R $out
cp ${flakeLockFile} $out/flake.lock
'';
in
{
devShells.clan-cli = pkgs.callPackage ./shell.nix {
inherit (self'.packages) clan-cli;
};
devShells.clan-cli = pkgs.callPackage ./shell.nix { inherit (self'.packages) clan-cli; };
packages = {
clan-cli = pkgs.python3.pkgs.callPackage ./default.nix {
inherit (inputs) nixpkgs;
@ -42,5 +49,4 @@
checks = self'.packages.clan-cli.tests;
};
}

View File

@ -1,16 +1,20 @@
{ nix-unit, clan-cli, system, mkShell, writeScriptBin, openssh, ruff, python3 }:
{
nix-unit,
clan-cli,
system,
mkShell,
writeScriptBin,
openssh,
ruff,
python3,
}:
let
checkScript = writeScriptBin "check" ''
nix build .#checks.${system}.{treefmt,clan-pytest} -L "$@"
'';
pythonWithDeps = python3.withPackages (
ps:
clan-cli.propagatedBuildInputs
++ clan-cli.devDependencies
++ [
ps.pip
]
ps: clan-cli.propagatedBuildInputs ++ clan-cli.devDependencies ++ [ ps.pip ]
);
in
mkShell {

View File

@ -1,4 +1,5 @@
{ lib, ... }: {
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";

View File

@ -1,4 +1,5 @@
{ lib, ... }: {
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";

View File

@ -1,4 +1,5 @@
{ lib, ... }: {
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clan.virtualisation.graphics = false;

View File

@ -1,6 +1,5 @@
{ lib
, ...
}: {
{ lib, ... }:
{
options.clan.fake-module.fake-flag = lib.mkOption {
type = lib.types.bool;
default = false;

View File

@ -2,32 +2,41 @@
# this placeholder is replaced by the path to nixpkgs
inputs.nixpkgs.url = "__NIXPKGS__";
outputs = inputs':
outputs =
inputs':
let
# fake clan-core input
fake-clan-core = {
clanModules.fake-module = ./fake-module.nix;
};
inputs = inputs' // { clan-core = fake-clan-core; };
inputs = inputs' // {
clan-core = fake-clan-core;
};
machineSettings = (
if builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE" != ""
then builtins.fromJSON (builtins.readFile (builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE"))
else if builtins.pathExists ./machines/machine1/settings.json
then builtins.fromJSON (builtins.readFile ./machines/machine1/settings.json)
else { }
if builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE" != "" then
builtins.fromJSON (builtins.readFile (builtins.getEnv "CLAN_MACHINE_SETTINGS_FILE"))
else if builtins.pathExists ./machines/machine1/settings.json then
builtins.fromJSON (builtins.readFile ./machines/machine1/settings.json)
else
{ }
);
machineImports = map (module: fake-clan-core.clanModules.${module}) (
machineSettings.clanImports or [ ]
);
machineImports =
map
(module: fake-clan-core.clanModules.${module})
(machineSettings.clanImports or [ ]);
in
{
nixosConfigurations.machine1 = inputs.nixpkgs.lib.nixosSystem {
modules =
machineImports ++ [
./nixosModules/machine1.nix
machineSettings
({ lib, options, pkgs, ... }: {
modules = machineImports ++ [
./nixosModules/machine1.nix
machineSettings
(
{
lib,
options,
pkgs,
...
}:
{
config = {
nixpkgs.hostPlatform = "x86_64-linux";
# speed up by not instantiating nixpkgs twice and disable documentation
@ -51,8 +60,9 @@
The buildClan function will automatically import these modules for the current machine.
'';
};
})
];
}
)
];
};
};
}

View File

@ -1,4 +1,5 @@
{ lib, ... }: {
{ lib, ... }:
{
options.clan.jitsi.enable = lib.mkOption {
type = lib.types.bool;
default = false;

View File

@ -5,40 +5,45 @@
# this placeholder is replaced by the path to nixpkgs
inputs.clan-core.url = "__CLAN_CORE__";
outputs = { self, clan-core }:
outputs =
{ self, clan-core }:
let
clan = clan-core.lib.buildClan {
directory = self;
clanName = "test_flake_with_core";
machines = {
vm1 = { lib, ... }: {
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clanCore.sops.defaultGroups = [ "admins" ];
clan.virtualisation.graphics = false;
vm1 =
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clanCore.sops.defaultGroups = [ "admins" ];
clan.virtualisation.graphics = false;
clan.networking.zerotier.controller.enable = true;
networking.useDHCP = false;
clan.networking.zerotier.controller.enable = true;
networking.useDHCP = false;
systemd.services.shutdown-after-boot = {
enable = true;
wantedBy = [ "multi-user.target" ];
after = [ "multi-user.target" ];
script = ''
#!/usr/bin/env bash
shutdown -h now
'';
systemd.services.shutdown-after-boot = {
enable = true;
wantedBy = [ "multi-user.target" ];
after = [ "multi-user.target" ];
script = ''
#!/usr/bin/env bash
shutdown -h now
'';
};
};
vm2 =
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clan.networking.zerotier.networkId = "82b44b162ec6c013";
};
};
vm2 = { lib, ... }: {
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clanCore.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clan.networking.zerotier.networkId = "82b44b162ec6c013";
};
};
};
in

View File

@ -5,30 +5,33 @@
# this placeholder is replaced by the path to clan-core
inputs.clan-core.url = "__CLAN_CORE__";
outputs = { self, clan-core }:
outputs =
{ self, clan-core }:
let
clan = clan-core.lib.buildClan {
directory = self;
clanName = "test_flake_with_core_and_pass";
machines = {
vm1 = { lib, ... }: {
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clanCore.secretStore = "password-store";
clanCore.secretsUploadDirectory = lib.mkForce "__CLAN_SOPS_KEY_DIR__/secrets";
vm1 =
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clanCore.secretStore = "password-store";
clanCore.secretsUploadDirectory = lib.mkForce "__CLAN_SOPS_KEY_DIR__/secrets";
clan.networking.zerotier.controller.enable = true;
clan.networking.zerotier.controller.enable = true;
systemd.services.shutdown-after-boot = {
enable = true;
wantedBy = [ "multi-user.target" ];
after = [ "multi-user.target" ];
script = ''
#!/usr/bin/env bash
shutdown -h now
'';
systemd.services.shutdown-after-boot = {
enable = true;
wantedBy = [ "multi-user.target" ];
after = [ "multi-user.target" ];
script = ''
#!/usr/bin/env bash
shutdown -h now
'';
};
};
};
};
};
in

View File

@ -5,7 +5,8 @@
# this placeholder is replaced by the path to nixpkgs
inputs.clan-core.url = "__CLAN_CORE__";
outputs = { self, clan-core }:
outputs =
{ self, clan-core }:
let
clan = clan-core.lib.buildClan {
directory = self;
@ -14,9 +15,7 @@
let
machineModules = builtins.readDir (self + "/machines");
in
builtins.mapAttrs
(name: _type: import (self + "/machines/${name}"))
machineModules;
builtins.mapAttrs (name: _type: import (self + "/machines/${name}")) machineModules;
};
in
{

View File

@ -1,16 +1,17 @@
{ python3
, runCommand
, setuptools
, copyDesktopItems
, pygobject3
, wrapGAppsHook
, gtk4
, gnome
, pygobject-stubs
, gobject-introspection
, clan-cli
, makeDesktopItem
, libadwaita
{
python3,
runCommand,
setuptools,
copyDesktopItems,
pygobject3,
wrapGAppsHook,
gtk4,
gnome,
pygobject-stubs,
gobject-introspection,
clan-cli,
makeDesktopItem,
libadwaita,
}:
let
source = ./.;
@ -41,7 +42,11 @@ python3.pkgs.buildPythonApplication {
gobject-introspection
];
buildInputs = [ gtk4 libadwaita gnome.adwaita-icon-theme ];
buildInputs = [
gtk4
libadwaita
gnome.adwaita-icon-theme
];
# We need to propagate the build inputs to nix fmt / treefmt
propagatedBuildInputs = [
@ -73,7 +78,5 @@ python3.pkgs.buildPythonApplication {
checkPhase = ''
PYTHONPATH= $out/bin/clan-vm-manager --help
'';
desktopItems = [
desktop-file
];
desktopItems = [ desktop-file ];
}

View File

@ -1,12 +1,15 @@
{ ... }: {
perSystem = { config, pkgs, ... }: {
devShells.clan-vm-manager = pkgs.callPackage ./shell.nix {
inherit (config.packages) clan-cli clan-vm-manager;
};
packages.clan-vm-manager = pkgs.python3.pkgs.callPackage ./default.nix {
inherit (config.packages) clan-cli;
};
{ ... }:
{
perSystem =
{ config, pkgs, ... }:
{
devShells.clan-vm-manager = pkgs.callPackage ./shell.nix {
inherit (config.packages) clan-cli clan-vm-manager;
};
packages.clan-vm-manager = pkgs.python3.pkgs.callPackage ./default.nix {
inherit (config.packages) clan-cli;
};
checks = config.packages.clan-vm-manager.tests;
};
checks = config.packages.clan-vm-manager.tests;
};
}

View File

@ -1,11 +1,37 @@
{ lib, runCommand, makeWrapper, stdenv, clan-vm-manager, gdb, gtk4, libadwaita, clan-cli, mkShell, ruff, desktop-file-utils, xdg-utils, mypy, python3, python3Packages }:
{
lib,
runCommand,
makeWrapper,
stdenv,
clan-vm-manager,
gdb,
gtk4,
libadwaita,
clan-cli,
mkShell,
ruff,
desktop-file-utils,
xdg-utils,
mypy,
python3,
python3Packages,
}:
mkShell (
let
pygdb = runCommand "pygdb" { buildInputs = [ gdb python3 makeWrapper ]; } ''
mkdir -p "$out/bin"
makeWrapper "${gdb}/bin/gdb" "$out/bin/pygdb" \
--add-flags '-ex "source ${python3}/share/gdb/libpython.py"'
'';
pygdb =
runCommand "pygdb"
{
buildInputs = [
gdb
python3
makeWrapper
];
}
''
mkdir -p "$out/bin"
makeWrapper "${gdb}/bin/gdb" "$out/bin/pygdb" \
--add-flags '-ex "source ${python3}/share/gdb/libpython.py"'
'';
in
{
inherit (clan-vm-manager) propagatedBuildInputs buildInputs;
@ -15,7 +41,6 @@ mkShell (
pygdb
];
# To debug clan-vm-manager, execute pygdb --args python ./bin/clan-vm-manager
nativeBuildInputs = [
ruff

View File

@ -1,38 +1,45 @@
{ ... }: {
{ ... }:
{
imports = [
./clan-cli/flake-module.nix
./clan-vm-manager/flake-module.nix
./installer/flake-module.nix
];
perSystem = { pkgs, config, lib, ... }: {
packages = {
tea-create-pr = pkgs.callPackage ./tea-create-pr { };
zerotier-members = pkgs.callPackage ./zerotier-members { };
zt-tcp-relay = pkgs.callPackage ./zt-tcp-relay { };
merge-after-ci = pkgs.callPackage ./merge-after-ci {
inherit (config.packages) tea-create-pr;
};
pending-reviews = pkgs.callPackage ./pending-reviews { };
} // lib.optionalAttrs pkgs.stdenv.isLinux {
wayland-proxy-virtwl = pkgs.callPackage ./wayland-proxy-virtwl { };
waypipe = pkgs.waypipe.overrideAttrs
(_old: {
# https://gitlab.freedesktop.org/mstoeckl/waypipe
src = pkgs.fetchFromGitLab {
domain = "gitlab.freedesktop.org";
owner = "mstoeckl";
repo = "waypipe";
rev = "4e4ff3bc1943cf7f6aeb56b06c060f40578d3570";
hash = "sha256-dxz4AmeJAweffyPCayvykworQNntHtHeq6PXMXWsM5k=";
};
});
# halalify zerotierone
zerotierone = pkgs.zerotierone.overrideAttrs (_old: {
meta = _old.meta // {
license = lib.licenses.apsl20;
perSystem =
{
pkgs,
config,
lib,
...
}:
{
packages =
{
tea-create-pr = pkgs.callPackage ./tea-create-pr { };
zerotier-members = pkgs.callPackage ./zerotier-members { };
zt-tcp-relay = pkgs.callPackage ./zt-tcp-relay { };
merge-after-ci = pkgs.callPackage ./merge-after-ci { inherit (config.packages) tea-create-pr; };
pending-reviews = pkgs.callPackage ./pending-reviews { };
}
// lib.optionalAttrs pkgs.stdenv.isLinux {
wayland-proxy-virtwl = pkgs.callPackage ./wayland-proxy-virtwl { };
waypipe = pkgs.waypipe.overrideAttrs (_old: {
# https://gitlab.freedesktop.org/mstoeckl/waypipe
src = pkgs.fetchFromGitLab {
domain = "gitlab.freedesktop.org";
owner = "mstoeckl";
repo = "waypipe";
rev = "4e4ff3bc1943cf7f6aeb56b06c060f40578d3570";
hash = "sha256-dxz4AmeJAweffyPCayvykworQNntHtHeq6PXMXWsM5k=";
};
});
# halalify zerotierone
zerotierone = pkgs.zerotierone.overrideAttrs (_old: {
meta = _old.meta // {
license = lib.licenses.apsl20;
};
});
};
});
};
};
}

View File

@ -1,7 +1,7 @@
{ lib
, buildGoModule
, fetchFromGitHub
,
{
lib,
buildGoModule,
fetchFromGitHub,
}:
buildGoModule rec {
pname = "go-ssb";
@ -17,7 +17,10 @@ buildGoModule rec {
vendorHash = "sha256-ZytuWFre7Cz6Qt01tLQoPEuNzDIyoC938OkdIrU8nZo=";
ldflags = [ "-s" "-w" ];
ldflags = [
"-s"
"-w"
];
# tests take very long
doCheck = false;

View File

@ -1,14 +1,16 @@
{ self, lib, ... }:
let
installerModule = { config, pkgs, ... }: {
imports = [
self.nixosModules.installer
self.inputs.nixos-generators.nixosModules.all-formats
];
installerModule =
{ config, pkgs, ... }:
{
imports = [
self.nixosModules.installer
self.inputs.nixos-generators.nixosModules.all-formats
];
system.stateVersion = config.system.nixos.version;
nixpkgs.pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;
};
system.stateVersion = config.system.nixos.version;
nixpkgs.pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;
};
installer = lib.nixosSystem {
modules = [
@ -27,7 +29,9 @@ in
flake.packages.x86_64-linux.install-iso = self.inputs.disko.lib.makeDiskImages {
nixosConfig = installer;
};
flake.nixosConfigurations = { inherit (clan.nixosConfigurations) installer; };
flake.nixosConfigurations = {
inherit (clan.nixosConfigurations) installer;
};
flake.clanInternals = clan.clanInternals;
flake.apps.x86_64-linux.install-vm.program = installer.config.formats.vm.outPath;
flake.apps.x86_64-linux.install-vm-nogui.program = installer.config.formats.vm-nogui.outPath;

View File

@ -1,19 +1,19 @@
{ bash
, callPackage
, coreutils
, git
, lib
, nix
, openssh
, tea
, tea-create-pr
, ...
{
bash,
callPackage,
coreutils,
git,
lib,
nix,
openssh,
tea,
tea-create-pr,
...
}:
let
writers = callPackage ../builders/script-writers.nix { };
in
writers.writePython3Bin "merge-after-ci"
{
writers.writePython3Bin "merge-after-ci" {
makeWrapperArgs = [
"--prefix"
"PATH"
@ -28,6 +28,4 @@ writers.writePython3Bin "merge-after-ci"
tea-create-pr
])
];
}
./merge-after-ci.py
} ./merge-after-ci.py

View File

@ -1,6 +1,7 @@
{ writeShellApplication
, bash
, curl
{
writeShellApplication,
bash,
curl,
}:
writeShellApplication {
name = "pending-reviews";

View File

@ -1,9 +1,10 @@
{ writeShellApplication
, bash
, coreutils
, git
, tea
, openssh
{
writeShellApplication,
bash,
coreutils,
git,
tea,
openssh,
}:
writeShellApplication {
name = "tea-create-pr";

View File

@ -1,4 +1,9 @@
{ wayland-proxy-virtwl, fetchFromGitHub, libdrm, ocaml-ng }:
{
wayland-proxy-virtwl,
fetchFromGitHub,
libdrm,
ocaml-ng,
}:
let
ocaml-wayland = ocaml-ng.ocamlPackages_5_0.wayland.overrideAttrs (_old: {
src = fetchFromGitHub {
@ -16,13 +21,15 @@ wayland-proxy-virtwl.overrideAttrs (_old: {
rev = "652fca9d4e006a2bdeba920dfaf53190c5373a7d";
hash = "sha256-VgpqxjHgueK9eQSX987PF0KvscpzkScOzFkW3haYCOw=";
};
buildInputs = [ libdrm ] ++ (with ocaml-ng.ocamlPackages_5_0; [
ocaml-wayland
dune-configurator
eio_main
ppx_cstruct
cmdliner
logs
ppx_cstruct
]);
buildInputs =
[ libdrm ]
++ (with ocaml-ng.ocamlPackages_5_0; [
ocaml-wayland
dune-configurator
eio_main
ppx_cstruct
cmdliner
logs
ppx_cstruct
]);
})

View File

@ -1,4 +1,8 @@
{ stdenv, python3, lib }:
{
stdenv,
python3,
lib,
}:
stdenv.mkDerivation {
name = "zerotier-members";

View File

@ -1,6 +1,7 @@
{ lib
, rustPlatform
, fetchFromGitHub
{
lib,
rustPlatform,
fetchFromGitHub,
}:
rustPlatform.buildRustPackage {

View File

@ -1,4 +1,5 @@
{ self, ... }: {
{ self, ... }:
{
flake.templates = {
new-clan = {
description = "Initialize a new clan flake";

View File

@ -3,7 +3,8 @@
inputs.clan-core.url = "git+https://git.clan.lol/clan/clan-core";
outputs = { self, clan-core, ... }:
outputs =
{ self, clan-core, ... }:
let
system = "x86_64-linux";
pkgs = clan-core.inputs.nixpkgs.legacyPackages.${system};
@ -17,9 +18,7 @@
inherit (clan) nixosConfigurations clanInternals;
# add the cLAN cli tool to the dev shell
devShells.${system}.default = pkgs.mkShell {
packages = [
clan-core.packages.${system}.clan-cli
];
packages = [ clan-core.packages.${system}.clan-cli ];
};
};
}