Compare commits

..

1 Commits

27 changed files with 75 additions and 315 deletions

View File

@ -27,7 +27,7 @@
self.clanModules.localbackup
self.clanModules.sshd
];
clan.networking.targetHost = "machine";
clan.core.networking.targetHost = "machine";
networking.hostName = "machine";
services.openssh.settings.UseDns = false;

View File

@ -1,7 +1,7 @@
{ self, lib, ... }:
{
clan.machines.test_install_machine = {
clan.networking.targetHost = "test_install_machine";
clan.core.networking.targetHost = "test_install_machine";
fileSystems."/".device = lib.mkDefault "/dev/vdb";
boot.loader.grub.device = lib.mkDefault "/dev/vdb";

View File

@ -98,7 +98,7 @@
settings = {
options = {
urAccepted = -1;
allowedNetworks = [ config.clan.networking.zerotier.subnet ];
allowedNetworks = [ config.clan.core.networking.zerotier.subnet ];
};
devices =
{ }

View File

@ -57,7 +57,7 @@ in
) filteredMachines
);
in
lib.mkIf (config.clan.networking.zerotier.controller.enable) {
lib.mkIf (config.clan.core.networking.zerotier.controller.enable) {
wantedBy = [ "multi-user.target" ];
after = [ "zerotierone.service" ];
path = [ config.clan.core.clanPkgs.zerotierone ];
@ -69,5 +69,5 @@ in
'';
};
config.clan.networking.zerotier.networkId = lib.mkDefault networkId;
config.clan.core.networking.zerotier.networkId = lib.mkDefault networkId;
}

View File

@ -14,7 +14,6 @@ markdown_extensions:
- attr_list
- footnotes
- md_in_html
- def_list
- meta
- plantuml_markdown
- pymdownx.emoji:
@ -50,8 +49,6 @@ nav:
- Mesh VPN: getting-started/mesh-vpn.md
- Backup & Restore: getting-started/backups.md
- Flake-parts: getting-started/flake-parts.md
- Concepts:
- Configuration: concepts/configuration.md
- Reference:
- Clan Modules:
- reference/clanModules/borgbackup-static.md
@ -97,6 +94,8 @@ nav:
- reference/clan-core/facts.md
- reference/clan-core/sops.md
- reference/clan-core/state.md
- reference/clan-core/deployment.md
- reference/clan-core/networking.md
- Contributing: contributing/contributing.md
docs_dir: site

View File

@ -1,114 +0,0 @@
# Configuration
## Introduction
Machine configuration can be managed in many possible ways,
ranging from writing `nix` expressions in a `flake.nix` file; placing `autoincluded` files into your machine directory; or configuring everything in a simple UI (upcoming).
Clan currently offers the following methods to configure machines:
!!! Success "Recommended for nix people"
- flake.nix (i.e. via `buildClan`)
- `machine` argument
- `inventory` argument
- machines/`machine_name`/configuration.nix (`autoincluded` if it exists)
???+ Note "Used by CLI & UI"
- inventory.json
- machines/`machine_name`/hardware-configuration.nix (`autoincluded` if it exists)
!!! Warning "Deprecated"
machines/`machine_name`/settings.json
## BuildClan
The core function that produces a clan. It returns a set of consistent configurations for all machines with ready-to-use secrets, backups and other services.
### Inputs
`directory`
: The directory containing the machines subdirectory
`machines`
: Allows to include machine-specific modules i.e. machines.${name} = { ... }
`meta`
: An optional set
: `{ name :: string, icon :: string, description :: string }`
`inventory`
: Service set for easily configuring distributed services, such as backups
: For more details see [Inventory](#inventory)
`specialArgs`
: Extra arguments to pass to nixosSystem i.e. useful to make self available
`pkgsForSystem`
: A function that maps from architecture to pkgs; if specified, nixpkgs will only be imported once per system.
This improves performance, but all nixpkgs.* options will be ignored.
`(string -> pkgs )`
## Inventory
`Inventory` is an abstract service layer for consistently configuring distributed services across machine boundaries.
The following is the specification of the inventory in `cuelang`
```cue
{
meta: {
// A name of the clan (primarily shown by the UI)
name: string
// A description of the clan
description?: string
// The icon path
icon?: string
}
// A map of services
services: [string]: [string]: {
// Required meta fields
meta: {
name: string,
icon?: string
description?: string,
},
// Machines are added via the available roles
// Membership depends only on this field
roles: [string]: {
machines: [...string],
tags: [...string],
}
machines?: {
[string]: {
config?: {
...
}
}
},
// Global Configuration for the service
// Applied to all machines.
config?: {
// Schema depends on the module.
// It declares the interface how the service can be configured.
...
}
}
// A map of machines, extends the machines of `buildClan`
machines: [string]: {
name: string,
description?: string,
icon?: string
tags: [...string]
system: string
}
}
```

View File

@ -89,7 +89,7 @@ Adding or configuring a new machine requires two simple steps:
# Change this to the correct ip-address or hostname
# The hostname is the machine name by default
clan.networking.targetHost = pkgs.lib.mkDefault "root@jon"
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@jon"
# Change this to the ID-LINK of the desired disk shown by 'lsblk'
disko.devices.disk.main = {
@ -122,7 +122,7 @@ Adding or configuring a new machine requires two simple steps:
# Change this to the correct ip-address or hostname
# The hostname is the machine name by default
clan.networking.targetHost = pkgs.lib.mkDefault "root@jon"
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@jon"
# Change this to the ID-LINK of the desired disk shown by 'lsblk'
disko.devices.disk.main = {

View File

@ -160,7 +160,7 @@ buildClan {
# Set this for clan commands use ssh i.e. `clan machines update`
# If you change the hostname, you need to update this line to root@<new-hostname>
# This only works however if you have avahi running on your admin machine else use IP
clan.networking.targetHost = pkgs.lib.mkDefault "root@jon";
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@jon";
};
};
};
@ -197,7 +197,7 @@ buildClan {
# ...
machines = {
"jon" = {
clan.networking.buildHost = "root@<host_or_ip>";
clan.core.networking.buildHost = "root@<host_or_ip>";
};
};
};

View File

@ -75,7 +75,7 @@ Below is a guide on how to structure this in your flake.nix:
nixpkgs.hostPlatform = "x86_64-linux";
# Set this for clan commands use ssh i.e. `clan machines update`
clan.networking.targetHost = pkgs.lib.mkDefault "root@jon";
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@jon";
# remote> lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
disko.devices.disk.main = {
@ -83,7 +83,7 @@ Below is a guide on how to structure this in your flake.nix:
};
# There needs to be exactly one controller per clan
clan.networking.zerotier.controller.enable = true;
clan.core.networking.zerotier.controller.enable = true;
};
};

View File

@ -29,7 +29,7 @@ peers. Once addresses are allocated, the controller's continuous operation is no
2. **Add Configuration**: Input the following configuration to the NixOS
configuration of the controller machine:
```nix
clan.networking.zerotier.controller = {
clan.core.networking.zerotier.controller = {
enable = true;
public = true;
};
@ -48,7 +48,7 @@ To introduce a new machine to the VPN, adhere to the following steps:
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
clan.networking.zerotier.networkId = builtins.readFile (config.clan.core.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
clan.core.networking.zerotier.networkId = builtins.readFile (config.clan.core.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
1. **Update the New Machine**: Execute:

View File

@ -1,7 +1,4 @@
{
"meta": {
"name": "Minimal inventory"
},
"machines": {
"minimal-inventory-machine": {
"name": "foo",

View File

@ -152,13 +152,6 @@ let
in
(machineImports settings)
++ [
{
# Autoinclude configuration.nix and hardware-configuration.nix
imports = builtins.filter (p: builtins.pathExists p) [
"${directory}/machines/${name}/configuration.nix"
"${directory}/machines/${name}/hardware-configuration.nix"
];
}
settings
clan-core.nixosModules.clanCore
extraConfig

View File

@ -5,7 +5,6 @@ package schema
description?: string,
icon?: string
tags: [...string]
system?: string
}
#role: string

View File

@ -1,6 +1,6 @@
{ config, lib, ... }:
{
options.clan = {
options.clan.core = {
networking = {
targetHost = lib.mkOption {
description = ''
@ -49,14 +49,43 @@
};
imports = [
# TODO: use mkRenamedOptionModule once this is fixed: https://github.com/NixOS/nixpkgs/issues/324802
(lib.doRename rec {
from = [
"clan"
"networking"
];
to = [
"clan"
"core"
"networking"
];
visible = false;
warn = true;
use = lib.trace "Obsolete option `${lib.showOption from}' is used. It was renamed to `${lib.showOption to}'.";
withPriority = false;
})
(lib.mkRenamedOptionModule
[
"clan"
"deployment"
]
[
"clan"
"core"
"deployment"
]
)
(lib.mkRenamedOptionModule
[
"clan"
"core"
"networking"
"deploymentAddress"
]
[
"clan"
"core"
"networking"
"targetHost"
]

View File

@ -73,7 +73,7 @@
services
;
};
inherit (config.clan.networking) targetHost buildHost;
inherit (config.clan.core.networking) targetHost buildHost;
inherit (config.clan.deployment) requireExplicitUpdate;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (

View File

@ -5,7 +5,7 @@
...
}:
let
cfg = config.clan.networking.zerotier;
cfg = config.clan.core.networking.zerotier;
facts = config.clan.core.facts.services.zerotier.public or { };
genMoonScript = pkgs.runCommand "genmoon" { nativeBuildInputs = [ pkgs.python3 ]; } ''
install -Dm755 ${./genmoon.py} $out/bin/genmoon
@ -13,7 +13,7 @@ let
'';
in
{
options.clan.networking.zerotier = {
options.clan.core.networking.zerotier = {
networkId = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
@ -231,8 +231,8 @@ in
};
})
(lib.mkIf (cfg.controller.enable && (facts.zerotier-network-id.value or null) != null) {
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
clan.networking.zerotier.settings = {
clan.core.networking.zerotier.networkId = facts.zerotier-network-id.value;
clan.core.networking.zerotier.settings = {
authTokens = [ null ];
authorizationEndpoint = "";
capabilities = [ ];

View File

@ -76,7 +76,6 @@ def type_to_dict(t: Any, scope: str = "", type_map: dict[TypeVar, type] = {}) ->
properties = {
f.name: type_to_dict(f.type, f"{scope} {t.__name__}.{f.name}", type_map)
for f in fields
if not f.name.startswith("_")
}
required = set()
@ -128,7 +127,7 @@ def type_to_dict(t: Any, scope: str = "", type_map: dict[TypeVar, type] = {}) ->
if origin is None:
# Non-generic user-defined or built-in type
# TODO: handle custom types
raise JSchemaTypeError(f"{scope} Unhandled Type: ", origin)
raise JSchemaTypeError("Unhandled Type: ", origin)
elif origin is Literal:
# Handle Literal values for enums in JSON Schema
@ -173,7 +172,7 @@ def type_to_dict(t: Any, scope: str = "", type_map: dict[TypeVar, type] = {}) ->
new_map.update(inspect_dataclass_fields(t))
return type_to_dict(origin, scope, new_map)
raise JSchemaTypeError(f"{scope} - Error api type not yet supported {t!s}")
raise JSchemaTypeError(f"Error api type not yet supported {t!s}")
elif isinstance(t, type):
if t is str:
@ -188,7 +187,7 @@ def type_to_dict(t: Any, scope: str = "", type_map: dict[TypeVar, type] = {}) ->
return {"type": "object"}
if t is Any:
raise JSchemaTypeError(
f"{scope} - Usage of the Any type is not supported for API functions. In: {scope}"
f"Usage of the Any type is not supported for API functions. In: {scope}"
)
if t is pathlib.Path:
return {
@ -197,13 +196,13 @@ def type_to_dict(t: Any, scope: str = "", type_map: dict[TypeVar, type] = {}) ->
}
if t is dict:
raise JSchemaTypeError(
f"{scope} - Generic 'dict' type not supported. Use dict[str, Any] or any more expressive type."
"Error: generic dict type not supported. Use dict[str, Any] instead"
)
# Optional[T] gets internally transformed Union[T,NoneType]
if t is NoneType:
return {"type": "null"}
raise JSchemaTypeError(f"{scope} - Error primitive type not supported {t!s}")
raise JSchemaTypeError(f"Error primitive type not supported {t!s}")
else:
raise JSchemaTypeError(f"{scope} - Error type not supported {t!s}")
raise JSchemaTypeError(f"Error type not supported {t!s}")

View File

@ -51,7 +51,7 @@ def show_machine_deployment_target(
[
f"{clan_dir}#clanInternals.machines.{system}.{machine_name}",
"--apply",
"machine: { inherit (machine.config.clan.networking) targetHost; }",
"machine: { inherit (machine.config.clan.core.networking) targetHost; }",
"--json",
]
)

View File

@ -20,7 +20,7 @@ class Machine:
name: str
flake: FlakeId
nix_options: list[str] = field(default_factory=list)
cached_deployment: None | dict[str, Any] = None
cached_deployment: None | dict = None
_eval_cache: dict[str, str] = field(default_factory=dict)
_build_cache: dict[str, Path] = field(default_factory=dict)
@ -53,7 +53,7 @@ class Machine:
"deploymentAddress"
)
if val is None:
msg = f"the 'clan.networking.targetHost' nixos option is not set for machine '{self.name}'"
msg = f"the 'clan.core.networking.targetHost' nixos option is not set for machine '{self.name}'"
raise ClanError(msg)
return val

View File

@ -161,7 +161,7 @@ def update(args: argparse.Namespace) -> None:
if not machines and ignored_machines != []:
print(
"WARNING: No machines to update. The following defined machines were ignored because they do not have `clan.networking.targetHost` nixos option set:",
"WARNING: No machines to update. The following defined machines were ignored because they do not have `clan.core.networking.targetHost` nixos option set:",
file=sys.stderr,
)
for machine in ignored_machines:

View File

@ -1,12 +1,12 @@
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clan.core.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clan.virtualisation.graphics = false;
clan.networking.zerotier.controller.enable = true;
clan.core.networking.zerotier.controller.enable = true;
networking.useDHCP = false;
systemd.services.shutdown-after-boot = {

View File

@ -1,12 +1,12 @@
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clan.core.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clan.virtualisation.graphics = false;
clan.networking.zerotier.controller.enable = true;
clan.core.networking.zerotier.controller.enable = true;
networking.useDHCP = false;
systemd.services.shutdown-after-boot = {

View File

@ -1,6 +1,6 @@
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clan.virtualisation.graphics = false;

View File

@ -1,142 +0,0 @@
import ast
import importlib.util
import os
import sys
from dataclasses import is_dataclass
from pathlib import Path
from clan_cli.api.util import JSchemaTypeError, type_to_dict
from clan_cli.errors import ClanError
def find_dataclasses_in_directory(
directory: Path, exclude_paths: list[str] = []
) -> list[tuple[str, str]]:
"""
Find all dataclass classes in all Python files within a nested directory.
Args:
directory (str): The root directory to start searching from.
Returns:
List[Tuple[str, str]]: A list of tuples containing the file path and the dataclass name.
"""
dataclass_files = []
excludes = [os.path.join(directory, d) for d in exclude_paths]
for root, _, files in os.walk(directory, topdown=False):
for file in files:
if not file.endswith(".py"):
continue
file_path = os.path.join(root, file)
if file_path in excludes:
print(f"Skipping dataclass check for file: {file_path}")
continue
with open(file_path, encoding="utf-8") as f:
try:
tree = ast.parse(f.read(), filename=file_path)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
for deco in node.decorator_list:
if (
isinstance(deco, ast.Name)
and deco.id == "dataclass"
):
dataclass_files.append((file_path, node.name))
elif (
isinstance(deco, ast.Call)
and isinstance(deco.func, ast.Name)
and deco.func.id == "dataclass"
):
dataclass_files.append((file_path, node.name))
except (SyntaxError, UnicodeDecodeError) as e:
print(f"Error parsing {file_path}: {e}")
return dataclass_files
def load_dataclass_from_file(
file_path: str, class_name: str, root_dir: str
) -> type | None:
"""
Load a dataclass from a given file path.
Args:
file_path (str): Path to the file.
class_name (str): Name of the class to load.
Returns:
List[Type]: The dataclass type if found, else an empty list.
"""
module_name = (
os.path.relpath(file_path, root_dir).replace(os.path.sep, ".").rstrip(".py")
)
try:
sys.path.insert(0, root_dir)
spec = importlib.util.spec_from_file_location(module_name, file_path)
if not spec:
raise ClanError(f"Could not load spec from file: {file_path}")
module = importlib.util.module_from_spec(spec)
if not module:
raise ClanError(f"Could not create module: {file_path}")
if not spec.loader:
raise ClanError(f"Could not load loader from spec: {spec}")
spec.loader.exec_module(module)
finally:
sys.path.pop(0)
dataclass_type = getattr(module, class_name, None)
if dataclass_type and is_dataclass(dataclass_type):
return dataclass_type
raise ClanError(f"Could not load dataclass {class_name} from file: {file_path}")
def test_all_dataclasses() -> None:
    """
    Ensure that all dataclasses in the clan_cli package are API-compatible.

    Walks the clan_cli directory, imports every dataclass found, and converts
    each one to a JSON schema via type_to_dict. Fails with a ClanError if any
    dataclass cannot be converted, i.e. is not representable over the API.
    """
    # Excludes:
    # - API includes Type Generic wrappers, that are not known in the init file.
    excludes = ["api/__init__.py"]
    # NOTE(review): resolves "clan_cli" relative to the current working
    # directory — assumes the test runner is started from the package root;
    # TODO confirm against the test invocation.
    cli_path = Path("clan_cli").resolve()
    dataclasses = find_dataclasses_in_directory(cli_path, excludes)
    for file, dataclass in dataclasses:
        print(f"checking dataclass {dataclass} in file: {file}")
        try:
            # Import the class for real (not just its AST) so type_to_dict can
            # inspect the resolved field types.
            dclass = load_dataclass_from_file(file, dataclass, str(cli_path.parent))
            type_to_dict(dclass)
        except JSchemaTypeError as e:
            # Re-raise with an actionable message naming the offending class.
            print(f"Error loading dataclass {dataclass} from {file}: {e}")
            raise ClanError(
                f"""
--------------------------------------------------------------------------------
Error converting dataclass 'class {dataclass}()' from {file}
Details:
{e}
Help:
- Converting public fields to PRIVATE by prefixing them with underscore ('_')
- Ensure all private fields are initialized the API wont provide initial values for them.
--------------------------------------------------------------------------------
""",
                location=__file__,
            )

View File

@ -15,14 +15,14 @@
vm1 =
{ lib, ... }:
{
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clan.core.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clan.core.sops.defaultGroups = [ "admins" ];
clan.virtualisation.graphics = false;
clan.networking.zerotier.controller.enable = true;
clan.core.networking.zerotier.controller.enable = true;
networking.useDHCP = false;
systemd.services.shutdown-after-boot = {
@ -45,11 +45,11 @@
];
clan.user-password.user = "alice";
clan.user-password.prompt = false;
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
sops.age.keyFile = "__CLAN_SOPS_KEY_PATH__";
clan.core.secretsUploadDirectory = "__CLAN_SOPS_KEY_DIR__";
clan.networking.zerotier.networkId = "82b44b162ec6c013";
clan.core.networking.zerotier.networkId = "82b44b162ec6c013";
};
};
};

View File

@ -23,12 +23,12 @@
clan.user-password.user = "alice";
clan.user-password.prompt = false;
clan.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
clan.core.networking.targetHost = "__CLAN_TARGET_ADDRESS__";
system.stateVersion = lib.version;
clan.core.secretStore = "password-store";
clan.core.secretsUploadDirectory = lib.mkForce "__CLAN_SOPS_KEY_DIR__/secrets";
clan.networking.zerotier.controller.enable = true;
clan.core.networking.zerotier.controller.enable = true;
systemd.services.shutdown-after-boot = {
enable = true;

View File

@ -42,7 +42,7 @@
# Set this for clan commands use ssh i.e. `clan machines update`
# If you change the hostname, you need to update this line to root@<new-hostname>
# This only works however if you have avahi running on your admin machine else use IP
clan.networking.targetHost = pkgs.lib.mkDefault "root@jon";
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@jon";
# ssh root@flash-installer.local lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
disko.devices.disk.main = {
@ -58,7 +58,7 @@
# Zerotier needs one controller to accept new nodes. Once accepted
# the controller can be offline and routing still works.
clan.networking.zerotier.controller.enable = true;
clan.core.networking.zerotier.controller.enable = true;
};
# "sara" will be the hostname of the machine
sara = {
@ -72,7 +72,7 @@
# Set this for clan commands use ssh i.e. `clan machines update`
# If you change the hostname, you need to update this line to root@<new-hostname>
# This only works however if you have avahi running on your admin machine else use IP
clan.networking.targetHost = pkgs.lib.mkDefault "root@sara";
clan.core.networking.targetHost = pkgs.lib.mkDefault "root@sara";
# ssh root@flash-installer.local lsblk --output NAME,ID-LINK,FSTYPE,SIZE,MOUNTPOINT
disko.devices.disk.main = {
@ -91,7 +91,7 @@
This will allow sara to share the VPN overlay network with jon
The networkId is generated by the first deployment of jon
*/
# clan.networking.zerotier.networkId = builtins.readFile ../jon/facts/zerotier-network-id;
# clan.core.networking.zerotier.networkId = builtins.readFile ../jon/facts/zerotier-network-id;
};
};
};