Compare commits


No commits in common. "main" and "flake-update-2024-04-22" have entirely different histories.

124 changed files with 9912 additions and 2785 deletions

View File

@@ -0,0 +1,11 @@
+name: checks
+on:
+  pull_request:
+  push:
+    branches: main
+jobs:
+  test:
+    runs-on: nix
+    steps:
+      - uses: actions/checkout@v3
+      - run: nix run --refresh github:Mic92/nix-fast-build -- --no-nom --eval-workers 10
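A note on the `runs-on: nix` label: it is not a stock GitHub/Gitea runner label but matches the self-hosted Gitea Actions runners registered in the actions-runner module further down in this compare, e.g.:

```nix
# From the gitea-actions-runner config later in this compare; the instances
# are generated as nix0, nix1, ... and advertise this label:
services.gitea-actions-runner.instances.nix0.labels = [ "nix:docker://gitea-runner-nix" ];
```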

View File

@@ -1,18 +1,7 @@
 Copyright 2023 Clan contributers
 
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -4,10 +4,8 @@ This repository contains nixos modules and terraform code that powers clan.lol.
 The website and git hosting is currently on [hetzner](https://www.hetzner.com/).
 
 ## Servers
 
 - web01:
-  - Instance type:
-    [ex101](https://www.hetzner.com/de/dedicated-rootserver/ex101)
+  - Instance type: [ex101](https://www.hetzner.com/de/dedicated-rootserver/ex101)
   - CPU: Intel Core i9-13900 (24 cores / 32 threads)
   - RAM: 64GB DDR5
   - Drives: 2 x 1.92 TB NVME
@@ -25,10 +23,3 @@ $ ./tf.sh apply
 $ cd ./targets/web01
 $ ./tf.sh apply
 ```
-
-## To add a new project to CI
-
-1. Add the 'buildbot-clan' topic to the repository using the "Manage topics"
-   button below the project description
-2. Go to https://buildbot.clan.lol/#/builders/2 and press "Update projects"
-   after you have logged in.

View File

@@ -1,14 +1,14 @@
 {
   perSystem =
-    { inputs', pkgs, ... }:
+    { inputs'
+    , pkgs
+    , lib
+    , ...
+    }:
     let
-      convert2Tofu =
-        provider:
-        provider.override (prev: {
-          homepage = builtins.replaceStrings [ "registry.terraform.io/providers" ] [
-            "registry.opentofu.org"
-          ] prev.homepage;
-        });
+      convert2Tofu = provider: provider.override (prev: {
+        homepage = builtins.replaceStrings [ "registry.terraform.io/providers" ] [ "registry.opentofu.org" ] prev.homepage;
+      });
     in
     {
       devShells.default = pkgs.mkShellNoCC {
@@ -18,18 +18,17 @@
         inputs'.clan-core.packages.clan-cli
-        (pkgs.opentofu.withPlugins (
-          p:
-          builtins.map convert2Tofu [
-            p.hetznerdns
-            p.hcloud
-            p.null
-            p.external
-            p.local
-          ]
-        ))
-      ];
-      inputsFrom = [ inputs'.clan-core.devShells.default ];
+        (pkgs.opentofu.withPlugins (p: builtins.map convert2Tofu [
+          p.hetznerdns
+          p.hcloud
+          p.null
+          p.external
+          p.local
+        ]))
+      ];
+      inputsFrom = [
+        inputs'.clan-core.devShells.default
+      ];
     };
   };
 }
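For readers skimming the hunk above: `convert2Tofu` only rewrites each provider's homepage so the OpenTofu plugin machinery accepts registry.terraform.io providers. A minimal sketch of the string rewrite it performs (the input URL here is a hypothetical example):

```nix
builtins.replaceStrings
  [ "registry.terraform.io/providers" ]
  [ "registry.opentofu.org" ]
  "https://registry.terraform.io/providers/hetznercloud/hcloud"
# => "https://registry.opentofu.org/hetznercloud/hcloud"
```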

View File

@@ -1,47 +1,5 @@
 {
   "nodes": {
-    "blobs": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1604995301,
-        "narHash": "sha256-wcLzgLec6SGJA8fx1OEN1yV/Py5b+U5iyYpksUY/yLw=",
-        "owner": "simple-nixos-mailserver",
-        "repo": "blobs",
-        "rev": "2cccdf1ca48316f2cfd1c9a0017e8de5a7156265",
-        "type": "gitlab"
-      },
-      "original": {
-        "owner": "simple-nixos-mailserver",
-        "repo": "blobs",
-        "type": "gitlab"
-      }
-    },
-    "buildbot-nix": {
-      "inputs": {
-        "flake-parts": [
-          "flake-parts"
-        ],
-        "nixpkgs": [
-          "nixpkgs"
-        ],
-        "treefmt-nix": [
-          "treefmt-nix"
-        ]
-      },
-      "locked": {
-        "lastModified": 1719797756,
-        "narHash": "sha256-TGZthxgxLdT8boadFm6+MK7HZlIxN1u1V+x3hu+Fd8I=",
-        "owner": "Mic92",
-        "repo": "buildbot-nix",
-        "rev": "0b56574a5c823097771487d1bac952c3549fe9fb",
-        "type": "github"
-      },
-      "original": {
-        "owner": "Mic92",
-        "repo": "buildbot-nix",
-        "type": "github"
-      }
-    },
     "clan-core": {
       "inputs": {
         "disko": "disko",
@@ -49,25 +7,29 @@
           "flake-parts"
         ],
         "nixos-generators": "nixos-generators",
-        "nixos-images": "nixos-images",
         "nixpkgs": [
           "nixpkgs"
         ],
-        "sops-nix": "sops-nix",
+        "sops-nix": [
+          "sops-nix"
+        ],
         "treefmt-nix": [
           "treefmt-nix"
         ]
       },
       "locked": {
-        "lastModified": 1720009622,
-        "narHash": "sha256-uA0FvklUt4M3yrNaSecCFgxXjnQZY8mmafLTuGSdUmU=",
-        "rev": "fa41f94ae751b654088bb8f268f5dc0f4bb323fe",
-        "type": "tarball",
-        "url": "https://git.clan.lol/api/v1/repos/clan/clan-core/archive/fa41f94ae751b654088bb8f268f5dc0f4bb323fe.tar.gz"
+        "lastModified": 1712910239,
+        "narHash": "sha256-0Iu86fs3QqmDTEBZ2kJFYeNQc59L0ncW22CnJItDIuE=",
+        "ref": "synapse",
+        "rev": "e22501799b2409b9c1db340a25acadc5ff730e4c",
+        "revCount": 2473,
+        "type": "git",
+        "url": "https://git.clan.lol/clan/clan-core"
       },
       "original": {
-        "type": "tarball",
-        "url": "https://git.clan.lol/clan/clan-core/archive/main.tar.gz"
+        "ref": "synapse",
+        "type": "git",
+        "url": "https://git.clan.lol/clan/clan-core"
       }
     },
     "disko": {
@@ -78,11 +40,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1718846788,
-        "narHash": "sha256-9dtXYtEkmXoUJV+PGLqscqF7qTn4AIhAKpFWRFU2NYs=",
+        "lastModified": 1712356478,
+        "narHash": "sha256-kTcEtrQIRnexu5lAbLsmUcfR2CrmsACF1s3ZFw1NEVA=",
         "owner": "nix-community",
         "repo": "disko",
-        "rev": "e1174d991944a01eaaa04bc59c6281edca4c0e6e",
+        "rev": "0a17298c0d96190ef3be729d594ba202b9c53beb",
         "type": "github"
       },
       "original": {
@@ -91,18 +53,23 @@
         "type": "github"
       }
     },
-    "flake-compat": {
+    "disko_2": {
+      "inputs": {
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
       "locked": {
-        "lastModified": 1696426674,
-        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+        "lastModified": 1713406758,
+        "narHash": "sha256-kwZvhmx+hSZvjzemKxsAqzEqWmXZS47VVwQhNrINORQ=",
+        "owner": "nix-community",
+        "repo": "disko",
+        "rev": "1efd500e9805a9efbce401ed5999006d397b9f11",
         "type": "github"
       },
       "original": {
-        "owner": "edolstra",
-        "repo": "flake-compat",
+        "owner": "nix-community",
+        "repo": "disko",
         "type": "github"
       }
     },
@@ -113,11 +80,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1719994518,
-        "narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=",
+        "lastModified": 1712014858,
+        "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
         "owner": "hercules-ci",
         "repo": "flake-parts",
-        "rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7",
+        "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
         "type": "github"
       },
       "original": {
@@ -126,41 +93,35 @@
         "type": "github"
       }
     },
-    "flake-utils": {
-      "inputs": {
-        "systems": "systems"
-      },
+    "nixlib": {
       "locked": {
-        "lastModified": 1710146030,
-        "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+        "lastModified": 1711846064,
+        "narHash": "sha256-cqfX0QJNEnge3a77VnytM0Q6QZZ0DziFXt6tSCV8ZSc=",
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "rev": "90b1a963ff84dc532db92f678296ff2499a60a87",
         "type": "github"
       },
       "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
         "type": "github"
       }
     },
     "nixos-generators": {
       "inputs": {
-        "nixlib": [
-          "clan-core",
-          "nixpkgs"
-        ],
+        "nixlib": "nixlib",
         "nixpkgs": [
           "clan-core",
           "nixpkgs"
         ]
       },
       "locked": {
-        "lastModified": 1718025593,
-        "narHash": "sha256-WZ1gdKq/9u1Ns/oXuNsDm+W0salonVA0VY1amw8urJ4=",
+        "lastModified": 1712191720,
+        "narHash": "sha256-xXtSSnVHURHsxLQO30dzCKW5NJVGV/umdQPmFjPFMVA=",
         "owner": "nix-community",
         "repo": "nixos-generators",
-        "rev": "35c20ba421dfa5059e20e0ef2343c875372bdcf3",
+        "rev": "0c15e76bed5432d7775a22e8d22059511f59d23a",
         "type": "github"
       },
       "original": {
@@ -169,62 +130,13 @@
         "type": "github"
       }
     },
-    "nixos-images": {
-      "inputs": {
-        "nixos-stable": [
-          "clan-core"
-        ],
-        "nixos-unstable": [
-          "clan-core",
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1718845599,
-        "narHash": "sha256-HbQ0iKohKJC5grC95HNjLxGPdgsc/BJgoENDYNbzkLo=",
-        "owner": "nix-community",
-        "repo": "nixos-images",
-        "rev": "c1e6a5f7b08f1c9993de1cfc5f15f838bf783b88",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-community",
-        "repo": "nixos-images",
-        "type": "github"
-      }
-    },
-    "nixos-mailserver": {
-      "inputs": {
-        "blobs": "blobs",
-        "flake-compat": [
-          "flake-compat"
-        ],
-        "nixpkgs": [
-          "nixpkgs"
-        ],
-        "nixpkgs-24_05": []
-      },
-      "locked": {
-        "lastModified": 1718697807,
-        "narHash": "sha256-Enla61WFisytTYbWygPynEbu8vozjeGc6Obkj2GRj7o=",
-        "owner": "simple-nixos-mailserver",
-        "repo": "nixos-mailserver",
-        "rev": "290a995de5c3d3f08468fa548f0d55ab2efc7b6b",
-        "type": "gitlab"
-      },
-      "original": {
-        "owner": "simple-nixos-mailserver",
-        "repo": "nixos-mailserver",
-        "type": "gitlab"
-      }
-    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1719931832,
-        "narHash": "sha256-0LD+KePCKKEb4CcPsTBOwf019wDtZJanjoKm1S8q3Do=",
+        "lastModified": 1713687659,
+        "narHash": "sha256-Yd8KuOBpZ0Slau/NxFhMPJI0gBxeax0vq/FD0rqKwuQ=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "0aeab749216e4c073cece5d34bc01b79e717c3e0",
+        "rev": "f2d7a289c5a5ece8521dd082b81ac7e4a57c2c5c",
         "type": "github"
       },
       "original": {
@@ -236,13 +148,11 @@
     },
     "root": {
       "inputs": {
-        "buildbot-nix": "buildbot-nix",
         "clan-core": "clan-core",
-        "flake-compat": "flake-compat",
+        "disko": "disko_2",
         "flake-parts": "flake-parts",
-        "flake-utils": "flake-utils",
-        "nixos-mailserver": "nixos-mailserver",
         "nixpkgs": "nixpkgs",
+        "sops-nix": "sops-nix",
         "srvos": "srvos",
         "treefmt-nix": "treefmt-nix"
       }
@@ -250,19 +160,16 @@
     "sops-nix": {
       "inputs": {
         "nixpkgs": [
-          "clan-core",
           "nixpkgs"
         ],
-        "nixpkgs-stable": [
-          "clan-core"
-        ]
+        "nixpkgs-stable": []
       },
       "locked": {
-        "lastModified": 1719111739,
-        "narHash": "sha256-kr2QzRrplzlCP87ddayCZQS+dhGW98kw2zy7+jUXtF4=",
+        "lastModified": 1713668495,
+        "narHash": "sha256-4BvlfPfyUmB1U0r/oOF6jGEW/pG59c5yv6PJwgucTNM=",
         "owner": "Mic92",
         "repo": "sops-nix",
-        "rev": "5e2e9421e9ed2b918be0a441c4535cfa45e04811",
+        "rev": "09f1bc8ba3277c0f052f7887ec92721501541938",
         "type": "github"
       },
       "original": {
@@ -278,11 +185,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1719965291,
-        "narHash": "sha256-IQiO6VNESSmgxQkpI1q86pqxRw0SZ45iSeM1jsmBpSw=",
+        "lastModified": 1713533513,
+        "narHash": "sha256-nv5GmWaGryyZU8ihQIYLZWasqaXTZKGTjsypG0TRw9Q=",
         "owner": "numtide",
         "repo": "srvos",
-        "rev": "1844f1a15ef530c963bb07c3846172fccbfb9f74",
+        "rev": "d8945920cb8e98dc737d1fc2d42607f5916c34cf",
         "type": "github"
       },
       "original": {
@@ -291,21 +198,6 @@
         "type": "github"
       }
     },
-    "systems": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
-    },
     "treefmt-nix": {
       "inputs": {
         "nixpkgs": [
@@ -313,11 +205,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1719887753,
-        "narHash": "sha256-p0B2r98UtZzRDM5miGRafL4h7TwGRC4DII+XXHDHqek=",
+        "lastModified": 1711963903,
+        "narHash": "sha256-N3QDhoaX+paWXHbEXZapqd1r95mdshxToGowtjtYkGI=",
        "owner": "numtide",
        "repo": "treefmt-nix",
-        "rev": "bdb6355009562d8f9313d9460c0d3860f525bc6c",
+        "rev": "49dc4a92b02b8e68798abd99184f228243b6e3ac",
         "type": "github"
       },
       "original": {

flake.nix
View File

@@ -8,101 +8,62 @@
   inputs = {
     nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
-    flake-utils.url = "github:numtide/flake-utils";
-    flake-compat.url = "github:edolstra/flake-compat";
     flake-parts.url = "github:hercules-ci/flake-parts";
     flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
     treefmt-nix.url = "github:numtide/treefmt-nix";
     treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
-    nixos-mailserver = {
-      url = "gitlab:simple-nixos-mailserver/nixos-mailserver";
-      inputs.nixpkgs.follows = "nixpkgs";
-      inputs.nixpkgs-24_05.follows = "";
-      inputs.flake-compat.follows = "flake-compat";
-    };
+    disko.url = "github:nix-community/disko";
+    disko.inputs.nixpkgs.follows = "nixpkgs";
+
+    sops-nix.url = "github:Mic92/sops-nix";
+    sops-nix.inputs.nixpkgs.follows = "nixpkgs";
+    sops-nix.inputs.nixpkgs-stable.follows = "";
 
     srvos.url = "github:numtide/srvos";
     # Use the version of nixpkgs that has been tested to work with SrvOS
     srvos.inputs.nixpkgs.follows = "nixpkgs";
 
-    clan-core.url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
+    clan-core.url = "git+https://git.clan.lol/clan/clan-core?ref=synapse";
     clan-core.inputs.flake-parts.follows = "flake-parts";
     clan-core.inputs.nixpkgs.follows = "nixpkgs";
     clan-core.inputs.treefmt-nix.follows = "treefmt-nix";
+    clan-core.inputs.sops-nix.follows = "sops-nix";
-
-    buildbot-nix.url = "github:Mic92/buildbot-nix";
-    buildbot-nix.inputs.nixpkgs.follows = "nixpkgs";
-    buildbot-nix.inputs.flake-parts.follows = "flake-parts";
-    buildbot-nix.inputs.treefmt-nix.follows = "treefmt-nix";
   };
 
-  outputs =
-    inputs@{ flake-parts, ... }:
-    flake-parts.lib.mkFlake { inherit inputs; } (
-      { self, ... }:
-      {
-        systems = [
-          "x86_64-linux"
-          "aarch64-linux"
-        ];
-        imports = [
-          inputs.treefmt-nix.flakeModule
-          ./devShells/flake-module.nix
-          ./targets/flake-module.nix
-          ./modules/flake-module.nix
-          ./pkgs/flake-module.nix
-        ];
-        perSystem = (
-          {
-            lib,
-            self',
-            system,
-            pkgs,
-            ...
-          }:
-          {
-            treefmt = {
-              package = pkgs.treefmt.overrideAttrs (_old: {
-                # https://github.com/numtide/treefmt/pull/325
-                patches = [ ./treefmt-config.patch ];
-              });
-              projectRootFile = ".git/config";
-              programs.terraform.enable = true;
-              programs.shellcheck.enable = true;
-              programs.deno.enable = true;
-              settings.global.excludes = [
-                # generated files
-                "sops/*"
-                "terraform.tfstate"
-                "*.tfvars.sops.json"
-                "*nixos-vars.json"
-                "secrets.yaml"
-              ];
-              programs.nixfmt-rfc-style.enable = true;
-              settings.formatter.nixfmt-rfc-style.excludes = [
-                # generated files
-                "node-env.nix"
-                "node-packages.nix"
-                "composition.nix"
-              ];
-            };
-            checks =
-              let
-                nixosMachines = lib.mapAttrs' (
-                  name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel
-                ) ((lib.filterAttrs (_: config: config.pkgs.system == system)) self.nixosConfigurations);
-                packages = lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages;
-                devShells = lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells;
-                homeConfigurations = lib.mapAttrs' (
-                  name: config: lib.nameValuePair "home-manager-${name}" config.activation-script
-                ) (self'.legacyPackages.homeConfigurations or { });
-              in
-              nixosMachines // packages // devShells // homeConfigurations;
-          }
-        );
-      }
-    );
+  outputs = inputs@{ flake-parts, ... }:
+    flake-parts.lib.mkFlake { inherit inputs; } ({ self, ... }: {
+      systems = [
+        "x86_64-linux"
+        "aarch64-linux"
+      ];
+      imports = [
+        inputs.treefmt-nix.flakeModule
+        ./devShells/flake-module.nix
+        ./targets/flake-module.nix
+        ./modules/flake-module.nix
+        ./pkgs/flake-module.nix
+      ];
+      perSystem = ({ lib, self', system, ... }: {
+        treefmt = {
+          projectRootFile = ".git/config";
+          programs.hclfmt.enable = true;
+          programs.nixpkgs-fmt.enable = true;
+          settings.formatter.nixpkgs-fmt.excludes = [
+            # generated files
+            "node-env.nix"
+            "node-packages.nix"
+            "composition.nix"
+          ];
+        };
+        checks =
+          let
+            nixosMachines = lib.mapAttrs' (name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel) ((lib.filterAttrs (_: config: config.pkgs.system == system)) self.nixosConfigurations);
+            packages = lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages;
+            devShells = lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells;
+            homeConfigurations = lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (self'.legacyPackages.homeConfigurations or { });
+          in
+          nixosMachines // packages // devShells // homeConfigurations;
+      });
+    });
 }
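Both sides of the `checks` block rely on `lib.mapAttrs'` to rename attribute keys while keeping the values, which is what feeds the `nix-fast-build` CI run above. A minimal sketch of the renaming, with a hypothetical one-package set:

```nix
# lib.nameValuePair "package-${n}" is curried: it still expects the value,
# which is exactly the two-argument (name: value:) shape lib.mapAttrs' wants.
lib.mapAttrs' (n: lib.nameValuePair "package-${n}") { hello = pkgs.hello; }
# => { package-hello = pkgs.hello; }
```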

View File

@@ -1 +0,0 @@
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHS2PvT2e04pqbt1EFFN2y1za9nNmr8rcfnXq9kG5RS2 nixbld@turingmachine

View File

@@ -41,10 +41,7 @@ in
     extraGroups = [ "wheel" ];
     shell = "/run/current-system/sw/bin/zsh";
     uid = 1004;
-    openssh.authorizedKeys.keys = [
-      admins.kenji
-      admins.kenji-remote
-    ];
+    openssh.authorizedKeys.keys = [ admins.kenji admins.kenji-remote ];
   };
   johannes = {
     isNormalUser = true;

View File

@@ -1,57 +0,0 @@
-{ config, ... }:
-{
-  services.buildbot-nix.master = {
-    enable = true;
-    # Domain name under which the buildbot frontend is reachable
-    domain = "buildbot.clan.lol";
-    # The workers file configures credentials for the buildbot workers to connect to the master.
-    # "name" is the configured worker name in services.buildbot-nix.worker.name of a worker
-    # (defaults to the hostname of the machine)
-    # "pass" is the password for the worker configured in `services.buildbot-nix.worker.workerPasswordFile`
-    # "cores" is the number of cpu cores the worker has.
-    # The number must match, as otherwise potentially not enough buildbot workers are created.
-    workersFile = config.sops.secrets.buildbot-workers-file.path;
-    authBackend = "gitea";
-    admins = [
-      "Mic92"
-      "Qubasa"
-      "DavHau"
-      "kenji"
-      "hsjobeki"
-      "lassulus"
-    ];
-    gitea = {
-      enable = true;
-      instanceUrl = "https://git.clan.lol";
-      # Redirect URIs. Please use a new line for every URI: https://buildbot.clan.lol/auth/login
-      oauthId = "adb3425c-490f-4558-9487-8f8940d2925b";
-      oauthSecretFile = config.sops.secrets.buildbot-oauth-secret-file.path;
-      webhookSecretFile = config.sops.secrets.buildbot-webhook-secret-file.path;
-      tokenFile = config.sops.secrets.buildbot-token-file.path;
-      topic = "buildbot-clan";
-    };
-    # optional nix-eval-jobs settings
-    evalWorkerCount = 10; # limit number of concurrent evaluations
-    evalMaxMemorySize = "4096"; # limit memory usage per evaluation
-  };
-
-  # Optional: Enable acme/TLS in nginx (recommended)
-  services.nginx.virtualHosts.${config.services.buildbot-nix.master.domain} = {
-    forceSSL = true;
-    enableACME = true;
-  };
-
-  services.buildbot-nix.worker = {
-    enable = true;
-    workerPasswordFile = config.sops.secrets.buildbot-worker-password-file.path;
-  };
-
-  sops.secrets.buildbot-oauth-secret-file = { };
-  sops.secrets.buildbot-workers-file = { };
-  sops.secrets.buildbot-worker-password-file = { };
-  sops.secrets.buildbot-token-file = { };
-}

View File

@@ -1,5 +1,4 @@
-{ self, inputs, ... }:
-{
+{ self, inputs, ... }: {
   flake.nixosModules = {
     server.imports = [
       inputs.srvos.nixosModules.server
@@ -16,21 +15,11 @@
       ./initrd-networking.nix
     ];
 
-    buildbot.imports = [
-      inputs.buildbot-nix.nixosModules.buildbot-master
-      inputs.buildbot-nix.nixosModules.buildbot-worker
-      ./buildbot.nix
-    ];
-
     web01.imports = [
       self.nixosModules.server
-      self.nixosModules.buildbot
       inputs.srvos.nixosModules.mixins-nginx
       inputs.srvos.nixosModules.mixins-nix-experimental
-      ./matrix-bot.nix
       ./web01
-      inputs.nixos-mailserver.nixosModules.mailserver
-      ./mailserver.nix
     ];
   };
 }

View File

@@ -1,22 +1,31 @@
-{ config, lib, ... }:
-let
+{ config
+, lib
+, ...
+}:
+with lib; let
   cfg = config.clan.networking;
 in
 {
   options = {
-    clan.networking.ipv4.address = lib.mkOption { type = lib.types.str; };
+    clan.networking.ipv4.address = mkOption {
+      type = types.str;
+    };
 
-    clan.networking.ipv4.cidr = lib.mkOption {
-      type = lib.types.str;
+    clan.networking.ipv4.cidr = mkOption {
+      type = types.str;
       default = "26";
     };
 
-    clan.networking.ipv4.gateway = lib.mkOption { type = lib.types.str; };
+    clan.networking.ipv4.gateway = mkOption {
+      type = types.str;
+    };
 
-    clan.networking.ipv6.address = lib.mkOption { type = lib.types.str; };
+    clan.networking.ipv6.address = mkOption {
+      type = types.str;
+    };
 
-    clan.networking.ipv6.cidr = lib.mkOption {
-      type = lib.types.str;
+    clan.networking.ipv6.cidr = mkOption {
+      type = types.str;
       default = "64";
     };
   };

View File

@@ -1,54 +0,0 @@
-{ config, pkgs, ... }:
-let
-  mailPassword =
-    { service }:
-    {
-      secret."${service}-password" = { };
-      secret."${service}-password-hash" = { };
-      generator.path = with pkgs; [
-        coreutils
-        xkcdpass
-        mkpasswd
-      ];
-      generator.script = ''
-        xkcdpass -n 4 -d - > $secrets/${service}-password
-        cat $secrets/${service}-password | mkpasswd -s -m bcrypt > $secrets/${service}-password-hash
-      '';
-    };
-in
-{
-  mailserver = {
-    enable = true;
-    fqdn = "mail.clan.lol";
-    domains = [ "clan.lol" ];
-    enablePop3 = true;
-    certificateScheme = "acme-nginx";
-    # kresd sucks unfortunately (fails when one NS server is not working, instead of trying other ones)
-    localDnsResolver = false;
-    loginAccounts."golem@clan.lol".hashedPasswordFile =
-      config.clan.core.facts.services.golem-mail.secret.golem-password-hash.path;
-    loginAccounts."gitea@clan.lol".hashedPasswordFile =
-      config.clan.core.facts.services.gitea-mail.secret.gitea-password-hash.path;
-  };
-  services.unbound = {
-    enable = true;
-    settings.server = {
-      prefetch = "yes";
-      prefetch-key = true;
-      qname-minimisation = true;
-      # Too many broken dnssec setups even at big companies such as amazon.
-      # Breaks my email setup. Better rely on tls for security.
-      val-permissive-mode = "yes";
-    };
-  };
-  # use local unbound as dns resolver
-  networking.nameservers = [ "127.0.0.1" ];
-  security.acme.acceptTerms = true;
-  clan.core.facts.services.golem-mail = mailPassword { service = "golem"; };
-  clan.core.facts.services.gitea-mail = mailPassword { service = "gitea"; };
-}

View File

@@ -1,49 +0,0 @@
-{
-  config,
-  pkgs,
-  self,
-  ...
-}:
-let
-  name = "matrix-bot";
-in
-{
-  users.groups.matrix-bot-user = { };
-  users.users.matrix-bot-user = {
-    group = "matrix-bot-user";
-    isSystemUser = true;
-    description = "User for matrix-bot service";
-    home = "/var/lib/matrix-bot";
-    createHome = true;
-  };
-
-  systemd.services.${name} = {
-    path = [ self.packages.${pkgs.system}.matrix-bot ];
-    description = "Matrix bot for changelog and reviews";
-    after = [ "network.target" ];
-    wantedBy = [ "multi-user.target" ];
-    environment = {
-      MATRIX_PASSWORD_FILE = "%d/MATRIX_PASSWORD_FILE";
-      OPENAI_API_KEY_FILE = "%d/OPENAI_API_KEY_FILE";
-      HOME = "/var/lib/${name}";
-    };
-    serviceConfig = {
-      LoadCredential = [
-        "MATRIX_PASSWORD_FILE:${config.sops.secrets.web01-matrix-password-clan-bot.path}"
-        "OPENAI_API_KEY_FILE:${config.sops.secrets.qubasas-openai-api-key.path}"
-      ];
-      User = "matrix-bot-user";
-      Group = "matrix-bot-user";
-      WorkingDirectory = "/var/lib/${name}";
-      RuntimeDirectory = "/var/lib/${name}";
-    };
-    script = ''
-      set -euxo pipefail
-      mbot --changelog-room "!FdCwyKsRlfooNYKYzx:matrix.org" --review-room "!tmSRJlbsVXFUKAddiM:gchq.icu"
-    '';
-  };
-}

modules/single-disk.nix (new file)
View File

@@ -0,0 +1,45 @@
+{ self, ... }:
+let
+  partitions = {
+    grub = {
+      name = "grub";
+      size = "1M";
+      type = "ef02";
+    };
+    esp = {
+      name = "ESP";
+      type = "EF00";
+      size = "500M";
+      content = {
+        type = "filesystem";
+        format = "vfat";
+        mountpoint = "/boot";
+      };
+    };
+    root = {
+      name = "root";
+      size = "100%";
+      content = {
+        type = "filesystem";
+        # We use xfs because it has support for compression and quite good performance for databases
+        format = "xfs";
+        mountpoint = "/";
+      };
+    };
+  };
+in
+{
+  imports = [
+    self.inputs.disko.nixosModules.disko
+  ];
+  disko.devices = {
+    disk.sda = {
+      type = "disk";
+      device = "/dev/sda";
+      content = {
+        type = "gpt";
+        inherit partitions;
+      };
+    };
+  };
+}

View File

@@ -1,21 +1,26 @@
-{ config, self, ... }:
-{
-  imports = [ self.inputs.clan-core.clanModules.borgbackup ];
+{ config, ... }: {
   # 100GB storagebox is under the nix-community hetzner account
-  clan.borgbackup.destinations.${config.networking.hostName} = {
-    repo = "u366395@u366395.your-storagebox.de:/./borgbackup";
-    rsh = "ssh -oPort=23 -i ${config.clan.core.facts.services.borgbackup.secret."borgbackup.ssh".path}";
-  };
-
-  clan.core.state.system.folders = [
-    "/home"
-    "/etc"
-    "/var"
-    "/root"
-  ];
-
-  services.borgbackup.jobs.${config.networking.hostName} = {
+  systemd.services.borgbackup-job-clan-lol.serviceConfig.ReadWritePaths = [
+    "/var/log/telegraf"
+  ];
+
+  # Run this from the hetzner network:
+  # ssh-keyscan -p 23 u359378.your-storagebox.de
+  programs.ssh.knownHosts = {
+    storagebox-ecdsa.hostNames = [ "[u359378.your-storagebox.de]:23" ];
+    storagebox-ecdsa.publicKey = "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==";
+    storagebox-rsa.hostNames = [ "[u359378.your-storagebox.de]:23" ];
+    storagebox-rsa.publicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==";
+  };
+
+  services.borgbackup.jobs.clan-lol = {
+    paths = [
+      "/home"
+      "/var"
+      "/root"
+    ];
     exclude = [
       "*.pyc"
       "/home/*/.direnv"
@@ -36,6 +41,9 @@
       "/var/tmp"
       "/var/log"
     ];
+    # $ ssh-keygen -y -f /run/secrets/hetzner-borgbackup-ssh > /tmp/hetzner-borgbackup-ssh.pub
+    # $ cat /tmp/hetzner-borgbackup-ssh.pub | ssh -p23 u366395@u366395.your-storagebox.de install-ssh-key
+    repo = "u366395@u366395.your-storagebox.de:/./borgbackup";
 
     # Disaster recovery:
     # get the backup passphrase and ssh key from the sops and store them in /tmp
@@ -45,11 +53,20 @@
     # $ borg list
     # web01-clan-lol-2023-07-21T14:12:22 Fri, 2023-07-21 14:12:27 [539b1037669ffd0d3f50020f439bbe2881b7234910e405eafc333125383351bc]
     # $ borg mount u359378@u359378.your-storagebox.de:/./borgbackup::web01-clan-lol-2023-07-21T14:12:22 /tmp/backup
+    doInit = true;
+    encryption = {
+      mode = "repokey-blake2";
+      # $ nix run nixpkgs#xkcdpass -- -d '-' -n 3 -C capitalize "$@"
+      passCommand = "cat ${config.sops.secrets.hetzner-borgbackup-passphrase.path}";
+    };
+    compression = "auto,zstd";
+    startAt = "daily";
 
     # Also enable ssh support in the storagebox web interface.
     # By default the storage box is only accessible from the hetzner network.
-    # $ clan facts generate
-    # $ clan facts list web01 | jq .borgbackup.ssh.pub | ssh -p23 u359378@u359378.your-storagebox.de install-ssh-key
+    # $ ssh-keygen -t ed25519 -N "" -f /tmp/ssh_host_ed25519_key
+    # $ cat /tmp/ssh_host_ed25519_key.pub | ssh -p23 u359378@u359378.your-storagebox.de install-ssh-key
+    environment.BORG_RSH = "ssh -oPort=23 -i ${config.sops.secrets.hetzner-borgbackup-ssh.path}";
 
     preHook = ''
       set -x
@@ -59,19 +76,12 @@
       task,frequency=daily last_run=$(date +%s)i,state="$([[ $exitStatus == 0 ]] && echo ok || echo fail)"
       EOF
     '';
-  };
 
-  systemd.services."borgbackup-job-${config.networking.hostName}".serviceConfig.ReadWritePaths = [
-    "/var/log/telegraf"
-  ];
-
-  # Run this from the hetzner network:
-  # ssh-keyscan -p 23 u359378.your-storagebox.de
-  programs.ssh.knownHosts = {
-    storagebox-ecdsa.hostNames = [ "[u359378.your-storagebox.de]:23" ];
-    storagebox-ecdsa.publicKey = "ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==";
-    storagebox-rsa.hostNames = [ "[u359378.your-storagebox.de]:23" ];
-    storagebox-rsa.publicKey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==";
+    prune.keep = {
+      within = "1d"; # Keep all archives from the last day
+      daily = 7;
+      weekly = 4;
+      monthly = 0;
+    };
   };
 }
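The `prune.keep` attrset added on the right is translated by the NixOS borgbackup module into borg's prune retention flags; a sketch of the mapping (flag names per borg's prune options, noted here as an assumption about the module's translation):

```nix
prune.keep = {
  within = "1d"; # --keep-within 1d: keep every archive from the last day
  daily = 7;     # --keep-daily 7: one archive per day for 7 days
  weekly = 4;    # --keep-weekly 4: one archive per week for 4 weeks
  monthly = 0;   # --keep-monthly 0: no additional monthly archives
};
```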

View File

@@ -1,18 +1,10 @@
-{
-  config,
-  self,
-  pkgs,
-  ...
-}:
-{
+{ config, self, pkgs, ... }: {
   # service for the automatic merge bot
   systemd.services.clan-merge = {
     description = "Merge clan.lol PRs automatically";
     wantedBy = [ "multi-user.target" ];
     after = [ "network.target" ];
-    environment = {
-      GITEA_TOKEN_FILE = "%d/GITEA_TOKEN_FILE";
-    };
+    environment = { GITEA_TOKEN_FILE = "%d/GITEA_TOKEN_FILE"; };
     serviceConfig = {
       LoadCredential = [ "GITEA_TOKEN_FILE:${config.sops.secrets.merge-bot-gitea-token.path}" ];
       Restart = "on-failure";

View File

@@ -1,5 +1,4 @@
-{ self, ... }:
-{
+{ self, ... }: {
   imports = [
     ./borgbackup.nix
     ./clan-merge.nix
@@ -9,7 +8,7 @@
     ./homepage.nix
     ./postfix.nix
     ./jobs.nix
-    ./matrix-synapse.nix
+    #./matrix-synapse.nix
     ../dev.nix
     self.inputs.clan-core.clanModules.zt-tcp-relay
   ];

View File

@@ -1,26 +1,8 @@
-{
-  config,
-  self,
-  pkgs,
-  lib,
-  ...
-}:
+{ config, self, pkgs, lib, ... }:
 let
   storeDeps = pkgs.runCommand "store-deps" { } ''
     mkdir -p $out/bin
-    for dir in ${
-      toString [
-        pkgs.coreutils
-        pkgs.findutils
-        pkgs.gnugrep
-        pkgs.gawk
-        pkgs.git
-        pkgs.nix
-        pkgs.bash
-        pkgs.jq
-        pkgs.nodejs
-      ]
-    }; do
+    for dir in ${toString [ pkgs.coreutils pkgs.findutils pkgs.gnugrep pkgs.gawk pkgs.git pkgs.nix pkgs.bash pkgs.jq pkgs.nodejs ]}; do
       for bin in "$dir"/bin/*; do
         ln -s "$bin" "$out/bin/$(basename "$bin")"
       done
@@ -32,95 +14,87 @@ let
   '';
   numInstances = 2;
 in
-lib.mkMerge [
+lib.mkMerge [{
+  # everything here has no dependencies on the store
+  systemd.services.gitea-runner-nix-image = {
+    wantedBy = [ "multi-user.target" ];
+    after = [ "podman.service" ];
+    requires = [ "podman.service" ];
+    path = [ config.virtualisation.podman.package pkgs.gnutar pkgs.shadow pkgs.getent ];
+    # we also include etc here because the cleanup job also wants the nixuser to be present
+    script = ''
+      set -eux -o pipefail
+      mkdir -p etc/nix
+
+      # Create an unprivileged user that we can use also without the run-as-user.sh script
+      touch etc/passwd etc/group
+      groupid=$(cut -d: -f3 < <(getent group nixuser))
+      userid=$(cut -d: -f3 < <(getent passwd nixuser))
+      groupadd --prefix $(pwd) --gid "$groupid" nixuser
+      emptypassword='$6$1ero.LwbisiU.h3D$GGmnmECbPotJoPQ5eoSTD6tTjKnSWZcjHoVTkxFLZP17W9hRi/XkmCiAMOfWruUwy8gMjINrBMNODc7cYEo4K.'
+      useradd --prefix $(pwd) -p "$emptypassword" -m -d /tmp -u "$userid" -g "$groupid" -G nixuser nixuser
+
+      cat <<NIX_CONFIG > etc/nix/nix.conf
+      accept-flake-config = true
+      experimental-features = nix-command flakes
+      NIX_CONFIG
+
+      cat <<NSSWITCH > etc/nsswitch.conf
+      passwd: files mymachines systemd
+      group: files mymachines systemd
+      shadow: files
+
+      hosts: files mymachines dns myhostname
+      networks: files
+
+      ethers: files
+      services: files
+      protocols: files
+      rpc: files
+      NSSWITCH
+
+      # list the content as it will be imported into the container
+      tar -cv . | tar -tvf -
+      tar -cv . | podman import - gitea-runner-nix
+    '';
+    serviceConfig = {
+      RuntimeDirectory = "gitea-runner-nix-image";
+      WorkingDirectory = "/run/gitea-runner-nix-image";
+      Type = "oneshot";
+      RemainAfterExit = true;
+    };
+  };
+  users.users.nixuser = {
+    group = "nixuser";
+    description = "Used for running nix ci jobs";
+    home = "/var/empty";
+    isSystemUser = true;
+  };
+  users.groups.nixuser = { };
+}
   {
-    # everything here has no dependencies on the store
-    systemd.services.gitea-runner-nix-image = {
-      wantedBy = [ "multi-user.target" ];
-      after = [ "podman.service" ];
-      requires = [ "podman.service" ];
-      path = [
-        config.virtualisation.podman.package
-        pkgs.gnutar
-        pkgs.shadow
-        pkgs.getent
-      ];
-      # we also include etc here because the cleanup job also wants the nixuser to be present
-      script = ''
-        set -eux -o pipefail
-        mkdir -p etc/nix
-
-        # Create an unprivileged user that we can use also without the run-as-user.sh script
-        touch etc/passwd etc/group
-        groupid=$(cut -d: -f3 < <(getent group nixuser))
-        userid=$(cut -d: -f3 < <(getent passwd nixuser))
-        groupadd --prefix $(pwd) --gid "$groupid" nixuser
-        emptypassword='$6$1ero.LwbisiU.h3D$GGmnmECbPotJoPQ5eoSTD6tTjKnSWZcjHoVTkxFLZP17W9hRi/XkmCiAMOfWruUwy8gMjINrBMNODc7cYEo4K.'
-        useradd --prefix $(pwd) -p "$emptypassword" -m -d /tmp -u "$userid" -g "$groupid" -G nixuser nixuser
-
-        cat <<NIX_CONFIG > etc/nix/nix.conf
-        accept-flake-config = true
-        experimental-features = nix-command flakes
-        NIX_CONFIG
-
-        cat <<NSSWITCH > etc/nsswitch.conf
-        passwd: files mymachines systemd
-        group: files mymachines systemd
-        shadow: files
-
-        hosts: files mymachines dns myhostname
-        networks: files
-
-        ethers: files
-        services: files
-        protocols: files
-        rpc: files
-        NSSWITCH
-
-        # list the content as it will be imported into the container
-        tar -cv . | tar -tvf -
-        tar -cv . | podman import - gitea-runner-nix
-      '';
-      serviceConfig = {
-        RuntimeDirectory = "gitea-runner-nix-image";
-        WorkingDirectory = "/run/gitea-runner-nix-image";
-        Type = "oneshot";
-        RemainAfterExit = true;
-      };
-    };
-    users.users.nixuser = {
-      group = "nixuser";
-      description = "Used for running nix ci jobs";
-      home = "/var/empty";
-      isSystemUser = true;
-    };
-    users.groups.nixuser = { };
-  }
-  {
-    systemd.services =
-      lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}-token") numInstances)
-        (name: {
-          wantedBy = [ "multi-user.target" ];
-          after = [ "gitea.service" ];
-          environment = {
-            GITEA_CUSTOM = "/var/lib/gitea/custom";
-            GITEA_WORK_DIR = "/var/lib/gitea";
-          };
-          script = ''
-            set -euo pipefail
-            token=$(${lib.getExe self.packages.${pkgs.hostPlatform.system}.gitea} actions generate-runner-token)
-            echo "TOKEN=$token" > /var/lib/gitea-registration/${name}
-          '';
-          unitConfig.ConditionPathExists = [ "!/var/lib/gitea-registration/${name}" ];
-          serviceConfig = {
-            User = "gitea";
-            Group = "gitea";
-            StateDirectory = "gitea-registration";
-            Type = "oneshot";
-            RemainAfterExit = true;
-          };
-        });
+    systemd.services = lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}-token") numInstances) (name: {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "gitea.service" ];
+      environment = {
+        GITEA_CUSTOM = "/var/lib/gitea/custom";
+        GITEA_WORK_DIR = "/var/lib/gitea";
+      };
+      script = ''
+        set -euo pipefail
+        token=$(${lib.getExe self.packages.${pkgs.hostPlatform.system}.gitea} actions generate-runner-token)
+        echo "TOKEN=$token" > /var/lib/gitea-registration/${name}
+      '';
+      unitConfig.ConditionPathExists = [ "!/var/lib/gitea-registration/${name}" ];
+      serviceConfig = {
+        User = "gitea";
+        Group = "gitea";
+        StateDirectory = "gitea-registration";
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    });
 
     # Format of the token file:
     virtualisation = {
@@ -137,119 +111,106 @@ lib.mkMerge [
     virtualisation.containers.containersConf.settings = {
       # podman seems to not work with systemd-resolved
-      containers.dns_servers = [
-        "8.8.8.8"
-        "8.8.4.4"
-      ];
+      containers.dns_servers = [ "8.8.8.8" "8.8.4.4" ];
     };
   }
   {
-    systemd.services =
-      lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") numInstances)
-        (name: {
-          after = [
-            "${name}-token.service"
-            "gitea-runner-nix-image.service"
-          ];
-          requires = [
-            "${name}-token.service"
-            "gitea-runner-nix-image.service"
-          ];
+    systemd.services = lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") numInstances) (name: {
+      after = [
+        "${name}-token.service"
+        "gitea-runner-nix-image.service"
+      ];
+      requires = [
+        "${name}-token.service"
+        "gitea-runner-nix-image.service"
+      ];
 
       # TODO: systemd confinement
       serviceConfig = {
        # Hardening (may overlap with DynamicUser=)
        # The following options are only for optimizing output of systemd-analyze
        AmbientCapabilities = "";
        CapabilityBoundingSet = "";
        # ProtectClock= adds DeviceAllow=char-rtc r
        DeviceAllow = "";
        NoNewPrivileges = true;
        PrivateDevices = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = true;
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectSystem = "strict";
        RemoveIPC = true;
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        UMask = "0066";
        ProtectProc = "invisible";
        SystemCallFilter = [
          "~@clock"
          "~@cpu-emulation"
          "~@module"
          "~@mount"
          "~@obsolete"
          "~@raw-io"
          "~@reboot"
          "~@swap"
          # needed by go?
          #"~@resources"
          "~@privileged"
          "~capset"
          "~setdomainname"
          "~sethostname"
        ];
-        RestrictAddressFamilies = [
-          "AF_INET"
-          "AF_INET6"
-          "AF_UNIX"
-          "AF_NETLINK"
-        ];
+      RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];

        # Needs network access
        PrivateNetwork = false;
        # Cannot be true due to Node
        MemoryDenyWriteExecute = false;

        # The more restrictive "pid" option makes `nix` commands in CI emit
        # "GC Warning: Couldn't read /proc/stat"
        # You may want to set this to "pid" if not using `nix` commands
        ProcSubset = "all";
        # Coverage programs for compiled code such as `cargo-tarpaulin` disable
        # ASLR (address space layout randomization) which requires the
        # `personality` syscall
        # You may want to set this to `true` if not using coverage tooling on
        # compiled code
        LockPersonality = false;

        # Note that this has some interactions with the User setting; so you may
        # want to consult the systemd docs if using both.
        DynamicUser = true;
      };
    });

-    services.gitea-actions-runner.instances =
-      lib.genAttrs (builtins.genList (n: "nix${builtins.toString n}") numInstances)
-        (name: {
-          enable = true;
-          name = "nix-runner";
-          # take the git root url from the gitea config
-          # only possible if you've also configured your gitea through the same nix config
-          # otherwise you need to set it manually
-          url = config.services.gitea.settings.server.ROOT_URL;
-          # use your favourite nix secret manager to get a path for this
-          tokenFile = "/var/lib/gitea-registration/gitea-runner-${name}-token";
-          labels = [ "nix:docker://gitea-runner-nix" ];
-          settings = {
-            container.options = "-e NIX_BUILD_SHELL=/bin/bash -e PAGER=cat -e PATH=/bin -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt --device /dev/kvm -v /nix:/nix -v ${storeDeps}/bin:/bin -v ${storeDeps}/etc/ssl:/etc/ssl --user nixuser --device=/dev/kvm";
-            # the default network that also respects our dns server settings
-            container.network = "host";
-            container.valid_volumes = [
-              "/nix"
-              "${storeDeps}/bin"
-              "${storeDeps}/etc/ssl"
-            ];
-          };
-        });
-  }
-]
+    services.gitea-actions-runner.instances = lib.genAttrs (builtins.genList (n: "nix${builtins.toString n}") numInstances) (name: {
+      enable = true;
+      name = "nix-runner";
+      # take the git root url from the gitea config
+      # only possible if you've also configured your gitea through the same nix config
+      # otherwise you need to set it manually
+      url = config.services.gitea.settings.server.ROOT_URL;
+      # use your favourite nix secret manager to get a path for this
+      tokenFile = "/var/lib/gitea-registration/gitea-runner-${name}-token";
+      labels = [ "nix:docker://gitea-runner-nix" ];
+      settings = {
+        container.options = "-e NIX_BUILD_SHELL=/bin/bash -e PAGER=cat -e PATH=/bin -e SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt --device /dev/kvm -v /nix:/nix -v ${storeDeps}/bin:/bin -v ${storeDeps}/etc/ssl:/etc/ssl --user nixuser --device=/dev/kvm";
+        # the default network that also respects our dns server settings
+        container.network = "host";
+        container.valid_volumes = [
+          "/nix"
+          "${storeDeps}/bin"
+          "${storeDeps}/etc/ssl"
+        ];
+      };
+    });
+}]
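Both sides of this file lean on the same genAttrs-over-genList pattern to stamp out numbered runner units; a minimal sketch of what it expands to with `numInstances = 2`:

```nix
lib.genAttrs (builtins.genList (n: "gitea-runner-nix${builtins.toString n}") 2)
  (name: { description = "runner ${name}"; })
# => { gitea-runner-nix0 = { description = "runner gitea-runner-nix0"; };
#      gitea-runner-nix1 = { description = "runner gitea-runner-nix1"; }; }
```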

View File

@@ -1,29 +1,18 @@
-{
-  pkgs,
-  lib,
-  self,
-  config,
-  ...
-}:
+{ config, pkgs, lib, publog, self, ... }:
 let
   # make the logs for this host "public" so that they show up in e.g. metrics
-  publog =
-    vhost:
-    lib.attrsets.unionOfDisjoint vhost {
-      extraConfig =
-        (vhost.extraConfig or "")
-        + ''
-          access_log /var/log/nginx/public.log vcombined;
-        '';
-    };
+  publog = vhost: lib.attrsets.unionOfDisjoint vhost {
+    extraConfig = (vhost.extraConfig or "") + ''
+      access_log /var/log/nginx/public.log vcombined;
+    '';
+  };
 in
 {
   imports = [
     ./postgresql.nix
     ./actions-runner.nix
-    ./installer.nix
   ];
 
   services.gitea = {
@@ -37,17 +26,11 @@ in
     package = self.packages.${pkgs.hostPlatform.system}.gitea;
 
     settings.actions.ENABLED = true;
-    mailerPasswordFile = config.clan.core.facts.services.gitea-mail.secret.gitea-password.path;
     settings.mailer = {
       ENABLED = true;
       FROM = "gitea@clan.lol";
-      USER = "gitea@clan.lol";
-      SMTP_ADDR = "mail.clan.lol";
-      SMTP_PORT = "587";
+      HOST = "localhost:25";
     };
     settings.log.LEVEL = "Error";
     settings.service.DISABLE_REGISTRATION = false;
     settings.metrics.ENABLED = true;
@@ -59,17 +42,16 @@ in
       DOMAIN = "git.clan.lol";
       LANDING_PAGE = "explore";
     };
+    settings.session.PROVIDER = "db";
     settings.session.COOKIE_SECURE = true;
   };
+
+  sops.secrets.web01-gitea-password.owner = config.systemd.services.gitea.serviceConfig.User;
 
   services.nginx.virtualHosts."git.clan.lol" = publog {
     forceSSL = true;
     enableACME = true;
-    # The add_header directive is used to set the Content-Security-Policy header to allow embedding the Gitea instance in an iframe on the pad.lassul.us instance.
     locations."/".extraConfig = ''
       proxy_pass http://localhost:3002;
-      add_header Content-Security-Policy "frame-ancestors 'self' https://pad.lassul.us";
     '';
   };
 }

View File

@@ -1,13 +0,0 @@
-{
-  # http forward from https://clan.lol/sh to https://git.clan.lol/clan/clan-core/raw/branch/main/pkgs/gui-installer/gui-installer.sh
-  services.nginx.virtualHosts."clan.lol" = {
-    forceSSL = true;
-    enableACME = true;
-    locations."/install.sh".extraConfig = ''
-      proxy_pass http://localhost:3002/clan/clan-core/raw/branch/main/pkgs/gui-installer/gui-installer.sh;
-    '';
-    locations."/install-dev.sh".extraConfig = ''
-      proxy_pass http://localhost:3002/clan/clan-core/raw/branch/install-dev/pkgs/gui-installer/gui-installer.sh;
-    '';
-  };
-}

View File

@@ -1,5 +1,4 @@
-{ pkgs, ... }:
-{
+{ pkgs, ... }: {
   services.postgresql.enable = true;
   services.postgresql.package = pkgs.postgresql_14;
   services.postgresql.settings = {

View File

@@ -1,4 +1,4 @@
-{ pkgs, ... }:
+{ stdenv, lib, pkgs, ... }:
 let
   domain = "metrics.clan.lol";
@@ -38,13 +38,14 @@ in
     "d ${pub_goaccess} 0755 goaccess nginx -"
   ];
 
   # --browsers-file=/etc/goaccess/browsers.list
   # https://raw.githubusercontent.com/allinurl/goaccess/master/config/browsers.list
   systemd.services.goaccess = {
     description = "GoAccess server monitoring";
     preStart = ''
       rm -f ${pub_goaccess}/index.html
     '';
     serviceConfig = {
       User = "goaccess";
       Group = "nginx";
@@ -82,11 +83,7 @@ in
       ProtectSystem = "strict";
       SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @privileged @reboot @resources @setuid @swap @raw-io";
       ReadOnlyPaths = "/";
-      ReadWritePaths = [
-        "/proc/self"
-        "${pub_goaccess}"
-        "${priv_goaccess}"
-      ];
+      ReadWritePaths = [ "/proc/self" "${pub_goaccess}" "${priv_goaccess}" ];
       PrivateDevices = "yes";
       ProtectKernelModules = "yes";
       ProtectKernelTunables = "yes";
@@ -95,6 +92,7 @@ in
     wantedBy = [ "multi-user.target" ];
   };
 
   services.nginx.virtualHosts."${domain}" = {
     addSSL = true;
     enableACME = true;
View File

@@ -1,18 +1,17 @@
-{ config, pkgs, ... }:
-{
+{ config, pkgs, ... }: {
   services.harmonia.enable = true;
   # $ nix-store --generate-binary-cache-key cache.yourdomain.tld-1 harmonia.secret harmonia.pub
   services.harmonia.signKeyPath = config.sops.secrets.harmonia-secret.path;
 
   services.nginx = {
-    package = pkgs.nginxStable.override { modules = [ pkgs.nginxModules.zstd ]; };
+    package = pkgs.nginxStable.override {
+      modules = [ pkgs.nginxModules.zstd ];
+    };
   };
 
   # trust our own cache
   nix.settings.trusted-substituters = [ "https://cache.clan.lol" ];
-  nix.settings.trusted-public-keys = [
-    "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28="
-  ];
+  nix.settings.trusted-public-keys = [ "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28=" ];
 
   services.nginx.virtualHosts."cache.clan.lol" = {
     forceSSL = true;

View File

@@ -1,4 +1,4 @@
-{ config, ... }:
+{ config, lib, pkgs, self, ... }:
 {
   security.acme.defaults.email = "admins@clan.lol";
@@ -6,11 +6,12 @@
   # www user to push website artifacts via ssh
   users.users.www = {
-    openssh.authorizedKeys.keys = config.users.users.root.openssh.authorizedKeys.keys ++ [
-      # ssh-homepage-key
-      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMxZ3Av30M6Sh6NU1mnCskB16bYtNP8vskc/+ud0AU1C ssh-homepage-key"
-      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBuYyfSuETSrwqCsWHeeClqjcsFlMEmiJN6Rr8/DwrU0 gitea-ci"
-    ];
+    openssh.authorizedKeys.keys =
+      config.users.users.root.openssh.authorizedKeys.keys
+      ++ [
+        # ssh-homepage-key
+        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMxZ3Av30M6Sh6NU1mnCskB16bYtNP8vskc/+ud0AU1C ssh-homepage-key"
+      ];
     isSystemUser = true;
     shell = "/run/current-system/sw/bin/bash";
     group = "www";
@@ -18,7 +19,9 @@
   users.groups.www = { };
 
   # ensure /var/www can be accessed by nginx and www user
-  systemd.tmpfiles.rules = [ "d /var/www 0755 www nginx" ];
+  systemd.tmpfiles.rules = [
+    "d /var/www 0755 www nginx"
+  ];
 
   services.nginx = {
@@ -32,45 +35,13 @@
       source_charset utf-8;
     '';
 
+    # Make sure to expire the cache after 1 hour
     locations."/".extraConfig = ''
-      set $cors "false";
-      # Allow cross-origin requests from docs.clan.lol
-      if ($http_origin = "https://docs.clan.lol") {
-        set $cors "true";
-      }
-      # Allow cross-origin requests from localhost IPs with port 8000
-      if ($http_origin = "http://localhost:8000") {
-        set $cors "true";
-      }
-      if ($http_origin = "http://127.0.0.1:8000") {
-        set $cors "true";
-      }
-      if ($http_origin = "http://[::1]:8000") {
-        set $cors "true";
-      }
-      if ($cors = "true") {
-        add_header 'Access-Control-Allow-Origin' "$http_origin" always;
-        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
-        add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept, Authorization' always;
-      }
-      if ($cors = "true") {
-        add_header 'Access-Control-Allow-Origin' "$http_origin" always;
-        add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS' always;
-        add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With, Content-Type, Accept, Authorization' always;
-      }
+      add_header Cache-Control "public, max-age=3600";
     '';
 
     locations."^~ /docs".extraConfig = ''
       rewrite ^/docs(.*)$ https://docs.clan.lol permanent;
     '';
-    locations."^~ /blog".extraConfig = ''
-      rewrite ^/blog(.*)$ https://docs.clan.lol/blog permanent;
-    '';
 
     locations."/thaigersprint".return = "307 https://pad.lassul.us/s/clan-thaigersprint";
   };
@@ -84,9 +55,9 @@
       source_charset utf-8;
     '';
 
-    # Make sure to expire the cache after 12 hours
+    # Make sure to expire the cache after 1 hour
     locations."/".extraConfig = ''
-      add_header Cache-Control "public, max-age=43200";
+      add_header Cache-Control "public, max-age=3600";
     '';
   };

View File

@@ -1,10 +1,4 @@
-{
-  config,
-  self,
-  pkgs,
-  lib,
-  ...
-}:
+{ config, self, pkgs, lib, ... }:
 let
   configForJob = name: {
     systemd.timers.${name} = {
@@ -52,11 +46,9 @@ let
   };
 in
 {
-  config = lib.mkMerge (
-    map configForJob [
-      "job-flake-update-clan-core"
-      "job-flake-update-clan-homepage"
-      "job-flake-update-clan-infra"
-    ]
-  );
+  config = lib.mkMerge (map configForJob [
+    "job-flake-update-clan-core"
+    "job-flake-update-clan-homepage"
+    "job-flake-update-clan-infra"
+  ]);
 }
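The mkMerge-over-map idiom above merges one generated module fragment per job name; a minimal sketch of the same pattern with hypothetical names:

```nix
lib.mkMerge (map (name: { systemd.timers.${name}.enable = true; }) [ "job-a" "job-b" ])
# merges to: { systemd.timers.job-a.enable = true; systemd.timers.job-b.enable = true; }
```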

View File

@@ -1,30 +1,6 @@
 { self, ... }:
 {
   imports = [ self.inputs.clan-core.clanModules.matrix-synapse ];
+  clan.matrix-synapse.enable = true;
   clan.matrix-synapse.domain = "clan.lol";
-  clan.matrix-synapse.users.admin = {
-    admin = true;
-  };
-  clan.matrix-synapse.users.monitoring = { };
-  clan.matrix-synapse.users.clan-bot = { };
-
-  # Rate limiting settings
-  # we need to up this to be able to support matrix bots
-  services.matrix-synapse.settings = {
-    rc_login = {
-      address = {
-        per_second = 20;
-        burst_count = 200;
-      };
-      account = {
-        per_second = 20;
-        burst_count = 200;
-      };
-      failed_attempts = {
-        per_second = 3;
-        burst_count = 15;
-      };
-    };
-  };
 }

View File

@@ -1,41 +1,40 @@
-{ }
-#{ config, ... }:
-#let
-#  domain = "clan.lol";
-#in
-#{
-#  services.opendkim.enable = true;
-#  services.opendkim.domains = domain;
-#  services.opendkim.selector = "v1";
-#  services.opendkim.user = config.services.postfix.user;
-#  services.opendkim.group = config.services.postfix.group;
-#
-#  # postfix configuration for sending emails only
-#  services.postfix = {
-#    enable = true;
-#    hostname = "mail.${domain}";
-#    inherit domain;
-#
-#    config = {
-#      smtp_tls_note_starttls_offer = "yes";
-#
-#      smtp_dns_support_level = "dnssec";
-#      smtp_tls_security_level = "dane";
-#
-#      tls_medium_cipherlist = "AES128+EECDH:AES128+EDH";
-#
-#      smtpd_relay_restrictions = "permit_mynetworks permit_sasl_authenticated defer_unauth_destination";
-#      mydestination = "localhost.$mydomain, localhost, $myhostname";
-#      myorigin = "$mydomain";
-#
-#      milter_default_action = "accept";
-#      milter_protocol = "6";
-#      smtpd_milters = "unix:/run/opendkim/opendkim.sock";
-#      non_smtpd_milters = "unix:/run/opendkim/opendkim.sock";
-#
-#      inet_interfaces = "loopback-only";
-#      inet_protocols = "all";
-#    };
-#  };
-#}
+{ config, ... }:
+let
+  domain = "clan.lol";
+in
+{
+  services.opendkim.enable = true;
+  services.opendkim.domains = domain;
+  services.opendkim.selector = "v1";
+  services.opendkim.user = config.services.postfix.user;
+  services.opendkim.group = config.services.postfix.group;
+
+  # postfix configuration for sending emails only
+  services.postfix = {
+    enable = true;
+    hostname = "mail.${domain}";
+    inherit domain;
+
+    config = {
+      smtp_tls_note_starttls_offer = "yes";
+
+      smtp_dns_support_level = "dnssec";
+      smtp_tls_security_level = "dane";
+
+      tls_medium_cipherlist = "AES128+EECDH:AES128+EDH";
+
+      smtpd_relay_restrictions = "permit_mynetworks permit_sasl_authenticated defer_unauth_destination";
+      mydestination = "localhost.$mydomain, localhost, $myhostname";
+      myorigin = "$mydomain";
+
+      milter_default_action = "accept";
+      milter_protocol = "6";
+      smtpd_milters = "unix:/run/opendkim/opendkim.sock";
+      non_smtpd_milters = "unix:/run/opendkim/opendkim.sock";
+
+      inet_interfaces = "loopback-only";
+      inet_protocols = "all";
+    };
+  };
+}


@ -0,0 +1,83 @@
{ self, lib, ... }:
let
disk = index: {
type = "disk";
device = "/dev/nvme${toString index}n1";
content = {
type = "gpt";
partitions =
# systemd only wants to have one /boot partition
# should we rsync?
(lib.optionalAttrs (index == 0) {
boot = {
type = "EF00";
size = "1G";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
}) // {
root = {
size = "100%";
content = {
type = "luks";
name = "crypted${toString index}";
keyFile = "/tmp/secret.key";
content = {
type = "lvm_pv";
vg = "pool";
};
};
};
};
};
};
in
{
imports = [
self.inputs.disko.nixosModules.disko
];
boot.initrd.kernelModules = [
"xhci_pci"
"ahci"
"sd_mod"
"nvme"
"dm-raid"
"dm-integrity"
];
disko.devices = {
disk = {
nvme0n1 = disk 0;
nvme1n1 = disk 1;
};
lvm_vg = {
pool = {
type = "lvm_vg";
lvs = {
root = {
size = "95%FREE";
lvm_type = "raid1";
extraArgs = [
"--raidintegrity"
"y"
];
content = {
type = "filesystem";
format = "xfs";
mountpoint = "/";
mountOptions = [
"defaults"
];
};
};
};
};
};
};
}
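
A few post-install commands can confirm the layout this file produces; a rough sketch (assumes the device names used above and standard util-linux, LVM2, and cryptsetup tooling):

```
# Sketch: inspect the resulting layout on the installed host (illustrative).
lsblk -o NAME,TYPE,SIZE,MOUNTPOINT /dev/nvme0n1 /dev/nvme1n1
lvs -o lv_name,vg_name,segtype pool   # expect LV "root" in VG "pool" as raid1
cryptsetup status crypted0            # LUKS mapping backing the first PV
```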


@ -1,3 +1,4 @@
{ self, ... }:
let
  mirrorBoot = idx: {
    type = "disk";
@ -26,6 +27,10 @@
  };
in
{
  imports = [
    self.inputs.disko.nixosModules.disko
  ];
  networking.hostId = "8425e349";
  boot.initrd.postDeviceCommands = ''
@ -40,14 +45,8 @@ in
      efiSupport = true;
      efiInstallAsRemovable = true;
      mirroredBoots = [
        {
          path = "/boot0";
          devices = [ "nodev" ];
        }
        {
          path = "/boot1";
          devices = [ "nodev" ];
        }
      ];
    };


@ -1,19 +1,10 @@
{
  bash,
  coreutils,
  git,
  tea,
  openssh,
  writePureShellScriptBin,
}:
writePureShellScriptBin "action-create-pr"
  [
    bash
    coreutils
    git
    tea
    openssh
  ]
  ''
    bash ${./script.sh} "$@"
  ''


@ -1,15 +1,8 @@
{
  bash,
  coreutils,
  tea,
  writePureShellScriptBin,
}:
writePureShellScriptBin "action-ensure-tea-login"
  [
    bash
    coreutils
    tea
  ]
  ''
    bash ${./script.sh}
  ''


@ -8,5 +8,5 @@ fi
GITEA_TOKEN="${GITEA_TOKEN:-"$(cat "$GITEA_TOKEN_FILE")"}"
tea login add \
  --token "$GITEA_TOKEN" \
  --url "$GITEA_URL"
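
The quotes around $GITEA_TOKEN and $GITEA_URL are what this hunk is about: an unquoted expansion undergoes word splitting. A minimal illustration with a hypothetical token value:

```
# Sketch: why the expansions need quoting (hypothetical token with a space).
GITEA_TOKEN='abc def'
printf '<%s>\n' --token $GITEA_TOKEN    # splits into: <--token> <abc> <def>
printf '<%s>\n' --token "$GITEA_TOKEN"  # stays one arg: <--token> <abc def>
```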


@ -1,23 +1,20 @@
{
  bash,
  coreutils,
  git,
  openssh,
  action-ensure-tea-login,
  action-create-pr,
  action-flake-update,
  writePureShellScriptBin,
}:
writePureShellScriptBin "action-flake-update-pr-clan"
  [
    bash
    coreutils
    git
    openssh
    action-ensure-tea-login
    action-create-pr
    action-flake-update
  ]
  ''
    bash ${./script.sh}
  ''


@ -5,10 +5,8 @@ set -euo pipefail
export KEEP_VARS="GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL GITEA_URL GITEA_USER PR_TITLE REMOTE_BRANCH REPO_DIR${KEEP_VARS:+ $KEEP_VARS}"
# configure variables for actions
today=$(date --iso-8601)
today_minutes=$(date --iso-8601=minutes)
export PR_TITLE="Automatic flake update - ${today_minutes}"
export REMOTE_BRANCH="flake-update-${today}"
export REPO_DIR=$TMPDIR/repo
export GIT_AUTHOR_NAME="Clan Merge Bot"
export GIT_AUTHOR_EMAIL="clan-bot@git.clan.lol"
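
The two GNU date formats used above expand roughly as follows, which is where branch names like flake-update-2024-04-22 come from (illustrative values; actual output depends on run time and timezone):

```
# Sketch: what the date-derived variables look like (illustrative values).
date --iso-8601           # 2024-04-22             -> REMOTE_BRANCH=flake-update-2024-04-22
date --iso-8601=minutes   # 2024-04-22T09:15+02:00 -> interpolated into PR_TITLE
```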


@ -1,17 +1,9 @@
{
  bash,
  coreutils,
  git,
  nix,
  writePureShellScriptBin,
}:
writePureShellScriptBin "action-flake-update"
  [
    bash
    coreutils
    git
    nix
  ]
  ''
    bash ${./script.sh}
  ''


@ -1,7 +1,7 @@
import argparse
import json
import urllib.error
import urllib.request
from os import environ
from typing import Optional
@ -38,7 +38,6 @@ def is_ci_green(pr: dict) -> bool:
        return False
    return True
def is_org_member(user: str, token: str) -> bool:
    url = "https://git.clan.lol/api/v1/orgs/clan/members/" + user + f"?token={token}"
    try:
@ -51,6 +50,7 @@ def is_org_member(user: str, token: str) -> bool:
        raise
def merge_allowed(pr: dict, bot_name: str, token: str) -> bool:
    assignees = pr["assignees"] if pr["assignees"] else []
    if (


@ -1,9 +1,9 @@
{
  pkgs ? import <nixpkgs> { },
  lib ? pkgs.lib,
  python3 ? pkgs.python3,
  ruff ? pkgs.ruff,
  runCommand ? pkgs.runCommand,
}:
let
  pyproject = builtins.fromTOML (builtins.readFile ./pyproject.toml);
@ -32,11 +32,13 @@ let
  package = python3.pkgs.buildPythonPackage {
    inherit name src;
    format = "pyproject";
    nativeBuildInputs = [ python3.pkgs.setuptools ];
    propagatedBuildInputs = dependencies ++ [ ];
    passthru.tests = {
      inherit check;
    };
    passthru.devDependencies = devDependencies;
  };


@ -1,6 +1,5 @@
{
  perSystem =
    { pkgs, ... }:
    let
      package = pkgs.callPackage ./default.nix { inherit pkgs; };
    in


@ -1,11 +1,16 @@
{
  pkgs ? import <nixpkgs> { },
}:
let
  inherit (pkgs) lib python3;
  package = import ./default.nix { inherit lib pkgs python3; };
  pythonWithDeps = python3.withPackages (
    ps: package.propagatedBuildInputs ++ package.devDependencies ++ [ ps.pip ]
  );
  checkScript = pkgs.writeScriptBin "check" ''
    nix build -f . tests -L "$@"


@ -112,6 +112,4 @@ def test_list_prs_to_merge(monkeypatch: pytest.MonkeyPatch) -> None:
            assignees=[dict(login=bot_name)],
        ),
    ]
    assert clan_merge.list_prs_to_merge(prs, bot_name=bot_name, gitea_token="test") == [
        prs[0]
    ]


@ -1,40 +1,33 @@
{
  imports = [
    ./clan-merge/flake-module.nix
    ./matrix-bot/flake-module.nix
  ];
  perSystem =
    { pkgs, config, ... }:
    {
      packages =
        let
          writers = pkgs.callPackage ./writers.nix { };
        in
        {
          inherit (pkgs.callPackage ./renovate { }) renovate;
          gitea = pkgs.callPackage ./gitea { };
          action-create-pr = pkgs.callPackage ./action-create-pr {
            inherit (writers) writePureShellScriptBin;
          };
          action-ensure-tea-login = pkgs.callPackage ./action-ensure-tea-login {
            inherit (writers) writePureShellScriptBin;
          };
          action-flake-update = pkgs.callPackage ./action-flake-update {
            inherit (writers) writePureShellScriptBin;
          };
          action-flake-update-pr-clan = pkgs.callPackage ./action-flake-update-pr-clan {
            inherit (writers) writePureShellScriptBin;
            inherit (config.packages) action-ensure-tea-login action-create-pr action-flake-update;
          };
          inherit
            (pkgs.callPackages ./job-flake-updates {
              inherit (writers) writePureShellScriptBin;
              inherit (config.packages) action-flake-update-pr-clan;
            })
            job-flake-update-clan-core
            job-flake-update-clan-homepage
            job-flake-update-clan-infra
            ;
        };
    };
}
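
Each attribute defined here ends up as a per-system flake package output, so the helpers can be built or executed individually (a sketch; assumes a flakes-enabled Nix run from the repo root):

```
# Sketch: exercising packages wired up above.
nix build .#action-flake-update-pr-clan
nix run .#job-flake-update-clan-core
```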


@ -1,120 +0,0 @@
From dd2ccf4ff923757b81088e27e362e3fdb222c9d3 Mon Sep 17 00:00:00 2001
From: Jade Lovelace <software@lfcode.ca>
Date: Tue, 28 May 2024 16:36:25 +0200
Subject: [PATCH] Add an immutable tarball link to archive download headers for
Nix
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This allows `nix flake metadata` and nix in general to lock a *branch*
tarball link in a manner that causes it to fetch the correct commit even
if the branch is updated with a newer version.
For further context, Nix flakes are a feature that, among other things,
allows for "inputs" that are "github:someuser/somerepo",
"https://some-tarball-service/some-tarball.tar.gz",
"sourcehut:~meow/nya" or similar. This feature allows our users to fetch
tarballs of git-based inputs to their builds rather than using git to
fetch them, saving significant download time.
There is presently no gitea or forgejo specific fetcher in Nix, and we
don't particularly wish to have one. Ideally (as a developer on a Nix
implementation myself) we could just use the generic tarball fetcher and
not add specific forgejo support, but to do so, we need additional
metadata to know which commit a given *branch* tarball represents, which
is the purpose of the Link header added here.
The result of this patch is that a Nix user can specify `inputs.something.url =
"https://forgejo-host/some/project/archive/main.tar.gz"` in flake.nix
and get a link to some concrete tarball for the actual commit in the
lock file, then when they run `nix flake update` in the future, they
will get the latest commit in that branch.
Example of it working locally:
» nix flake metadata --refresh 'http://localhost:3000/api/v1/repos/jade/cats/archive/main.tar.gz?dir=configs/nix'
Resolved URL: http://localhost:3000/api/v1/repos/jade/cats/archive/main.tar.gz?dir=configs/nix
Locked URL: http://localhost:3000/api/v1/repos/jade/cats/archive/804ede182b6b66469b23ea4d21eece52766b7a06.tar.gz?dir=configs
/nix&narHash=sha256-yP7KkDVfuixZzs0fsqhSETXFC0y8m6nmPLw2GrAMxKQ%3D
Description: Computers with the nixos
Path: /nix/store/s856c6yqghyan4v0zy6jj19ksv0q22nx-source
Revision: 804ede182b6b66469b23ea4d21eece52766b7a06
Last modified: 2024-05-02 00:48:32
For details on the header value, see:
https://github.com/nixos/nix/blob/56763ff918eb308db23080e560ed2ea3e00c80a7/doc/manual/src/protocols/tarball-fetcher.md
Signed-off-by: Jörg Thalheim <joerg@thalheim.io>
---
routers/api/v1/repo/file.go | 6 ++++++
routers/web/repo/repo.go | 6 ++++++
tests/integration/api_repo_archive_test.go | 11 +++++++++++
3 files changed, 23 insertions(+)
diff --git a/routers/api/v1/repo/file.go b/routers/api/v1/repo/file.go
index 156033f58a..b7ad63af08 100644
--- a/routers/api/v1/repo/file.go
+++ b/routers/api/v1/repo/file.go
@@ -319,6 +319,12 @@ func archiveDownload(ctx *context.APIContext) {
func download(ctx *context.APIContext, archiveName string, archiver *repo_model.RepoArchiver) {
downloadName := ctx.Repo.Repository.Name + "-" + archiveName
+ // Add nix format link header so tarballs lock correctly:
+ // https://github.com/nixos/nix/blob/56763ff918eb308db23080e560ed2ea3e00c80a7/doc/manual/src/protocols/tarball-fetcher.md
+ ctx.Resp.Header().Add("Link", fmt.Sprintf("<%s/archive/%s.tar.gz?rev=%s>; rel=\"immutable\"",
+ ctx.Repo.Repository.APIURL(),
+ archiver.CommitID, archiver.CommitID))
+
rPath := archiver.RelativePath()
if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
// If we have a signed url (S3, object storage), redirect to this directly.
diff --git a/routers/web/repo/repo.go b/routers/web/repo/repo.go
index 71c582b5f9..bb6349658f 100644
--- a/routers/web/repo/repo.go
+++ b/routers/web/repo/repo.go
@@ -484,6 +484,12 @@ func Download(ctx *context.Context) {
func download(ctx *context.Context, archiveName string, archiver *repo_model.RepoArchiver) {
downloadName := ctx.Repo.Repository.Name + "-" + archiveName
+ // Add nix format link header so tarballs lock correctly:
+ // https://github.com/nixos/nix/blob/56763ff918eb308db23080e560ed2ea3e00c80a7/doc/manual/src/protocols/tarball-fetcher.md
+ ctx.Resp.Header().Add("Link", fmt.Sprintf("<%s/archive/%s.tar.gz?rev=%s>; rel=\"immutable\"",
+ ctx.Repo.Repository.APIURL(),
+ archiver.CommitID, archiver.CommitID))
+
rPath := archiver.RelativePath()
if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
// If we have a signed url (S3, object storage), redirect to this directly.
diff --git a/tests/integration/api_repo_archive_test.go b/tests/integration/api_repo_archive_test.go
index 57d3abfe84..340ff03961 100644
--- a/tests/integration/api_repo_archive_test.go
+++ b/tests/integration/api_repo_archive_test.go
@@ -8,6 +8,7 @@
"io"
"net/http"
"net/url"
+ "regexp"
"testing"
auth_model "code.gitea.io/gitea/models/auth"
@@ -39,6 +40,16 @@ func TestAPIDownloadArchive(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, bs, 266)
+ // Must return a link to a commit ID as the "immutable" archive link
+ linkHeaderRe := regexp.MustCompile(`<(?P<url>https?://.*/api/v1/repos/user2/repo1/archive/[a-f0-9]+\.tar\.gz.*)>; rel="immutable"`)
+ m := linkHeaderRe.FindStringSubmatch(resp.Header().Get("Link"))
+ assert.NotEmpty(t, m[1])
+ resp = MakeRequest(t, NewRequest(t, "GET", m[1]).AddTokenAuth(token), http.StatusOK)
+ bs2, err := io.ReadAll(resp.Body)
+ assert.NoError(t, err)
+ // The locked URL should give the same bytes as the non-locked one
+ assert.EqualValues(t, bs, bs2)
+
link, _ = url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master.bundle", user2.Name, repo.Name))
resp = MakeRequest(t, NewRequest(t, "GET", link.String()).AddTokenAuth(token), http.StatusOK)
bs, err = io.ReadAll(resp.Body)
--
2.44.1
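
Against a Gitea/Forgejo build carrying this patch, the generic tarball fetcher can lock branch tarball URLs to concrete revisions, as the commit message's example shows; a short sketch against the public instance (illustrative URL and repo):

```
# Sketch: lock a branch tarball to a concrete commit (illustrative URL).
nix flake metadata --refresh \
  'https://git.clan.lol/api/v1/repos/clan/clan-core/archive/main.tar.gz'
# The reported "Locked URL" should point at <commit-id>.tar.gz?rev=<commit-id>.
```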


@ -21,7 +21,7 @@ index 007e790b8..a8f3ba7dc 100644
 	ctx.Data["PageIsSignUp"] = true
+	if !strings.Contains(strings.ToLower(form.Notabot), "clan") {
+		ctx.Error(http.StatusForbidden)
+		return
+	}


@ -3,6 +3,5 @@
gitea.overrideAttrs (old: {
  patches = old.patches ++ [
    ./0001-add-bot-check.patch
    ./0001-Add-an-immutable-tarball-link-to-archive-download-he.patch
  ];
})


@ -1,13 +1,13 @@
{ action-flake-update-pr-clan, writePureShellScriptBin }:
let
  job-flake-update =
    repo:
    writePureShellScriptBin "job-flake-update-${repo}" [ action-flake-update-pr-clan ] ''
      export REPO="gitea@git.clan.lol:clan/${repo}.git"
      export KEEP_VARS="REPO''${KEEP_VARS:+ $KEEP_VARS}"
      action-flake-update-pr-clan
    '';
in
{
  job-flake-update-clan-core = job-flake-update "clan-core";


@ -1,6 +0,0 @@
source_up
watch_file flake-module.nix shell.nix default.nix
# Because we depend on nixpkgs sources, uploading to builders takes a long time
use flake .#matrix-bot --builders ''


@ -1,3 +0,0 @@
*.json
**/data
**/__pycache__


@ -1,13 +0,0 @@
#!/usr/bin/env python3
import os
import sys
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
)
from matrix_bot import main # NOQA
if __name__ == "__main__":
main()


@ -1,42 +0,0 @@
{
python3,
setuptools,
matrix-nio,
aiofiles,
aiohttp,
markdown2,
git,
...
}:
let
pythonDependencies = [
matrix-nio
aiofiles
aiohttp
markdown2
];
runtimeDependencies = [ git ];
testDependencies = pythonDependencies ++ runtimeDependencies ++ [ ];
in
python3.pkgs.buildPythonApplication {
name = "matrix-bot";
src = ./.;
format = "pyproject";
nativeBuildInputs = [ setuptools ];
propagatedBuildInputs = pythonDependencies ++ runtimeDependencies;
passthru.testDependencies = testDependencies;
# Clean up after the package to avoid leaking python packages into a devshell
postFixup = ''
rm $out/nix-support/propagated-build-inputs
'';
meta.mainProgram = "matrix-bot";
}


@ -1,14 +0,0 @@
{ ... }:
{
perSystem =
{ self', pkgs, ... }:
{
devShells.matrix-bot = pkgs.callPackage ./shell.nix { inherit (self'.packages) matrix-bot; };
packages = {
matrix-bot = pkgs.python3.pkgs.callPackage ./default.nix { };
};
checks = { };
};
}


@ -1,169 +0,0 @@
import argparse
import asyncio
import logging
import os
import sys
from os import environ
from pathlib import Path
from matrix_bot.custom_logger import setup_logging
from matrix_bot.gitea import GiteaData
from matrix_bot.main import bot_main
from matrix_bot.matrix import MatrixData
log = logging.getLogger(__name__)
curr_dir = Path(__file__).parent
data_dir = Path(os.getcwd()) / "data"
def create_parser(prog: str | None = None) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
prog=prog,
description="A gitea bot for matrix",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--debug",
help="Enable debug logging",
action="store_true",
default=False,
)
parser.add_argument(
"--server",
help="The matrix server to connect to",
default="https://matrix.clan.lol",
)
parser.add_argument(
"--user",
help="The matrix user to connect as",
default="@clan-bot:clan.lol",
)
parser.add_argument(
"--avatar",
help="The path to the image to use as the avatar",
default=curr_dir / "avatar.png",
)
parser.add_argument(
"--repo-owner",
help="The owner of gitea the repository",
default="clan",
)
parser.add_argument(
"--repo-name",
help="The name of the repository",
default="clan-core",
)
parser.add_argument(
"--changelog-room",
help="The matrix room to join for the changelog bot",
default="#bot-test:gchq.icu",
)
parser.add_argument(
"--review-room",
help="The matrix room to join for the review bot",
default="#bot-test:gchq.icu",
)
parser.add_argument(
"--changelog-frequency",
help="The frequency to check for changelog updates in days",
default=7,
type=int,
)
def valid_weekday(value: str) -> str:
days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
if value not in days:
raise argparse.ArgumentTypeError(
f"{value} is not a valid weekday. Choose from {', '.join(days)}"
)
return value
parser.add_argument(
"--publish-day",
help="The day of the week to publish the changelog. Ignored if changelog-frequency is less than 7 days.",
default="Wednesday",
type=valid_weekday,
)
parser.add_argument(
"--gitea-url",
help="The gitea url to connect to",
default="https://git.clan.lol",
)
parser.add_argument(
"--data-dir",
help="The directory to store data",
default=data_dir,
type=Path,
)
return parser
def matrix_password() -> str:
matrix_password = environ.get("MATRIX_PASSWORD")
if matrix_password is not None:
return matrix_password
matrix_password_file = environ.get("MATRIX_PASSWORD_FILE", default=None)
if matrix_password_file is None:
raise Exception("MATRIX_PASSWORD_FILE environment variable is not set")
with open(matrix_password_file) as f:
return f.read().strip()
def main() -> None:
parser = create_parser()
args = parser.parse_args()
if args.debug:
setup_logging(logging.DEBUG, root_log_name=__name__.split(".")[0])
log.debug("Debug log activated")
else:
setup_logging(logging.INFO, root_log_name=__name__.split(".")[0])
matrix = MatrixData(
server=args.server,
user=args.user,
avatar=args.avatar,
changelog_room=args.changelog_room,
changelog_frequency=args.changelog_frequency,
publish_day=args.publish_day,
review_room=args.review_room,
password=matrix_password(),
)
gitea = GiteaData(
url=args.gitea_url,
owner=args.repo_owner,
repo=args.repo_name,
access_token=os.getenv("GITEA_ACCESS_TOKEN"),
)
args.data_dir.mkdir(parents=True, exist_ok=True)
try:
asyncio.run(bot_main(matrix, gitea, args.data_dir))
except KeyboardInterrupt:
print("User Interrupt", file=sys.stderr)
if __name__ == "__main__":
main()
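
Given the argparse defaults above and the mbot entry point declared in the package's pyproject.toml, an invocation would look roughly like this (secret paths are placeholders):

```
# Sketch: running the bot (placeholder secret paths).
export MATRIX_PASSWORD_FILE=/run/secrets/matrix-password
export GITEA_ACCESS_TOKEN="$(cat /run/secrets/gitea-token)"
mbot --debug --changelog-frequency 7 --publish-day Wednesday
```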


@ -1,4 +0,0 @@
from . import main
if __name__ == "__main__":
main()

Binary file not shown.

(removed image, 105 KiB)


@ -1,214 +0,0 @@
import asyncio
import datetime
import json
import logging
import shlex
import subprocess
from pathlib import Path
import aiohttp
from nio import (
AsyncClient,
JoinResponse,
)
from matrix_bot.gitea import (
GiteaData,
)
from .locked_open import read_locked_file, write_locked_file
from .matrix import MatrixData, send_message
from .openai import create_jsonl_data, upload_and_process_file
log = logging.getLogger(__name__)
def last_ndays_to_today(ndays: int) -> (str, str):
# Get today's date
today = datetime.datetime.now()
# Calculate the date one week ago
last_week = today - datetime.timedelta(days=ndays)
# Format both dates to "YYYY-MM-DD"
todate = today.strftime("%Y-%m-%d")
fromdate = last_week.strftime("%Y-%m-%d")
return (fromdate, todate)
def write_file_with_date_prefix(
content: str, directory: Path, *, ndays: int, suffix: str
) -> Path:
"""
Write content to a file with the current date as filename prefix.
:param content: The content to write to the file.
:param directory: The directory where the file will be saved.
:return: The path to the created file.
"""
# Ensure the directory exists
directory.mkdir(parents=True, exist_ok=True)
# Get the current date
fromdate, todate = last_ndays_to_today(ndays)
# Create the filename
filename = f"{fromdate}__{todate}_{suffix}.txt"
file_path = directory / filename
# Write the content to the file
with open(file_path, "w") as file:
file.write(content)
return file_path
async def git_pull(repo_path: Path) -> None:
cmd = ["git", "pull"]
log.debug(f"Running command: {shlex.join(cmd)}")
process = await asyncio.create_subprocess_exec(
*cmd,
cwd=str(repo_path),
)
await process.wait()
async def git_log(repo_path: str, ndays: int) -> str:
cmd = [
"git",
"log",
f"--since={ndays} days ago",
"--pretty=format:%h - %an, %ar : %s",
"--stat",
"--patch",
]
log.debug(f"Running command: {shlex.join(cmd)}")
process = await asyncio.create_subprocess_exec(
*cmd,
cwd=repo_path,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
raise Exception(
f"Command '{' '.join(cmd)}' failed with exit code {process.returncode}"
)
return stdout.decode()
async def changelog_bot(
client: AsyncClient,
http: aiohttp.ClientSession,
matrix: MatrixData,
gitea: GiteaData,
data_dir: Path,
) -> None:
last_run_path = data_dir / "last_changelog_run.json"
last_run = read_locked_file(last_run_path)
if last_run == {}:
fromdate, todate = last_ndays_to_today(matrix.changelog_frequency)
last_run = {
"fromdate": fromdate,
"todate": todate,
"ndays": matrix.changelog_frequency,
}
log.debug(f"First run. Setting last_run to {last_run}")
today = datetime.datetime.now()
today_weekday = today.strftime("%A")
if today_weekday != matrix.publish_day:
log.debug(f"Changelog not due yet. Due on {matrix.publish_day}")
return
else:
last_date = datetime.datetime.strptime(last_run["todate"], "%Y-%m-%d")
today = datetime.datetime.now()
today_weekday = today.strftime("%A")
delta = datetime.timedelta(days=matrix.changelog_frequency)
if today - last_date <= delta:
log.debug(f"Changelog not due yet. Due in {delta.days} days")
return
elif today_weekday != matrix.publish_day:
log.debug(f"Changelog not due yet. Due on {matrix.publish_day}")
return
# If you made a new room and haven't joined as that user, you can use
room: JoinResponse = await client.join(matrix.changelog_room)
if not room.transport_response.ok:
log.error("This can happen if the room doesn't exist or the bot isn't invited")
raise Exception(f"Failed to join room {room}")
repo_path = data_dir / gitea.repo
if not repo_path.exists():
cmd = [
"git",
"clone",
f"{gitea.url}/{gitea.owner}/{gitea.repo}.git",
gitea.repo,
]
subprocess.run(cmd, cwd=data_dir, check=True)
# git pull
await git_pull(repo_path)
# git log
diff = await git_log(repo_path, matrix.changelog_frequency)
fromdate, todate = last_ndays_to_today(matrix.changelog_frequency)
log.info(f"Generating changelog from {fromdate} to {todate}")
system_prompt = f"""
Create a concise changelog for the {matrix.changelog_frequency}.
Follow these guidelines:
- The header should include the date range from {fromdate} to {todate}
- Use present tense
- Keep the summary brief
- Follow commit message format: "scope: message (#number)"
- Link pull requests as: '{gitea.url}/{gitea.owner}/{gitea.repo}/pulls/<number>'
- Use markdown links to make the pull request number clickable
- Mention each scope and pull request number only once
- Have these headers in the changelog if applicable:
- New Features
- Documentation
- Refactoring
- Bug Fixes
- Other Changes
Changelog:
---
"""
# Step 1: Create the JSONL file
jsonl_data = await create_jsonl_data(user_prompt=diff, system_prompt=system_prompt)
# Step 2: Upload the JSONL file and process it
results = await upload_and_process_file(session=http, jsonl_data=jsonl_data)
# Write the results to a file in the changelogs directory
result_file = write_file_with_date_prefix(
json.dumps(results, indent=4),
data_dir / "changelogs",
ndays=matrix.changelog_frequency,
suffix="result",
)
log.info(f"LLM result written to: {result_file}")
# Join responses together
all_changelogs = []
for result in results:
choices = result["response"]["body"]["choices"]
changelog = "\n".join(choice["message"]["content"] for choice in choices)
all_changelogs.append(changelog)
full_changelog = "\n\n".join(all_changelogs)
# Write the last run to the file
write_locked_file(last_run_path, last_run)
log.info(f"Changelog generated:\n{full_changelog}")
await send_message(client, room, full_changelog)


@ -1,97 +0,0 @@
import inspect
import logging
from collections.abc import Callable
from pathlib import Path
from typing import Any
grey = "\x1b[38;20m"
yellow = "\x1b[33;20m"
red = "\x1b[31;20m"
bold_red = "\x1b[31;1m"
green = "\u001b[32m"
blue = "\u001b[34m"
def get_formatter(color: str) -> Callable[[logging.LogRecord, bool], logging.Formatter]:
def myformatter(
record: logging.LogRecord, with_location: bool
) -> logging.Formatter:
reset = "\x1b[0m"
try:
filepath = Path(record.pathname).resolve()
filepath = Path("~", filepath.relative_to(Path.home()))
except Exception:
filepath = Path(record.pathname)
if not with_location:
return logging.Formatter(f"{color}%(levelname)s{reset}: %(message)s")
return logging.Formatter(
f"{color}%(levelname)s{reset}: %(message)s\nLocation: {filepath}:%(lineno)d::%(funcName)s\n"
)
return myformatter
FORMATTER = {
logging.DEBUG: get_formatter(blue),
logging.INFO: get_formatter(green),
logging.WARNING: get_formatter(yellow),
logging.ERROR: get_formatter(red),
logging.CRITICAL: get_formatter(bold_red),
}
class CustomFormatter(logging.Formatter):
def __init__(self, log_locations: bool) -> None:
super().__init__()
self.log_locations = log_locations
def format(self, record: logging.LogRecord) -> str:
return FORMATTER[record.levelno](record, self.log_locations).format(record)
class ThreadFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
return FORMATTER[record.levelno](record, False).format(record)
def get_caller() -> str:
frame = inspect.currentframe()
if frame is None:
return "unknown"
caller_frame = frame.f_back
if caller_frame is None:
return "unknown"
caller_frame = caller_frame.f_back
if caller_frame is None:
return "unknown"
frame_info = inspect.getframeinfo(caller_frame)
try:
filepath = Path(frame_info.filename).resolve()
filepath = Path("~", filepath.relative_to(Path.home()))
except Exception:
filepath = Path(frame_info.filename)
ret = f"{filepath}:{frame_info.lineno}::{frame_info.function}"
return ret
def setup_logging(level: Any, root_log_name: str = __name__.split(".")[0]) -> None:
# Get the root logger and set its level
main_logger = logging.getLogger(root_log_name)
main_logger.setLevel(level)
# Create and add the default handler
default_handler = logging.StreamHandler()
# Create and add your custom handler
default_handler.setLevel(level)
default_handler.setFormatter(CustomFormatter(str(level) == str(logging.DEBUG)))
main_logger.addHandler(default_handler)
# Set logging level for other modules used by this module
logging.getLogger("asyncio").setLevel(logging.INFO)
logging.getLogger("httpx").setLevel(level=logging.WARNING)


@ -1,88 +0,0 @@
import logging
log = logging.getLogger(__name__)
from dataclasses import dataclass
from enum import Enum
import aiohttp
@dataclass
class GiteaData:
url: str
owner: str
repo: str
access_token: str | None = None
def endpoint_url(gitea: GiteaData, endpoint: str) -> str:
return f"{gitea.url}/api/v1/repos/{gitea.owner}/{gitea.repo}/{endpoint}"
async def fetch_repo_labels(
gitea: GiteaData,
session: aiohttp.ClientSession,
) -> list[dict]:
"""
Fetch labels from a Gitea repository.
Returns:
list: List of labels in the repository.
"""
url = endpoint_url(gitea, "labels")
headers = {"Accept": "application/vnd.github.v3+json"}
if gitea.access_token:
headers["Authorization"] = f"token {gitea.access_token}"
async with session.get(url, headers=headers) as response:
if response.status == 200:
labels = await response.json()
return labels
else:
# You may want to handle different statuses differently
raise Exception(
f"Failed to fetch labels: {response.status}, {await response.text()}"
)
class PullState(Enum):
OPEN = "open"
CLOSED = "closed"
ALL = "all"
async def fetch_pull_requests(
gitea: GiteaData,
session: aiohttp.ClientSession,
*,
limit: int,
state: PullState,
label_ids: list[int] = [],
) -> list[dict]:
"""
Fetch pull requests from a Gitea repository.
Returns:
list: List of pull requests.
"""
# You can use the same pattern as fetch_repo_labels
url = endpoint_url(gitea, "pulls")
params = {
"state": state.value,
"sort": "recentupdate",
"limit": limit,
"labels": label_ids,
}
headers = {"accept": "application/json"}
async with session.get(url, params=params, headers=headers) as response:
if response.status == 200:
labels = await response.json()
return labels
else:
# You may want to handle different statuses differently
raise Exception(
f"Failed to fetch labels: {response.status}, {await response.text()}"
)


@ -1,31 +0,0 @@
import fcntl
import json
from collections.abc import Generator
from contextlib import contextmanager
from pathlib import Path
from typing import Any
@contextmanager
def locked_open(filename: str | Path, mode: str = "r") -> Generator:
"""
This is a context manager that provides an advisory write lock on the file specified by `filename` when entering the context, and releases the lock when leaving the context. The lock is acquired using the `fcntl` module's `LOCK_EX` flag, which applies an exclusive write lock to the file.
"""
with open(filename, mode) as fd:
fcntl.flock(fd, fcntl.LOCK_EX)
yield fd
fcntl.flock(fd, fcntl.LOCK_UN)
def write_locked_file(path: Path, data: dict[str, Any]) -> None:
with locked_open(path, "w+") as f:
f.write(json.dumps(data, indent=4))
def read_locked_file(path: Path) -> dict[str, Any]:
if not path.exists():
return {}
with locked_open(path, "r") as f:
content: str = f.read()
parsed: dict[str, Any] = json.loads(content)
return parsed
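
For comparison, the same advisory-lock pattern is available from the shell via the flock(1) utility from util-linux (a sketch; the lock-file path is hypothetical):

```
# Sketch: exclusive advisory lock with flock(1), mirroring fcntl.LOCK_EX above.
(
  flock -x 9                       # block until the exclusive lock is held
  cat data/last_review_run.json    # read/update the data while locked
) 9>>data/last_review_run.json.lock
```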


@ -1,54 +0,0 @@
import asyncio
import logging
from pathlib import Path
import aiohttp
log = logging.getLogger(__name__)
from nio import AsyncClient, ClientConfig, ProfileGetAvatarResponse, RoomMessageText
from .changelog_bot import changelog_bot
from .gitea import GiteaData
from .matrix import MatrixData, set_avatar, upload_image
from .review_bot import message_callback, review_requested_bot
async def bot_main(
matrix: MatrixData,
gitea: GiteaData,
data_dir: Path,
) -> None:
# Setup client configuration to handle encryption
client_config = ClientConfig(
encryption_enabled=False,
)
log.info(f"Connecting to {matrix.server} as {matrix.user}")
client = AsyncClient(matrix.server, matrix.user, config=client_config)
client.add_event_callback(message_callback, RoomMessageText)
result = await client.login(matrix.password)
if not result.transport_response.ok:
log.critical(f"Failed to login: {result}")
exit(1)
log.info(f"Logged in as {result}")
avatar: ProfileGetAvatarResponse = await client.get_avatar()
if not avatar.avatar_url:
mxc_url = await upload_image(client, matrix.avatar)
log.info(f"Uploaded avatar to {mxc_url}")
await set_avatar(client, mxc_url)
else:
log.info(f"Bot already has an avatar {avatar.avatar_url}")
try:
async with aiohttp.ClientSession() as session:
while True:
await changelog_bot(client, session, matrix, gitea, data_dir)
await review_requested_bot(client, session, matrix, gitea, data_dir)
await asyncio.sleep(60 * 5)
except Exception as e:
log.exception(e)
finally:
await client.close()


@ -1,88 +0,0 @@
import logging
from pathlib import Path
log = logging.getLogger(__name__)
from dataclasses import dataclass
from markdown2 import markdown
from nio import (
AsyncClient,
JoinedMembersResponse,
JoinResponse,
ProfileSetAvatarResponse,
RoomMember,
RoomSendResponse,
UploadResponse,
)
async def upload_image(client: AsyncClient, image_path: str) -> str:
with open(image_path, "rb") as image_file:
response: UploadResponse
response, _ = await client.upload(image_file, content_type="image/png")
if not response.transport_response.ok:
raise Exception(f"Failed to upload image {response}")
return response.content_uri # This is the MXC URL
async def set_avatar(client: AsyncClient, mxc_url: str) -> None:
response: ProfileSetAvatarResponse
response = await client.set_avatar(mxc_url)
if not response.transport_response.ok:
raise Exception(f"Failed to set avatar {response}")
async def get_room_members(client: AsyncClient, room: JoinResponse) -> list[RoomMember]:
users: JoinedMembersResponse = await client.joined_members(room.room_id)
if not users.transport_response.ok:
raise Exception(f"Failed to get users {users}")
return users.members
async def send_message(
client: AsyncClient,
room: JoinResponse,
message: str,
user_ids: list[str] | None = None,
) -> None:
"""
Send a message in a Matrix room, optionally mentioning users.
"""
# If user_ids are provided, format the message to mention them
formatted_message = markdown(message)
if user_ids:
mention_list = ", ".join(
[
f"<a href='https://matrix.to/#/{user_id}'>{user_id}</a>"
for user_id in user_ids
]
)
formatted_message = f"{mention_list}: {formatted_message}"
content = {
"msgtype": "m.text" if user_ids else "m.notice",
"format": "org.matrix.custom.html",
"body": message,
"formatted_body": formatted_message,
}
res: RoomSendResponse = await client.room_send(
room_id=room.room_id, message_type="m.room.message", content=content
)
if not res.transport_response.ok:
raise Exception(f"Failed to send message {res}")
@dataclass
class MatrixData:
server: str
user: str
avatar: Path
password: str
changelog_room: str
review_room: str
changelog_frequency: int
publish_day: str


@ -1,129 +0,0 @@
import asyncio
import json
import logging
from os import environ
import aiohttp
log = logging.getLogger(__name__)
# The URL to which the request is sent
url: str = "https://api.openai.com/v1/chat/completions"
def api_key() -> str:
openapi_key = environ.get("OPENAI_API_KEY")
if openapi_key is not None:
return openapi_key
openai_key_file = environ.get("OPENAI_API_KEY_FILE", default=None)
if openai_key_file is None:
raise Exception("OPENAI_API_KEY_FILE environment variable is not set")
with open(openai_key_file) as f:
return f.read().strip()
from typing import Any
async def create_jsonl_data(
*,
user_prompt: str,
system_prompt: str,
model: str = "gpt-4o",
max_tokens: int = 1000,
) -> bytes:
summary_request = {
"custom_id": "request-1",
"method": "POST",
"url": "/v1/chat/completions",
"body": {
"model": model,
"messages": [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
"max_tokens": max_tokens,
},
}
return json.dumps(summary_request).encode("utf-8")
async def upload_and_process_file(
*, session: aiohttp.ClientSession, jsonl_data: bytes, api_key: str = api_key()
) -> dict[str, Any]:
"""
Upload a JSONL file to OpenAI's Batch API and process it asynchronously.
"""
upload_url = "https://api.openai.com/v1/files"
headers = {
"Authorization": f"Bearer {api_key}",
}
data = aiohttp.FormData()
data.add_field(
"file", jsonl_data, filename="changelog.jsonl", content_type="application/jsonl"
)
data.add_field("purpose", "batch")
async with session.post(upload_url, headers=headers, data=data) as response:
if response.status != 200:
raise Exception(f"File upload failed with status code {response.status}")
upload_response = await response.json()
file_id = upload_response.get("id")
if not file_id:
raise Exception("File ID not returned from upload")
# Step 2: Create a batch using the uploaded file ID
batch_url = "https://api.openai.com/v1/batches"
batch_data = {
"input_file_id": file_id,
"endpoint": "/v1/chat/completions",
"completion_window": "24h",
}
async with session.post(batch_url, headers=headers, json=batch_data) as response:
if response.status != 200:
raise Exception(f"Batch creation failed with status code {response.status}")
batch_response = await response.json()
batch_id = batch_response.get("id")
if not batch_id:
raise Exception("Batch ID not returned from creation")
# Step 3: Check the status of the batch until completion
status_url = f"https://api.openai.com/v1/batches/{batch_id}"
while True:
async with session.get(status_url, headers=headers) as response:
if response.status != 200:
raise Exception(
f"Failed to check batch status with status code {response.status}"
)
status_response = await response.json()
status = status_response.get("status")
if status in ["completed", "failed", "expired"]:
break
await asyncio.sleep(10) # Wait before checking again
if status != "completed":
raise Exception(f"Batch processing failed with status: {status}")
# Step 4: Retrieve the results
output_file_id = status_response.get("output_file_id")
output_url = f"https://api.openai.com/v1/files/{output_file_id}/content"
async with session.get(output_url, headers=headers) as response:
if response.status != 200:
raise Exception(
f"Failed to retrieve batch results with status code {response.status}"
)
# Read content as text
content = await response.text()
# Parse the content as JSONL
results = [json.loads(line) for line in content.splitlines()]
return results
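
The same upload, batch-create, poll, and fetch flow, traced with plain curl against the endpoints used above (a sketch; the jq usage and env-var handling are assumptions, and the polling loop is omitted):

```
# Sketch: the Batch API flow with curl (assumes jq; abbreviated, no polling loop).
auth="Authorization: Bearer $(cat "$OPENAI_API_KEY_FILE")"
file_id=$(curl -s https://api.openai.com/v1/files -H "$auth" \
  -F purpose=batch -F file=@changelog.jsonl | jq -r .id)
batch_id=$(curl -s https://api.openai.com/v1/batches -H "$auth" \
  -H 'Content-Type: application/json' \
  -d "{\"input_file_id\":\"$file_id\",\"endpoint\":\"/v1/chat/completions\",\"completion_window\":\"24h\"}" \
  | jq -r .id)
curl -s "https://api.openai.com/v1/batches/$batch_id" -H "$auth" | jq -r .status
```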


@ -1,90 +0,0 @@
import logging
log = logging.getLogger(__name__)
import datetime
import time
from pathlib import Path
import aiohttp
from nio import (
AsyncClient,
JoinResponse,
MatrixRoom,
RoomMessageText,
)
from matrix_bot.gitea import (
GiteaData,
PullState,
fetch_pull_requests,
)
from .locked_open import read_locked_file, write_locked_file
from .matrix import MatrixData, get_room_members, send_message
async def message_callback(room: MatrixRoom, event: RoomMessageText) -> None:
log.debug(
f"Message received in room {room.display_name}\n"
f"{room.user_name(event.sender)} | {event.body}"
)
async def review_requested_bot(
client: AsyncClient,
http: aiohttp.ClientSession,
matrix: MatrixData,
gitea: GiteaData,
data_dir: Path,
) -> None:
# If you made a new room and haven't joined as that user, you can use
room: JoinResponse = await client.join(matrix.review_room)
if not room.transport_response.ok:
log.error("This can happen if the room doesn't exist or the bot isn't invited")
raise Exception(f"Failed to join room {room}")
# Get the members of the room
users = await get_room_members(client, room)
# Fetch the pull requests
tstart = time.time()
pulls = await fetch_pull_requests(gitea, http, limit=50, state=PullState.ALL)
# Read the last updated pull request
ping_hist_path = data_dir / "last_review_run.json"
ping_hist = read_locked_file(ping_hist_path)
# Check if the pull request is mergeable and needs review
# and if the pull request is newer than the last updated pull request
for pull in pulls:
requested_reviewers = pull["requested_reviewers"]
pid = str(pull["id"])
if requested_reviewers and pull["mergeable"]:
last_time_updated = ping_hist.get(pid, {}).get(
"updated_at", datetime.datetime.min.isoformat()
)
if ping_hist == {} or pull["updated_at"] > last_time_updated:
ping_hist[pid] = pull
else:
continue
# Check if the requested reviewers are in the room
requested_reviewers = [r["login"].lower() for r in requested_reviewers]
ping_users = []
for user in users:
if user.display_name.lower() in requested_reviewers:
ping_users.append(user.user_id)
# Send a message to the room and mention the users
log.info(f"Pull request {pull['title']} needs review")
message = f"Review Requested:\n[{pull['title']}]({pull['html_url']})"
await send_message(client, room, message, user_ids=ping_users)
# Write the new last updated pull request
write_locked_file(ping_hist_path, ping_hist)
# Time taken
tend = time.time()
tdiff = round(tend - tstart)
log.debug(f"Time taken: {tdiff}s")


@ -1,59 +0,0 @@
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "matrix-bot"
description = "matrix bot for release messages from git commits"
dynamic = ["version"]
scripts = { mbot = "matrix_bot:main" }
license = {text = "MIT"}
[project.urls]
Homepage = "https://clan.lol/"
Documentation = "https://docs.clan.lol/"
Repository = "https://git.clan.lol/clan/clan-core"
[tool.setuptools.packages.find]
exclude = ["result"]
[tool.setuptools.package-data]
matrix_bot = ["py.typed"]
[tool.pytest.ini_options]
testpaths = "tests"
faulthandler_timeout = 60
log_level = "DEBUG"
log_format = "%(levelname)s: %(message)s\n %(pathname)s:%(lineno)d::%(funcName)s"
addopts = "--cov . --cov-report term --cov-report html:.reports/html --no-cov-on-fail --durations 5 --color=yes --new-first" # Add --pdb for debugging
norecursedirs = "tests/helpers"
markers = ["impure", "with_core"]
[tool.mypy]
python_version = "3.11"
warn_redundant_casts = true
disallow_untyped_calls = true
disallow_untyped_defs = true
no_implicit_optional = true
[[tool.mypy.overrides]]
module = "argcomplete.*"
ignore_missing_imports = true
[[tool.mypy.overrides]]
module = "ipdb.*"
ignore_missing_imports = true
[[tool.mypy.overrides]]
module = "pytest.*"
ignore_missing_imports = true
[[tool.mypy.overrides]]
module = "setuptools.*"
ignore_missing_imports = true
[tool.ruff]
target-version = "py311"
line-length = 88
lint.select = [ "E", "F", "I", "U", "N", "RUF", "ANN", "A" ]
lint.ignore = ["E501", "E402", "E731", "ANN101", "ANN401", "A003"]


@ -1,30 +0,0 @@
{
matrix-bot,
mkShell,
ruff,
python3,
}:
let
devshellTestDeps =
matrix-bot.passthru.testDependencies
++ (with python3.pkgs; [
rope
setuptools
wheel
ipdb
pip
]);
in
mkShell {
buildInputs = [ ruff ] ++ devshellTestDeps;
PYTHONBREAKPOINT = "ipdb.set_trace";
shellHook = ''
export GIT_ROOT="$(git rev-parse --show-toplevel)"
export PKG_ROOT="$GIT_ROOT/pkgs/matrix-bot"
# Add clan command to PATH
export PATH="$PKG_ROOT/bin":"$PATH"
'';
}


@ -0,0 +1,17 @@
# This file has been generated by node2nix 1.11.1. Do not edit!
{pkgs ? import <nixpkgs> {
inherit system;
}, system ? builtins.currentSystem, nodejs ? pkgs."nodejs_18"}:
let
nodeEnv = import ./node-env.nix {
inherit (pkgs) stdenv lib python2 runCommand writeTextFile writeShellScript;
inherit pkgs nodejs;
libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null;
};
in
import ./node-packages.nix {
inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit;
inherit nodeEnv;
}


@ -0,0 +1,8 @@
{ pkgs, system, nodejs-18_x, makeWrapper }:
let
nodePackages = import ./composition.nix {
inherit pkgs system;
nodejs = nodejs-18_x;
};
in
nodePackages

pkgs/renovate/generate.sh (new executable file, 5 lines)

@ -0,0 +1,5 @@
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p nodePackages.node2nix
rm -f node-env.nix
node2nix -18 -i node-packages.json -o node-packages.nix -c composition.nix

pkgs/renovate/node-env.nix (new file, 689 lines)

@ -0,0 +1,689 @@
# This file originates from node2nix
{lib, stdenv, nodejs, python2, pkgs, libtool, runCommand, writeTextFile, writeShellScript}:
let
# Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master
utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux;
python = if nodejs ? python then nodejs.python else python2;
# Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise
tarWrapper = runCommand "tarWrapper" {} ''
mkdir -p $out/bin
cat > $out/bin/tar <<EOF
#! ${stdenv.shell} -e
$(type -p tar) "\$@" --warning=no-unknown-keyword --delay-directory-restore
EOF
chmod +x $out/bin/tar
'';
# Function that generates a TGZ file from a NPM project
buildNodeSourceDist =
{ name, version, src, ... }:
stdenv.mkDerivation {
name = "node-tarball-${name}-${version}";
inherit src;
buildInputs = [ nodejs ];
buildPhase = ''
export HOME=$TMPDIR
tgzFile=$(npm pack | tail -n 1) # Hooks to the pack command will add output (https://docs.npmjs.com/misc/scripts)
'';
installPhase = ''
mkdir -p $out/tarballs
mv $tgzFile $out/tarballs
mkdir -p $out/nix-support
echo "file source-dist $out/tarballs/$tgzFile" >> $out/nix-support/hydra-build-products
'';
};
# Common shell logic
installPackage = writeShellScript "install-package" ''
installPackage() {
local packageName=$1 src=$2
local strippedName
local DIR=$PWD
cd $TMPDIR
unpackFile $src
# Make the base dir in which the target dependency resides first
mkdir -p "$(dirname "$DIR/$packageName")"
if [ -f "$src" ]
then
# Figure out what directory has been unpacked
packageDir="$(find . -maxdepth 1 -type d | tail -1)"
# Restore write permissions to make building work
find "$packageDir" -type d -exec chmod u+x {} \;
chmod -R u+w "$packageDir"
# Move the extracted tarball into the output folder
mv "$packageDir" "$DIR/$packageName"
elif [ -d "$src" ]
then
# Get a stripped name (without hash) of the source directory.
# On old nixpkgs it's already set internally.
if [ -z "$strippedName" ]
then
strippedName="$(stripHash $src)"
fi
# Restore write permissions to make building work
chmod -R u+w "$strippedName"
# Move the extracted directory into the output folder
mv "$strippedName" "$DIR/$packageName"
fi
# Change to the package directory to install dependencies
cd "$DIR/$packageName"
}
'';
# Bundle the dependencies of the package
#
# Only include dependencies if they don't exist. They may also be bundled in the package.
includeDependencies = {dependencies}:
lib.optionalString (dependencies != []) (
''
mkdir -p node_modules
cd node_modules
''
+ (lib.concatMapStrings (dependency:
''
if [ ! -e "${dependency.packageName}" ]; then
${composePackage dependency}
fi
''
) dependencies)
+ ''
cd ..
''
);
# Recursively composes the dependencies of a package
composePackage = { name, packageName, src, dependencies ? [], ... }@args:
builtins.addErrorContext "while evaluating node package '${packageName}'" ''
installPackage "${packageName}" "${src}"
${includeDependencies { inherit dependencies; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
'';
pinpointDependencies = {dependencies, production}:
let
pinpointDependenciesFromPackageJSON = writeTextFile {
name = "pinpointDependencies.js";
text = ''
var fs = require('fs');
var path = require('path');
function resolveDependencyVersion(location, name) {
if(location == process.env['NIX_STORE']) {
return null;
} else {
var dependencyPackageJSON = path.join(location, "node_modules", name, "package.json");
if(fs.existsSync(dependencyPackageJSON)) {
var dependencyPackageObj = JSON.parse(fs.readFileSync(dependencyPackageJSON));
if(dependencyPackageObj.name == name) {
return dependencyPackageObj.version;
}
} else {
return resolveDependencyVersion(path.resolve(location, ".."), name);
}
}
}
function replaceDependencies(dependencies) {
if(typeof dependencies == "object" && dependencies !== null) {
for(var dependency in dependencies) {
var resolvedVersion = resolveDependencyVersion(process.cwd(), dependency);
if(resolvedVersion === null) {
process.stderr.write("WARNING: cannot pinpoint dependency: "+dependency+", context: "+process.cwd()+"\n");
} else {
dependencies[dependency] = resolvedVersion;
}
}
}
}
/* Read the package.json configuration */
var packageObj = JSON.parse(fs.readFileSync('./package.json'));
/* Pinpoint all dependencies */
replaceDependencies(packageObj.dependencies);
if(process.argv[2] == "development") {
replaceDependencies(packageObj.devDependencies);
}
else {
packageObj.devDependencies = {};
}
replaceDependencies(packageObj.optionalDependencies);
replaceDependencies(packageObj.peerDependencies);
/* Write the fixed package.json file */
fs.writeFileSync("package.json", JSON.stringify(packageObj, null, 2));
'';
};
in
''
node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"}
${lib.optionalString (dependencies != [])
''
if [ -d node_modules ]
then
cd node_modules
${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies}
cd ..
fi
''}
'';
# Recursively traverses all dependencies of a package and pinpoints all
# dependencies in the package.json file to the versions that are actually
# being used.
pinpointDependenciesOfPackage = { packageName, dependencies ? [], production ? true, ... }@args:
''
if [ -d "${packageName}" ]
then
cd "${packageName}"
${pinpointDependencies { inherit dependencies production; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
fi
'';
# Extract the Node.js source code which is used to compile packages with
# native bindings
nodeSources = runCommand "node-sources" {} ''
tar --no-same-owner --no-same-permissions -xf ${nodejs.src}
mv node-* $out
'';
# Script that adds _integrity fields to all package.json files to prevent NPM from consulting the cache (that is empty)
addIntegrityFieldsScript = writeTextFile {
name = "addintegrityfields.js";
text = ''
var fs = require('fs');
var path = require('path');
function augmentDependencies(baseDir, dependencies) {
for(var dependencyName in dependencies) {
var dependency = dependencies[dependencyName];
// Open package.json and augment metadata fields
var packageJSONDir = path.join(baseDir, "node_modules", dependencyName);
var packageJSONPath = path.join(packageJSONDir, "package.json");
if(fs.existsSync(packageJSONPath)) { // Only augment packages that exist. Sometimes we may have production installs in which development dependencies can be ignored
console.log("Adding metadata fields to: "+packageJSONPath);
var packageObj = JSON.parse(fs.readFileSync(packageJSONPath));
if(dependency.integrity) {
packageObj["_integrity"] = dependency.integrity;
} else {
packageObj["_integrity"] = "sha1-000000000000000000000000000="; // When no _integrity string has been provided (e.g. by Git dependencies), add a dummy one. It does not seem to harm and it bypasses downloads.
}
if(dependency.resolved) {
packageObj["_resolved"] = dependency.resolved; // Adopt the resolved property if one has been provided
} else {
packageObj["_resolved"] = dependency.version; // Set the resolved version to the version identifier. This prevents NPM from cloning Git repositories.
}
if(dependency.from !== undefined) { // Adopt from property if one has been provided
packageObj["_from"] = dependency.from;
}
fs.writeFileSync(packageJSONPath, JSON.stringify(packageObj, null, 2));
}
// Augment transitive dependencies
if(dependency.dependencies !== undefined) {
augmentDependencies(packageJSONDir, dependency.dependencies);
}
}
}
if(fs.existsSync("./package-lock.json")) {
var packageLock = JSON.parse(fs.readFileSync("./package-lock.json"));
if(![1, 2].includes(packageLock.lockfileVersion)) {
process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n");
process.exit(1);
}
if(packageLock.dependencies !== undefined) {
augmentDependencies(".", packageLock.dependencies);
}
}
'';
};
# Reconstructs a package-lock file from the node_modules/ folder structure and package.json files with dummy sha1 hashes
reconstructPackageLock = writeTextFile {
name = "reconstructpackagelock.js";
text = ''
var fs = require('fs');
var path = require('path');
var packageObj = JSON.parse(fs.readFileSync("package.json"));
var lockObj = {
name: packageObj.name,
version: packageObj.version,
lockfileVersion: 2,
requires: true,
packages: {
"": {
name: packageObj.name,
version: packageObj.version,
license: packageObj.license,
bin: packageObj.bin,
dependencies: packageObj.dependencies,
engines: packageObj.engines,
optionalDependencies: packageObj.optionalDependencies
}
},
dependencies: {}
};
function augmentPackageJSON(filePath, packages, dependencies) {
var packageJSON = path.join(filePath, "package.json");
if(fs.existsSync(packageJSON)) {
var packageObj = JSON.parse(fs.readFileSync(packageJSON));
packages[filePath] = {
version: packageObj.version,
integrity: "sha1-000000000000000000000000000=",
dependencies: packageObj.dependencies,
engines: packageObj.engines,
optionalDependencies: packageObj.optionalDependencies
};
dependencies[packageObj.name] = {
version: packageObj.version,
integrity: "sha1-000000000000000000000000000=",
dependencies: {}
};
processDependencies(path.join(filePath, "node_modules"), packages, dependencies[packageObj.name].dependencies);
}
}
function processDependencies(dir, packages, dependencies) {
if(fs.existsSync(dir)) {
var files = fs.readdirSync(dir);
files.forEach(function(entry) {
var filePath = path.join(dir, entry);
var stats = fs.statSync(filePath);
if(stats.isDirectory()) {
if(entry.substr(0, 1) == "@") {
// When we encounter a namespace folder, augment all packages belonging to the scope
var pkgFiles = fs.readdirSync(filePath);
pkgFiles.forEach(function(entry) {
if(stats.isDirectory()) {
var pkgFilePath = path.join(filePath, entry);
augmentPackageJSON(pkgFilePath, packages, dependencies);
}
});
} else {
augmentPackageJSON(filePath, packages, dependencies);
}
}
});
}
}
processDependencies("node_modules", lockObj.packages, lockObj.dependencies);
fs.writeFileSync("package-lock.json", JSON.stringify(lockObj, null, 2));
'';
};
# Script that links bins defined in package.json to the node_modules bin directory
# NPM does not do this for top-level packages itself anymore as of v7
linkBinsScript = writeTextFile {
name = "linkbins.js";
text = ''
var fs = require('fs');
var path = require('path');
var packageObj = JSON.parse(fs.readFileSync("package.json"));
var nodeModules = Array(packageObj.name.split("/").length).fill("..").join(path.sep);
if(packageObj.bin !== undefined) {
fs.mkdirSync(path.join(nodeModules, ".bin"))
if(typeof packageObj.bin == "object") {
Object.keys(packageObj.bin).forEach(function(exe) {
if(fs.existsSync(packageObj.bin[exe])) {
console.log("linking bin '" + exe + "'");
fs.symlinkSync(
path.join("..", packageObj.name, packageObj.bin[exe]),
path.join(nodeModules, ".bin", exe)
);
}
else {
console.log("skipping non-existent bin '" + exe + "'");
}
})
}
else {
if(fs.existsSync(packageObj.bin)) {
console.log("linking bin '" + packageObj.bin + "'");
fs.symlinkSync(
path.join("..", packageObj.name, packageObj.bin),
path.join(nodeModules, ".bin", packageObj.name.split("/").pop())
);
}
else {
console.log("skipping non-existent bin '" + packageObj.bin + "'");
}
}
}
else if(packageObj.directories !== undefined && packageObj.directories.bin !== undefined) {
fs.mkdirSync(path.join(nodeModules, ".bin"))
fs.readdirSync(packageObj.directories.bin).forEach(function(exe) {
if(fs.existsSync(path.join(packageObj.directories.bin, exe))) {
console.log("linking bin '" + exe + "'");
fs.symlinkSync(
path.join("..", packageObj.name, packageObj.directories.bin, exe),
path.join(nodeModules, ".bin", exe)
);
}
else {
console.log("skipping non-existent bin '" + exe + "'");
}
})
}
'';
};
prepareAndInvokeNPM = {packageName, bypassCache, reconstructLock, npmFlags, production}:
let
# With bypassCache, npm runs fully offline; otherwise it is pointed at an
# unreachable registry so any unexpected download attempt fails loudly
forceOfflineFlag = if bypassCache then "--offline" else "--registry http://www.example.com";
in
''
# Pinpoint the versions of all dependencies to the ones that are actually being used
echo "pinpointing versions of dependencies..."
source $pinpointDependenciesScriptPath
# Patch the shebangs of the bundled modules to prevent them from
# calling executables outside the Nix store as much as possible
patchShebangs .
# Deploy the Node.js package by running npm install. Since the
# dependencies have been provided already by ourselves, it should not
# attempt to install them again, which is good, because we want to make
# it Nix's responsibility. If it needs to install any dependencies
# anyway (e.g. because the dependency parameters are
# incomplete/incorrect), it fails.
#
# The other responsibilities of NPM are kept -- version checks, build
# steps, postprocessing etc.
export HOME=$TMPDIR
cd "${packageName}"
runHook preRebuild
${lib.optionalString bypassCache ''
${lib.optionalString reconstructLock ''
if [ -f package-lock.json ]
then
echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!"
echo "This will most likely result in version mismatches! We will remove the lock file and regenerate it!"
rm package-lock.json
else
echo "No package-lock.json file found, reconstructing..."
fi
node ${reconstructPackageLock}
''}
node ${addIntegrityFieldsScript}
''}
npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild
runHook postRebuild
if [ "''${dontNpmInstall-}" != "1" ]
then
# NPM tries to download packages even when they already exist if npm-shrinkwrap is used.
rm -f npm-shrinkwrap.json
npm ${forceOfflineFlag} --nodedir=${nodeSources} --no-bin-links --ignore-scripts ${npmFlags} ${lib.optionalString production "--production"} install
fi
# Link executables defined in package.json
node ${linkBinsScript}
'';
# Builds and composes an NPM package including all its dependencies
buildNodePackage =
{ name
, packageName
, version ? null
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, preRebuild ? ""
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, meta ? {}
, ... }@args:
let
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "preRebuild" "unpackPhase" "buildPhase" "meta" ];
in
stdenv.mkDerivation ({
name = "${name}${if version == null then "" else "-${version}"}";
buildInputs = [ tarWrapper python nodejs ]
++ lib.optional (stdenv.isLinux) utillinux
++ lib.optional (stdenv.isDarwin) libtool
++ buildInputs;
inherit nodejs;
inherit dontStrip; # Stripping may fail a build for some package deployments
inherit dontNpmInstall preRebuild unpackPhase buildPhase;
compositionScript = composePackage args;
pinpointDependenciesScript = pinpointDependenciesOfPackage args;
passAsFile = [ "compositionScript" "pinpointDependenciesScript" ];
installPhase = ''
source ${installPackage}
# Create and enter a root node_modules/ folder
mkdir -p $out/lib/node_modules
cd $out/lib/node_modules
# Compose the package and all its dependencies
source $compositionScriptPath
${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}
# Create symlink to the deployed executable folder, if applicable
if [ -d "$out/lib/node_modules/.bin" ]
then
ln -s $out/lib/node_modules/.bin $out/bin
# Fixup all executables
ls $out/bin/* | while read i
do
file="$(readlink -f "$i")"
chmod u+rwx "$file"
if isScript "$file"
then
sed -i 's/\r$//' "$file" # convert crlf to lf
fi
done
fi
# Create symlinks to the deployed manual page folders, if applicable
if [ -d "$out/lib/node_modules/${packageName}/man" ]
then
mkdir -p $out/share
for dir in "$out/lib/node_modules/${packageName}/man/"*
do
mkdir -p $out/share/man/$(basename "$dir")
for page in "$dir"/*
do
ln -s $page $out/share/man/$(basename "$dir")
done
done
fi
# Run post install hook, if provided
runHook postInstall
'';
meta = {
# default to Node.js' platforms
platforms = nodejs.meta.platforms;
} // meta;
} // extraArgs);
# Builds a node environment (a node_modules folder and a set of binaries)
buildNodeDependencies =
{ name
, packageName
, version ? null
, src
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, ... }@args:
let
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ];
in
stdenv.mkDerivation ({
name = "node-dependencies-${name}${if version == null then "" else "-${version}"}";
buildInputs = [ tarWrapper python nodejs ]
++ lib.optional (stdenv.isLinux) utillinux
++ lib.optional (stdenv.isDarwin) libtool
++ buildInputs;
inherit dontStrip; # Stripping may fail a build for some package deployments
inherit dontNpmInstall unpackPhase buildPhase;
includeScript = includeDependencies { inherit dependencies; };
pinpointDependenciesScript = pinpointDependenciesOfPackage args;
passAsFile = [ "includeScript" "pinpointDependenciesScript" ];
installPhase = ''
source ${installPackage}
mkdir -p $out/${packageName}
cd $out/${packageName}
source $includeScriptPath
# Create fake package.json to make the npm commands work properly
cp ${src}/package.json .
chmod 644 package.json
${lib.optionalString bypassCache ''
if [ -f ${src}/package-lock.json ]
then
cp ${src}/package-lock.json .
chmod 644 package-lock.json
fi
''}
# Go to the parent folder to make sure that all packages are pinpointed
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}
# Expose the executables that were installed
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
mv ${packageName} lib
ln -s $out/lib/node_modules/.bin $out/bin
'';
} // extraArgs);
# Builds a development shell
buildNodeShell =
{ name
, packageName
, version ? null
, src
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, ... }@args:
let
nodeDependencies = buildNodeDependencies args;
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "unpackPhase" "buildPhase" ];
in
stdenv.mkDerivation ({
name = "node-shell-${name}${if version == null then "" else "-${version}"}";
buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs;
buildCommand = ''
mkdir -p $out/bin
cat > $out/bin/shell <<EOF
#! ${stdenv.shell} -e
$shellHook
exec ${stdenv.shell}
EOF
chmod +x $out/bin/shell
'';
# Provide the dependencies in a development shell through the NODE_PATH environment variable
inherit nodeDependencies;
shellHook = lib.optionalString (dependencies != []) ''
export NODE_PATH=${nodeDependencies}/lib/node_modules
export PATH="${nodeDependencies}/bin:$PATH"
'';
} // extraArgs);
in
{
buildNodeSourceDist = lib.makeOverridable buildNodeSourceDist;
buildNodePackage = lib.makeOverridable buildNodePackage;
buildNodeDependencies = lib.makeOverridable buildNodeDependencies;
buildNodeShell = lib.makeOverridable buildNodeShell;
}
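
For orientation (not part of the diff): the four entry points exported above are normally consumed from a node2nix-generated `default.nix`. A minimal sketch, assuming the standard node2nix argument set for `node-env.nix`; the package name `my-tool` and all attribute values are illustrative:

```nix
{ pkgs ? import <nixpkgs> { } }:
let
  # Assumed argument set -- check the header of node-env.nix before copying.
  nodeEnv = import ./node-env.nix {
    inherit (pkgs) stdenv lib python2 runCommand writeTextFile writeShellScript;
    inherit pkgs;
    nodejs = pkgs.nodejs;
    libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null;
  };
in
nodeEnv.buildNodePackage {
  name = "my-tool";       # illustrative
  packageName = "my-tool";
  version = "1.0.0";
  src = ./.;
  dependencies = [ ];     # normally generated by node2nix from the lock file
  production = true;
  bypassCache = true;     # run npm fully offline, as prepareAndInvokeNPM expects
  reconstructLock = true; # regenerate package-lock.json with dummy hashes
}
```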

View File

@ -0,0 +1,3 @@
[
"renovate"
]

File diff suppressed because it is too large

View File

@ -1,13 +1,12 @@
-{
-  lib,
-  bash,
-  coreutils,
-  gawk,
-  path,
-  # nixpkgs path
-  writeScript,
-  writeScriptBin,
-  ...
+{ lib
+, bash
+, coreutils
+, gawk
+, path
+, # nixpkgs path
+  writeScript
+, writeScriptBin
+, ...
 }:
 let
   # Create a script that runs in a `pure` environment, in the sense that:
@ -19,12 +18,12 @@ let
   # - all environment variables are unset, except:
   #   - the ones listed in `keepVars` defined in ./default.nix
   #   - the ones listed via the `KEEP_VARS` variable
-  writePureShellScript = PATH: script: writeScript "script.sh" (mkScript PATH script);
+  writePureShellScript = PATH: script:
+    writeScript "script.sh" (mkScript PATH script);
 
   # Creates a script in a `bin/` directory in the output; suitable for use with `lib.makeBinPath`, etc.
   # See {option}`writers.writePureShellScript`
-  writePureShellScriptBin =
-    binName: PATH: script:
+  writePureShellScriptBin = binName: PATH: script:
     writeScriptBin binName (mkScript PATH script);
 
   mkScript = PATH: scriptText: ''
@ -92,5 +91,8 @@ let
   '';
 in
 {
-  inherit writePureShellScript writePureShellScriptBin;
+  inherit
+    writePureShellScript
+    writePureShellScriptBin
+    ;
 }
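
For orientation (not part of the diff): both formattings expose the same two writers. A hedged usage sketch, assuming `PATH` is a list of packages whose `bin/` directories `mkScript` places on the script's `PATH` (the script name and body are illustrative):

```nix
{ pkgs, writers }:
# Produces a bin/hello-loud executable that runs in a pruned environment:
# a fresh TMPDIR, with only keepVars/KEEP_VARS variables surviving.
writers.writePureShellScriptBin "hello-loud"
  [ pkgs.coreutils pkgs.gawk ]
  ''
    echo "hello from a pure script" | awk '{ print toupper($0) }'
  ''
```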

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:58ptmutnKoe4R6IE053eEm1gtgY1evYQM+WJtMRTuNm9Z1lE40Q8VJ4gDZ8xkc2ZWssizEgB0Iw=,iv:pNEUemTqKU4joMU9mJI4yYrLGfoHsD10G7BFbqsbSVA=,tag:oJfePGGn/OXJT7l1cugnkQ==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBBeVpORHNpdk1TZURNQlVE\nRFRtb0ZMODZ5WXdPOHoyVm42TUxnWVJRTGhrCmdOcndyTHlTMUdKYlJnajF0bXRj\ndDNYTmNNanpUbWF4NDJIdlNVQVpZS0EKLS0tIHRCYlpNMHVIMklQbkc2d3Vaenpl\ncysrK3FnSFpTdTVsQUhWTVRmb2h1eFkKmhJdVLu1zb+lEIlDHeoeExaiRQW075mY\nw6dM9dSW1BXTQmKT9q3WsAfF1SDafhSvBpphXTKBI58vrtFNFxJquQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPc2h2VEErMmRpMndBY3hN\nMlJVM3ZJTHJSQWppQ2wzV0V2T2xiS3BUSXdjCk9WaS9RL3pzSUJCakh4ZkIvQXk0\nV3VweE4yNDZZUHViZHZ3clNHMDB4UEUKLS0tIHkrMXpib2pneHl0a1kyM2VreGty\nMzNQMnJVaXRCT2ZneCtSNlFwREFza0UK2QUqLP6MfsJD1zsI5w/Oq/t87L3k4z/6\nxCe5ZTSBJcksV9v3E20jmFBcJHN/7Yrvp/FeQZRTUr8J9xY5DTBPHQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-04-30T11:28:18Z",
"mac": "ENC[AES256_GCM,data:umJSHZSWw/EYeinv2QCsJjq7t+awSj4LY8dthXWrX5nLPEzuzGpQrGfAGNle15SudfpZ0XpzeoiFrK6LqeQUr6BwlyWRjuwZjBD0Eo/RG5zvv0lEcQ666KWVlq8v7lP1rNuXIXGSef4ZN/Oqel0HAJW4d05YedwShD6/99HyLhw=,iv:VusNFfl5MRjv1Vrbkcw9auY4DxW9tkMvEJ4KPDEpk18=,tag:0yESnJbjneyG5PQagcsSOQ==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:wAUAcK0gtlCSCuXUMp6w/MBnn+J407iObssBVFjR7I1VUe9enghf4/Q=,iv:nbcgGyOCt8iO1FLPnV4aakLugr6/7fj/DB75KwqC93I=,tag:1D6B38fKIpQFdxobQ89mrg==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBTa29QelppdVhheGR0YytK\nR1NWNTU1MUt0WHY1cDBqN2YwRURzN1lEVEhjCmpvK0tmNEZReWpKVGlkUWREakpa\nYnJYbGRUcGJHdGVnYmhKTktVckpKR00KLS0tIDZicFZERnlNckEwTUFaTk11bWsw\nb3hjblFvTWwrZXJLNFp2SkhuN0c5aXMKkYTrgforNlHLf14TLkV2G2qEE87u4dSC\niiywv7ltnotTiAgG2RgQwkmHubpFaEhVyhRskNmVjQI8gZ74AxmC+w==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBwUzlmMFdySGE0UjJlWEFZ\nL3lNbWpHYUNNTDRHTUE1bVNvMnkzOUZzOEc4CjUrYUNnLzNxQXlJWGJvY2RyU0w1\nWlFpTVFybXdEUVB4cHZIUWFja0poSXMKLS0tIFErMEk3dS9qcWhUUGVnZE41VE0w\nQlBpUCtlQkR2RzlKSjNKMHpHd2xaMUEKe5DRJeyGqMeGWzzWXrdhzLmriXs6BDMq\nA8s4AApF8ojwZdZ7K7k8lslof/kxuFhD7KLhrOJmSgvfRZ8a8vcz7w==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-05-01T09:44:51Z",
"mac": "ENC[AES256_GCM,data:Bofuu/7Mk1qbsFUE5HTeX9daEQg2NDby0ev/Q96fiLKwcg0rpIFk84NxwPKB/hLGAiUoHEegnzrCFCcAmGPaVQtr/W6dEKsdeVH3R3UBTekEwkXGAnKvrmcS7Vbd/bzvcSA+NuuO93laAgeU/HjMOmkwZwR8GN1LkxGfinVCGhM=,iv:mqMoCB5welSRzSzaIgi9P+Y60n+/ZrB0LlR8Mx2bIRM=,tag:Ytvv38xMoXzHow4qheRLQA==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:Bzc+7/1WPH1P9L9B/fzhtD4PAtsvplXU7SKVyC2o,iv:aLq+EZ1twpHa47nvcIv0M1SIb+IzzIa0lYiu92/GMwI=,tag:+zXRw99x/E2R5MZqIXgz/g==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6WlBpTXlhZHBsM2VsVFAw\nUlY3S2l4R3hDZmJoM2lJd0lkNXY5YUNiRVJzClc1b0NxYndEYkZUMy9TS1BmdDBO\nTks3Q2llL1Jkc2NIeWV1QWVpdVdZYVkKLS0tIFovMEtBbU1hTURHZDNzZ0drUndY\nYVU2YTJxVENXdkFTRTdVT0FWa3RoU0EKqZ1XST0fbbagViwG8xtAjjts9AA/Hn0m\nIO5mpZNYNUzf+l0Zi/AjtAnaRrpZowV0gcskfcj3LX30CbwwySH3qA==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBEMjBoZHpBN0JBZVFlUnd2\nWVpYMXNaOVQ2eExuWlBWYWppL2ltYXBnRWlRClRReUcrT2RYck1XQlFINGUyVFNn\nNGdiSkQzUERaTXNEaUJycHBXZ1pXelUKLS0tIDUxMzl1MzBDdmpXRnphUkdhRzRz\nUm9UbWhjUFA4M0JxLyt4d1pMMFJEbUUKwiJziQs5qqTc6Tlm55wHobu5PKGpsoRm\ndKTjasrcUEFWu0cNAxdGXvOUipT8hPazvLl3Ajdo8KYXwP7/LVaTuQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-04-30T12:50:37Z",
"mac": "ENC[AES256_GCM,data:vOuXOCzTFrS4M8ZKWc8wVdccTfcqiFjtuRAAPToLOVk1AlY97cT0SIMCNOniSmChYIHIx1rvPqmc16BWYZr0AhYpw8a0XH2XrpCo3M3oLJ8UMiwvn5R2FdU3P9Q+feDpWL5KPy3ii/OuoQBCAovywSs3fhi/dQZfjIQHVs5bqvs=,iv:F7egkb6zDIKYAxRJwRYChR1dboeHGgqS85Er23YT2es=,tag:0UurFP2e0vFw0RbkjnizcA==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:rbKMhNQwkuMFJCQHXiwxyEpQLqLsLqBeE6o=,iv:Fo8SoR9wPV0e7r42zpuELHcr0r5YwWpAWhVZJy3rt4Q=,tag:sGHXyai6d5VLMotE1P33Fg==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBWQnNiN0FXZ0Nkek5DVElW\ncUc4MmNBa0pHOUFsclp6ekZUMWdBVnNjSVJVCkdlME5sWC9rSGhUdjhRSDY3S3Jt\nQjBWLzRIbDNvWVg0eDNITnBnNHlVcGsKLS0tIFFWVms4SmZjSmE3RGZSbnVCYnJH\nUFdZRm1aSkVWZkRLdmlEQkVpa1lQNDAKBomS4CHmrfwiF5UTzVZZsCFqZ2wyCyQE\ndzFQe0ysLekbRTw1FfHnz/vJYsOV1Hk5PqTEFdTFNrYO+I6Rh/0ZIw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB0TmVtbXl4R3QxOGZ0VmJG\nMlJWMFJGTDIvS0M1cVFmNjFXMkdCR3RDaENZClFac0Zxc0gwUkpHYkdVZWg0NUhs\nRk9va2ZQVFlXVG1VZE10Z3ZuS2NheUUKLS0tIHRhNHlWQ21JNkNnN094LzVwb0tJ\nMUQ3T1Vycm1yQ1l0d0tNdytFcFhoVDQKVaGaWAOXwHWm+FqxILcPlZ+7eDSeNftZ\nZFAP3ANmPMkl311Ucl8kub0a9bY9RhU0ZZn1WGgJD/qL/EAtmudFSA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-04-30T14:56:43Z",
"mac": "ENC[AES256_GCM,data:sH/X2WLD3OCJ4Z20s+Mqnoe/xDZzfp0DL0w8HhBshbRu0NtTbQ6MyPwZ7ar3Gl6wBVBVXDfHTX5x2/6Vs/C59NIJCKjeDrkuRWLL1qd1kF9Iqf5CyBjv3Pv/bZVGRkFSQ4IG5SZDRrGyz5+FZEGUbxvYOzZWW6gDrBWsyNn62rM=,iv:ITVFQJEqhqO3w/7m4+tH2d76FI4mghNRd+Em7yZ3QiQ=,tag:kq/rD8MUuWorSDKWGKQQnA==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:5IICNx79F7NM4LzU8dWgnmkqn/6zgx/m9swqHsCo6wrqV0C+OCC9lWsBGbQ7sGDZHP9OPo4xXijzgBPelceb6Tb2CrwDo3Ud0UCMNA==,iv:wUMUI6gqaR1it4CaT+qbJfSIKDAXuLIPrfGDpwr+TwY=,tag:pIPF878PCJc/HcOfTEoA/w==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBvNWZRUnJpY1pLZ2RKY2Vu\neXNQUXdzTUNEbkZyTFFRWVVFRDhCQzdjWGlBCnEvbXlzKzBwQ3c4T0R6RFR3bTRz\nTXcyNEYzMGhoOE5KV0pDTXVBcVRiVjAKLS0tIDBHWTByK0NmRlZLZmxudk1XMFFP\nSU1YLzN0WElPbWk0TTlOMlE5azcrQzQKDBP5mZGRgR9W8jN5nC0SifqR/x5poMOy\nUPsAQx8JVarvbAAXn2btTkjkUCG0ATdIxPDeJenocMzLX8kFOZsV4g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwNXpBU0x6QkhwYWpMVVJh\nTlllS2pkbWNwa01HZjNwNHhNemFlbVNSZUVnCmUrM3lpL0FtdjVwanN6YWJFMjZU\nOUV5ZVIydUFrYWxKNDJiMGVOc0VaSWMKLS0tIGhocjg2RkFDV1IyM0Viamg1QVRX\nNDFTN1M1clB3NHZqV2NrcFBmOURkc0kKmrFWs9yEJ7gyWdyH15HepzYt0d9jkx2w\nqVqYfLx79GHmrZVyzM+10wHrkjP+LJBorcz6QR68JMgagcAbPxi6nQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-05-01T10:05:01Z",
"mac": "ENC[AES256_GCM,data:evJedhmyh4E8jHr4YZzaexzoeWok3imHUBBNwKNXwxip0X/BpWdBV8E0+uVMIxhg5PMI58VzRVVrSlcuda2yLBT94+iHWPXIedbk0RxYMhyw21oR53OAgN5/CM5SjfvBB58tr9r1X+kdB6kaCEbH2nVUfsax+A27AGh9m0IcQtc=,iv:Q4PLC3dml+RcSTYf74k5bnoikJX0wwM1pLaiWayOfnM=,tag:eWY312KepmAHiUMFuvhLsQ==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:iJTjs8bG2GLGnGp/Hf4Egtorrk87rkgh9Yn+gPuWAJ61wIAtN3g9SU3vyYpvRrIqHVUyLObGbrWYi3Ol07M=,iv:YTOctq9aw4tc9xwoOO4UbR2cYPHV0ZmuE1FRWn13sgk=,tag:zU3HFqxwZcn/9S02bj3/fA==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBUa1FIbWt2aHduZVBlcGpq\nOUsybTRmR1I4M1JZY3A4ZUFpZEN3dlZCTkZRCmdVajFUcDMyeEdJMThVZElJdmlJ\naEhvSk9sYThXdkxoaXVLem15dlJMcm8KLS0tIEJsdFY4L0M3Q2cwdzFOdy9LN0k3\nOEdCM09PUWlZbE91U2ZYNmVHeU43bUUKC+z+6XZCiVfwGQQCAHoB+WGE5Mm3qJZq\nuyD5r3Ra6MAvvwIhnqbwadRoxVH1HcdIB6hJsNREE/x6YNLxi3T7nw==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1zwte859d9nvg6wy5dugjkf38dqe8w8qkt2as7xcc5pw3285833xs797uan",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFOHBub0J3U2FuOFNuS2xl\nRkExYVEvcmowWDlyUGlpYko3N1dIcmN5dmxJCkRtRm9qVVNQK3FQcTB1U3g3OHhS\ncU9XaHUxNWVlL2tpblpZUHF1UWt4am8KLS0tIGJOZkJ4eDJ6WWx1d2R1VG1qODZS\ncHhXMVhEUHdLZjIvNUYxRmduZkpjaU0KqZKUb9KYpSvwxaJRAbYhkuOdnzsU3p9Q\nU2WO5TIwS762yNqWTzyYdxb9YxvTOatW7uWTorRXZu1yqCTMTuq+1Q==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-07-03T09:47:40Z",
"mac": "ENC[AES256_GCM,data:dpdmkhedaqivzIlxhoWb+u77JmfWRo94iWDolAa9UKvnjBo1QE5sHbqWasCH81wjO0wPBPRUqnj9JQ7kG9AFp24Fad+gAp74Gwx5M/PSx1dsd6xkcxt6PJ8sFXGb0H3lYduCaNfDGgsJTVoDcbk8rgYzjo5+mxs2pqrrn10t4iU=,iv:MGMIq2rF4+hr89/dppi2JDVbpAShscYTMM9viHPepIY=,tag:Dj9B6qvAkmiUmgRvZ6B94Q==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/qubasa

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:WW0RmSs3k81jSgYLt8dHEiJOxlncPWl3QWvRtmNgtIxvup7h,iv:nw7SP15EVWfS78dJE37msnxAZ/goYb7rGqAKNzhXFP4=,tag:yxVyGUMFczq8cGuU4V/FzA==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBiM0FBVkhPc2luMjlpSW1R\nN0NlUU9ZQkxIOVAwa3hlMVg3bFluTjRlRUdRCmMxSkMvZjg2ckUyUThhSC9VOW1H\nZExFY2owcHQ5NzJtUW5pbDFjd2oyaEUKLS0tIG1Fd25acHdYWEdlQkMxajhRQXNw\nTGxJUDdPMlRrQ0t3SkVSaWdZZXJGT0EK7WfQ+6jVzOBToqO9wJby/qaF6kM00hMh\n+Y4A08X/ItLzyfCc5LQ97GQ2VlwXK5+HoD7jNnn//3xeH6YC1VBdkg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBhNlpOQ1QydEJuM3pCbEpK\nSDUzVlppNkFnSDJLSU1ITEdWWCtaUEE0THcwCmljSUl0amx2OTBVZXBPMFNGbjJP\nakNQcWlad1R3cDZYWWZpQkJkQmEvUEEKLS0tIFpJOU1GUnNaTnlaL25GRkdxZnhs\nUEhIVEpNWjNOV2FTSmVnRkVCWm90MDgKMvz6QdPRoYb2bPjS9oSOVA5gTfwrgn4q\nIyboQIMV3oAaAs9LSUcUMBvERzQ31JXnHRzrnqtdiNX0NLbIrN47yg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-06-06T16:09:34Z",
"mac": "ENC[AES256_GCM,data:7iKDT5577mLLeNyi46JHa4AUumqbQm65V3DXqNdNyLWccpIcML8n7jgFNxuK9gTqV2LM6bG18qS1orBJtPdawKnvxJwUaFb3Mo06C2+LVnWG4fT6MV+5eF8y6SM3IngT9BPk7IhTTGWe8lGJ6HTlg+9/f4/cq5NSKfeRgTkDEcE=,iv:T8wjeq2D1J8krhWeQJbVCOPY5sr05z/wMJqvr9onQK8=,tag:XgDTOTa2zv4NiBFN0b3rqA==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:ybX1/Uc+LqfgUoZQqCURgPfsTyzlsO+Xn7z8/0H9v5kyfJYX7PI1VlXVFBgR3Xh+2iuTF+v98PQyYeJOYLk3NTWggZayQQ4ivt0DLdhgG+DRFbeN8GMiqV5NWNhnL2tgLBu9DZViBSpgcbg9aHfI2cagboJnCSqyS2w1i/anvKaEgKa5YucrS4jywVxhBbvON6Oa2v8Hb0f/R8Ldl9HSqMM6o3pQEaYOsTNieNy63h9C4ERP/jIhKSajggpeHENdnuQC7Kavz54faL9xaz0jwRHb1fd+IGTM7fxqbyB5702nKEGytDwKzH0fh6q1HJNHbhWeWyCmGFKOkqywaQjcpJsczP2FIwkZmoui0juTEluNk1KzugP0gxtsuwjUiJlZeJxtZEgsnifLPpHyCaN99jzPjhd1TknT7MWZMVJT/R14bdD+QdwvR8rHK8IMctMGrNsqu3+Crdwu3WSfDH9jEM1zAZQNvLUT13azRABz1rpJvNFnvhDTBwDbUJlLpIQcPOPtVKO0IQeM5EUnCr+oCfBrP/To3mqo82s3,iv:jbY6WK0BcyLlU3Sbo7qNOHfCGU4TjUqTiww546Tyq20=,tag:VklHST51z5XI9+UiASBO9g==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBlenFrWW45UXcyd3pwaTE0\nY3ROWWFPdzIwK1JuU2h2NCsyanF1VzhoM1NjCndscHZpTUNmcWc0TDd4V2xHM0lh\ndVNZQW1jMGFNeDdxbDhwdFB1Z3AwdVUKLS0tIGNmbVNMRUZGb1lPcnlrdkhnZ3JX\nM2szRHVydldGN3haV2lhZlFMeUgzcU0KDDwWVSjsua4DKXlqqk2Ns2e1zkzJK2Y2\n8+r8bXkBLJyXqQCQteXBrc5U+0n1KfHVkkvPmuBI3BmcAiVVmr/RxQ==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJdHEyT3U0TzJQUW9wM2Z3\nYVdTYmRSeUwrZy9iNTVpcmZJMnhOeHlGRGp3CnVYbk15NWxadk9waFdJVFBKa0Vx\nYUtkYWZuYmhabk1xREtDdzFGdUcyaW8KLS0tIGYxcDVuQXZwdk1rVHJOOHljTmVl\nMXNXTHdSam12djE3Z0UyU2dSZDcrRGcKaQnrYuUpSTjOYYHH0EsqnTLHkU5Md4Ro\nUpeJX1GmAoIAUGruB/8jPbMaDQQXbjNLDCCalStlbqMgbgz/Ty4ukQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-06-06T16:09:35Z",
"mac": "ENC[AES256_GCM,data:2RptHkE/k4JfqdybmnI3sbeEDaaD4bUtEPLuBcpltZjR5EHFYLsEB1Woxlzj2rLqq+8Wr6kWZtsG3uJSxsColUbazJd1CoVJxHpm6tAnM47Mv1YG5PdLwqpwJWji4AI5lAer4ZMfuGDpNbrwvbO3qB8R55r5SYay4b4Yc49wQXA=,iv:ESimFSybysRrgEj+27ECUi6kIklv1IunWVclTjX7C5g=,tag:LONP+cUm5NVcBvgVStZnwQ==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:zCWFFE6+923po+i6g+ehKgC3FdAEhbmFDTbc6VZIXdBqNO7qvC8K1Q34aZVzQ3HaE6l/p5V7Ax0U0xRypQ==,iv:NJhOMcGg55fznrpM6bSqNvr/lOYAsUUVtfK8eJRs0Iw=,tag:6jadN151/70a7BBXsqMClg==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHcHNaTjJlejlJYy90eGFT\nVTloRFV3OVV4enI1OENaeGVpcXpCV0dUenlBCkdONUE3eXhlY1JMRko5Q0VJVFN6\nMkdSR1krYjlJRyswOExRSW9UeUI2czAKLS0tIEJWRDZwRWp1U3V4S0NLOXJDS0ZZ\ncXRFNGxnNXZHNHpvOUpVcTYvM3RoNU0KPgJoJ/22jyUtqGeXfO+DInB3zIwrB+OP\ncjw6Dt7mPYT/OUG6Cq12D6+xMYCm+r4jswtkvWaPhnzGcIOcqMJHwg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBUa1IvdklYcENvUzlwdnNi\nbnlidGVvMzZLRS9EU1RzZ0VzMUtvOGRGR25zCklqVTA4T2FIR3l2MER2RjRsbkZH\nRWlxUkYyUjIwSzl5SWJHblMvclZwOGsKLS0tICtaYW83M3lXakJsMFNEc0FjYWdC\nU3ZDUEplYk1tOFRiUUpXTVA0NTUyaHMKdtR+rqRz+Jjf4BfCd5B7ygRLYKTDDRJk\nq0eSNG+i+Xjz/kLWsMpmO4Cevhp0SPyLZV2g2CiDo5vXZQ5Qiy8pSQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-06-10T09:11:22Z",
"mac": "ENC[AES256_GCM,data:D+NLO8U8mXc4wzQC1OHoba5t+i92P3ZeZy7M8nPhBvnWFznhWBmHRLTI55c8+Q3tkNJI0rBt43+XjC7X1ij36eSza/8O6dh5+jM4UkvFBBJG8ZTPSqakISmPBN1k80qm6G15ELgRrJc0+DNAuuZVuBAwVNUFmaZNx6FmX/G4nRU=,iv:RlhgqQoXAeNFTLRJubVzFJq0wbZwZOeAyZs2nD7IHfg=,tag:6zgWakwYjf93qyMwKlSG9g==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

View File

@ -1 +0,0 @@
../../../machines/web01

View File

@ -1,24 +0,0 @@
{
"data": "ENC[AES256_GCM,data:bcYm9Jx6NS5T2085GmeUJJeLdD1ZtGSfMtXNWcNkeL7F,iv:jR8k0EMO20ZiBXmb1ddJS5x0c95y9vEPvMig0Y0iXBg=,tag:wZBLbCe8ucQSIGrNOjN1jg==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age17n64ahe3wesh8l8lj0zylf4nljdmqn28hvqns2g7hgm9mdkhlsvsjuvkxz",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBSVWx6TzM4MEpmZ3ExczZo\nNS9kU1Z5NEl1aDdwSzUwSy93anlPQXdOVVVNClBqMENEWUhLVml6dkRZaVk4OU1V\nNjBNV0p2MjFLMDI1c3paOUU0Zndsd28KLS0tIEJZVFA4akVLMzVSanJMcWwweCtE\nZ2h2NE1mdWJNd1VWZDFyT0tvTmlrV0kKfsW5qG12wP+hI/ZCcZNsjv5ububSITLp\n4SzzyeTzpDrGlu/h52szD0VYnB0w3/fF2Ar/lvBYN0y9MXXYUQGdRA==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age17xuvz0fqtynzdmf8rfh4g3e46tx8w3mc6zgytrmuj5v9dhnldgxs7ue7ct",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArdmZUaThzQzQ0em9reXJM\nTkNNRlhKRWoxR3dVTXc0TEdXV2pNQytXK3lrCkk1Z1g2d2R6V002d2lXNWtFMmo5\nT2tiTGpyRTE4WXk4c0hYOGdFejBITWsKLS0tIFdib0UzL2dNbXRjZHFYOEVGSWVU\nTDlNN0xSQWgzdFVhV21SSE9JNkM0OGcK2icnV6pvh7PMVp5r51b+Ukgl95XiiTHG\nDjj3M24jEh9UX2bYraGyRNnLh3piQe7Jim3/ZAHSOzl105GulapU5g==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-06-10T09:11:20Z",
"mac": "ENC[AES256_GCM,data:Ie9j/N4dB6qKtpzPrQROPbsGQCfzYL8dhtptOB0XQw+mh19vpcvWyzLqYOorM1eBKrUWYob6ZHe27KXxN+9RtPe+KFABlFAQRENfPBVPi9Y7/XxMiMQ2gL6JQkvN47Aou/jWhPIOeuCXuEqr4VEOa0F6jPLmS9aPPc95MV/cHxo=,iv:/R67c5rBG3nIm6iAJedPdXL8R+b1RGez/ejzBDW4tf4=,tag:2A9njvLHsAzda+kh8PYj5w==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View File

@ -1 +0,0 @@
../../../users/joerg

Some files were not shown because too many files have changed in this diff.