forked from the-distro/infra
Compare commits: main...channel-sc (1 commit)

Commit 592af97142

5 changed files with 232 additions and 0 deletions
common/channels.nix — new file, 32 lines

@@ -0,0 +1,32 @@
# Taken from https://github.com/NixOS/infra/blob/master/channels.nix
{
  # "Channel name" = {
  #   # This should be the <value> part of
  #   # https://hydra.forkos.org/job/<value>/latest-finished
  #   job = "project/jobset/jobname";
  #
  #   # When adding a new version, determine if it needs to be tagged as a
  #   # variant -- for example:
  #   #   nixos-xx.xx         => primary
  #   #   nixos-xx.xx-small   => small
  #   #   nixos-xx.xx-darwin  => darwin
  #   #   nixos-xx.xx-aarch64 => aarch64
  #   variant = "primary";
  #
  #   # Channel Status:
  #   # '*-unstable' channels are always "rolling"
  #   # Otherwise a release generally progresses through the following phases:
  #   #
  #   # - Directly after branch off                   => "beta"
  #   # - Once the channel is released                => "stable"
  #   # - Once the next channel is released           => "deprecated"
  #   # - N months after the next channel is released => "unmaintained"
  #   #   (check the release notes for when this should happen)
  #   status = "beta";
  # };
  "forkos-unstable" = {
    job = "forkos/nixos-main/tested";
    variant = "primary";
    status = "rolling";
  };
}
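For illustration only (not part of this commit, and the jobset name below is hypothetical): once a release branch exists, a stable channel entry following the commented template would look roughly like this:

  # Hypothetical example, not in this commit: a 24.05-style release channel.
  # The jobset "forkos/nixos-24.05/tested" is assumed, not defined anywhere yet.
  "forkos-24.05" = {
    job = "forkos/nixos-24.05/tested";
    variant = "primary";
    status = "stable";
  };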
(modified file; name not shown)

@@ -110,6 +110,15 @@
    };
  };

  bagel.nixpkgs.channel-scripts = {
    enable = true;
    nixpkgsUrl = "https://cl.forkos.org/nixpkgs.git";
    releaseBucketCredentialsFile = config.age.secrets.channel-scripts-s3.path;
    deployKeyFile = config.age.secrets.priv-ssh-key.path;
    hydraUrl = "https://hydra.forkos.org";
    channels = import ../common/channels.nix;
  };

  i18n.defaultLocale = "fr_FR.UTF-8";

  system.stateVersion = "24.05";
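The two config.age.secrets.* paths above assume agenix secret declarations elsewhere in the repository; purely as a sketch (the .age file paths are hypothetical, not part of this commit), those declarations would look something like:

  age.secrets.channel-scripts-s3.file = ../secrets/channel-scripts-s3.age;  # hypothetical path
  age.secrets.priv-ssh-key.file = ../secrets/priv-ssh-key.age;              # hypothetical path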
services/channel-scripts/default.nix — new file, 127 lines

@@ -0,0 +1,127 @@
{ lib, config, pkgs, ... }:
let
  inherit (lib) mkEnableOption mkOption types mkIf mapAttrsToList;
  cfg = config.bagel.nixpkgs.channel-scripts;
  orderLib = import ./service-order.nix { inherit lib; };

  makeUpdateJob = channelName: mainJob: {
    name = "update-${channelName}";
    value = {
      description = "Update channel ${channelName}";
      path = with pkgs; [ git ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = false;
        User = "channel-scripts";
        DynamicUser = true;
        MemoryHigh = "80%";
        EnvironmentFile = [
          cfg.releaseBucketCredentialsFile
        ];
      };
      unitConfig.After = [ "networking.target" ];
      script =
        ''
          # A stateful copy of nixpkgs
          dir=/var/lib/channel-scripts/nixpkgs
          if ! [[ -e $dir ]]; then
            git clone --bare ${cfg.nixpkgsUrl} $dir
          fi
          GIT_DIR=$dir git config credential.helper 'store --file=${config.age.secrets.hydra-mirror-git-credentials.path}'
          GIT_DIR=$dir git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*'

          export AWS_ACCESS_KEY_ID=$(sed 's/aws_access_key_id=\(.*\)/\1/ ; t; d' ${config.age.secrets.hydra-mirror-s3-credentials.path})
          export AWS_SECRET_ACCESS_KEY=$(sed 's/aws_secret_access_key=\(.*\)/\1/ ; t; d' ${config.age.secrets.hydra-mirror-s3-credentials.path})
          exec mirror-nixos-branch ${channelName} ${cfg.hydraUrl}/job/${mainJob}/latest-finished
        '';
    };
  };
  updateJobs = orderLib.mkOrderedChain (mapAttrsToList (n: { job, ... }: makeUpdateJob n job) cfg.channels);

  channelOpts = { ... }: {
    job = mkOption {
      type = types.str;
      example = "nixos/trunk-combined/tested";
    };

    variant = mkOption {
      type = types.enum [ "primary" "small" "darwin" "aarch64" ];
      example = "primary";
    };

    status = mkOption {
      type = types.enum [ "beta" "stable" "deprecated" "unmaintained" "rolling" ];
      example = "rolling";
    };
  };
in
{
  options.bagel.nixpkgs.channel-scripts = {
    enable = mkEnableOption ''the channel scripts.
      Fast-forwards the channel branches, which are read-only except for this privileged bot,
      based on our Hydra acceptance tests.
    '';

    nixpkgsUrl = mkOption {
      type = types.str;
      default = "https://cl.forkos.org/nixpkgs.git";
      description = "URL of the nixpkgs repository to clone from and push to.";
    };

    releaseBucketCredentialsFile = mkOption {
      type = types.path;
      description = ''Path to the release bucket credentials file, exporting S3-style environment
        variables (for example `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`) needed for the
        S3 operations to work.
      '';
    };

    deployKeyFile = mkOption {
      type = types.path;
      description = ''Path to the private SSH key allowed to push to the protected channel
        references on the Git repository.
      '';
    };

    hydraUrl = mkOption {
      type = types.str;
      default = "https://hydra.forkos.org";
      description = "URL of the Hydra instance.";
    };

    channels = mkOption {
      type = types.attrsOf (types.submodule channelOpts);
      description = "Attribute set of channels to mirror.";
    };
  };

  config = mkIf cfg.enable {
    users.users.channel-scripts = {
      description = "Channel scripts user";
      isSystemUser = true;
      group = "channel-scripts";
    };
    users.groups.channel-scripts = {};

    systemd.services = (lib.listToAttrs updateJobs) // {
      "update-all-channels" = {
        description = "Start all channel updates.";
        unitConfig = {
          After = map
            (service: "${service.name}.service")
            updateJobs;
          Wants = map
            (service: "${service.name}.service")
            updateJobs;
        };
        script = "true";
      };
    };

    systemd.timers."update-all-channels" = {
      description = "Start all channel updates.";
      wantedBy = [ "timers.target" ];
      timerConfig = {
        OnUnitInactiveSec = 600;
        OnBootSec = 900;
        AccuracySec = 300;
      };
    };
  };
}
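To make the wiring concrete, here is a rough sketch (not code from this commit) of what the module should generate for the single channel defined in common/channels.nix:

  # Approximate shape of the generated units for "forkos-unstable"
  # (paraphrased; serviceConfig details and credentials handling omitted).
  systemd.services."update-forkos-unstable" = {
    description = "Update channel forkos-unstable";
    # The script ends with:
    #   exec mirror-nixos-branch forkos-unstable \
    #     https://hydra.forkos.org/job/forkos/nixos-main/tested/latest-finished
  };

  systemd.services."update-all-channels" = {
    # After/Wants = [ "update-forkos-unstable.service" ], so this no-op unit
    # pulls in every per-channel update in the order fixed by mkOrderedChain.
  };

  systemd.timers."update-all-channels" = {
    # Fires roughly every ten minutes (OnUnitInactiveSec = 600).
  };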
services/channel-scripts/service-order.nix — new file, 63 lines

@@ -0,0 +1,63 @@
# Vendored from https://raw.githubusercontent.com/NixOS/infra/master/lib/service-order.nix
# TODO: get rid of me?
# Ordering Services
#
# Given a set of services, make them run one at a time in a specific
# order, on a timer.
{ lib }:
{
  # Given a list of systemd services, give each one an After
  # attribute, so they start in a specific order. The returned
  # list can be converted into a systemd.services attrset with
  # `lib.listToAttrs`.
  #
  # Example:
  #
  #   mkOrderedChain [
  #     { name = "foo"; value = { script = "true"; }; }
  #     { name = "bar"; value = { script = "true"; }; }
  #   ]
  #
  #   => [
  #     {
  #       name = "foo";
  #       value = {
  #         script = "true";
  #         unitConfig = { After = []; };
  #       };
  #     }
  #     {
  #       name = "bar";
  #       value = {
  #         script = "true";
  #         unitConfig = { After = [ "foo.service" ]; };
  #       };
  #     }
  #
  mkOrderedChain = jobs: let
    unitConfigFrom = job: job.unitConfig or {};
    afterFrom = job: (unitConfigFrom job).After or [];
    previousFrom = collector:
      if collector ? previous
      then [ collector.previous ]
      else [];

    ordered = builtins.foldl'
      (collector: item: {
        services = collector.services
          ++ [{
            inherit (item) name;
            value = item.value // {
              unitConfig = (unitConfigFrom item.value) //
                {
                  After = (afterFrom item.value) ++
                    (previousFrom collector);
                };
            };
          }];
        previous = "${item.name}.service";
      })
      { services = []; }
      jobs;
  in ordered.services;
}
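Tying this back to default.nix above — an illustrative evaluation only, with a second hypothetical channel added so the chaining is visible (scripts elided):

  #   mkOrderedChain [
  #     { name = "update-forkos-unstable"; value = { script = "…"; }; }
  #     { name = "update-forkos-24.05";    value = { script = "…"; }; }
  #   ]
  #
  #   => the second job in the list gains
  #      unitConfig.After = [ "update-forkos-unstable.service" ];
  #      so the channel updates run one at a time rather than in parallel.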
(modified file; name not shown)

@@ -1,6 +1,7 @@
{
  imports = [
    ./gerrit
    ./channel-scripts
    ./hydra
    ./monitoring
    ./netbox