feat(nixpkgs): run oxidized channel scripts

We don't need weird Perl scripts where we are going. Here's a streaming
channel-scripts deployment with plenty of bells, including OTLP.

Signed-off-by: Raito Bezarius <masterancpp@gmail.com>
raito 2024-08-02 00:33:42 +02:00
parent c1712dc1fa
commit 9a04ef909b
10 changed files with 372 additions and 0 deletions

common/channels.nix Normal file

@@ -0,0 +1,32 @@
# Taken from https://github.com/NixOS/infra/blob/master/channels.nix
{
# "Channel name" = {
# # This should be the <value> part of
# # https://hydra.forkos.org/job/<value>/latest-finished
# job = "project/jobset/jobname";
#
# # When adding a new version, determine if it needs to be tagged as a
# # variant -- for example:
# # nixos-xx.xx => primary
# # nixos-xx.xx-small => small
# # nixos-xx.xx-darwin => darwin
# # nixos-xx.xx-aarch64 => aarch64
# variant = "primary";
#
# # Channel Status:
# # '*-unstable' channels are always "rolling"
# # Otherwise a release generally progresses through the following phases:
# #
# # - Directly after branch off => "beta"
# # - Once the channel is released => "stable"
# # - Once the next channel is released => "deprecated"
# # - N months after the next channel is released => "unmaintained"
# # (check the release notes for when this should happen)
# status = "beta";
# };
"forkos-unstable" = {
job = "forkos/nixos-main/tested";
variant = "primary";
status = "rolling";
};
}
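For illustration, a channel declared after a release branch-off would follow the commented format above roughly like this (a hypothetical entry; no such jobset exists in this commit):

  "forkos-24.05" = {
    # hypothetical jobset name, mirroring the forkos/nixos-main/tested pattern
    job = "forkos/nixos-24.05/tested";
    variant = "primary";
    status = "beta";
  };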

@@ -101,6 +101,26 @@
"url": "https://git.lix.systems/lix-project/buildbot-nix.git"
}
},
"channel-scripts": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1725125429,
"narHash": "sha256-NUnlreY8tdWTzAMY82hMxEhvsv9bKCCG4qAQ0LJanHA=",
"ref": "refs/heads/main",
"rev": "cb5a2a2b07570fcbe3ad128d3d2a147305524600",
"revCount": 258,
"type": "git",
"url": "https://git.lix.systems/the-distro/channel-scripts.git"
},
"original": {
"type": "git",
"url": "https://git.lix.systems/the-distro/channel-scripts.git"
}
},
"colmena": {
"inputs": {
"flake-compat": "flake-compat",
@@ -684,6 +704,7 @@
"inputs": {
"agenix": "agenix",
"buildbot-nix": "buildbot-nix",
"channel-scripts": "channel-scripts",
"colmena": "colmena",
"gerrit-dashboard": "gerrit-dashboard",
"grapevine": "grapevine",

@@ -25,6 +25,9 @@
    buildbot-nix.url = "git+https://git.lix.systems/lix-project/buildbot-nix.git?ref=refs/heads/non-flakes";
    buildbot-nix.inputs.nixpkgs.follows = "nixpkgs";
    channel-scripts.url = "git+https://git.lix.systems/the-distro/channel-scripts.git";
    channel-scripts.inputs.nixpkgs.follows = "nixpkgs";
    lix.follows = "hydra/lix";
    grapevine = {
@@ -51,6 +54,7 @@
          inputs.hydra.overlays.default
          inputs.lix.overlays.default
          inputs.nix-gerrit.overlays.default
          inputs.channel-scripts.overlays.default
        ];
      };
      terraform = pkgs.opentofu;
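The module added below resolves its package with mkPackageOption pkgs "mirror-forkos", so the overlay registered here is expected to put a mirror-forkos attribute into the package set. A minimal sketch of such an overlay (the real one lives in the channel-scripts flake; the callPackage path is made up for illustration):

  # hypothetical shape of inputs.channel-scripts.overlays.default
  final: prev: {
    # assumption: the channel-scripts flake builds the tool and exposes it under this name
    mirror-forkos = final.callPackage ./pkgs/mirror-forkos.nix { };
  }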

@@ -121,6 +121,26 @@
    };
  };
  age.secrets.s3-channel-staging-keys.file = ../../secrets/s3-channel-staging-keys.age;

  bagel.nixpkgs.channel-scripts = {
    enable = true;
    otlp.enable = true;

    nixpkgsUrl = "https://cl.forkos.org/nixpkgs.git";
    hydraUrl = "https://hydra.forkos.org";
    binaryCacheUrl = "https://cache.forkos.org";
    baseUriForGitRevisions = "https://cl.forkos.org/plugins/gitiles/nixpkgs/+";

    s3 = {
      release = "bagel-channel-scripts-test";
      channel = "bagel-channel-scripts-test";
    };

    releaseBucketCredentialsFile = config.age.secrets.s3-channel-staging-keys.path;
    deployKeyFile = config.age.secrets.priv-ssh-key.path;

    extraArgs = [
      "--bypass-preflight-checks"
    ];

    channels = import ../../common/channels.nix;
  };

  i18n.defaultLocale = "fr_FR.UTF-8";
  system.stateVersion = "24.05";
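Should this staging deployment need more verbose output, the module's extraEnvironment option (which defaults to RUST_LOG=info, see the module below) can be raised per host. A small sketch, assuming mirror-forkos honours the usual RUST_LOG levels:

  bagel.nixpkgs.channel-scripts.extraEnvironment = [
    "RUST_LOG=debug"  # assumption: debug-level logging is supported by the tool
  ];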

@@ -33,6 +33,8 @@ let
  metrics-push-password = builtins.attrValues machines;
  ows-deploy-key = [ machines.gerrit01 ];
  s3-channel-staging-keys = [ machines.gerrit01 ];
  s3-channel-keys = [ machines.gerrit01 ];
  postgres-ca-priv = [ machines.bagel-box ];
  postgres-tls-priv = [ machines.bagel-box ];

BIN secrets/s3-channel-keys.age Normal file

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,229 @@
{ lib, config, pkgs, ... }:
let
  inherit (lib) mkEnableOption mkOption types mkIf mapAttrsToList mkPackageOption concatStringsSep;

  cfg = config.bagel.nixpkgs.channel-scripts;

  toml = pkgs.formats.toml { };
  configFile = toml.generate "forkos.toml" cfg.settings;

  orderLib = import ./service-order.nix { inherit lib; };

  # Build one oneshot service per channel: keep a stateful bare clone of
  # nixpkgs under the state directory, refresh its fetch refspec, then hand
  # over to mirror-forkos for the channel's Hydra job.
  makeUpdateJob = channelName: mainJob: {
    name = "update-${channelName}";
    value = {
      description = "Update channel ${channelName}";
      path = with pkgs; [ git ];

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = false;
        User = "channel-scripts";
        DynamicUser = true;
        StateDirectory = "channel-scripts";
        MemoryHigh = "80%";

        EnvironmentFile = [
          cfg.releaseBucketCredentialsFile
        ];
        Environment = cfg.extraEnvironment;
      };

      unitConfig.After = [ "network.target" ];

      script =
        ''
          # A stateful copy of nixpkgs
          dir=/var/lib/channel-scripts/nixpkgs
          if ! [[ -e $dir ]]; then
            git clone --bare ${cfg.nixpkgsUrl} $dir
          fi
          GIT_DIR=$dir git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*'

          # TODO: use escapeShellArgs
          exec ${cfg.package}/bin/mirror-forkos -c ${configFile} ${concatStringsSep " " cfg.extraArgs} apply ${channelName} ${mainJob}
        '';
    };
  };

  updateJobs = orderLib.mkOrderedChain (mapAttrsToList (n: { job, ... }: makeUpdateJob n job) cfg.channels);
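  # For illustration: with the single channel declared in common/channels.nix,
  # `updateJobs` evaluates to roughly the following (a sketch, body abbreviated):
  #
  #   [
  #     {
  #       name = "update-forkos-unstable";
  #       value = {
  #         description = "Update channel forkos-unstable";
  #         # serviceConfig, script, ... as built by makeUpdateJob
  #         unitConfig.After = [ "network.target" ];  # first job in the chain, nothing to wait for
  #       };
  #     }
  #   ]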
  channelOpts = { ... }: {
    options = {
      job = mkOption {
        type = types.str;
        example = "nixos/trunk-combined/tested";
      };

      variant = mkOption {
        type = types.enum [ "primary" "small" "darwin" "aarch64" ];
        example = "primary";
      };

      status = mkOption {
        type = types.enum [ "beta" "stable" "deprecated" "unmaintained" "rolling" ];
        example = "rolling";
      };
    };
  };
in
{
  options.bagel.nixpkgs.channel-scripts = {
    enable = mkEnableOption ''the channel scripts.

      Fast-forwards the channel branches, which are read-only except for this
      privileged bot, based on our Hydra acceptance tests.
    '';

    otlp.enable = mkEnableOption "the OTLP export process";

    s3 = {
      release = mkOption {
        type = types.str;
        description = "Name of the S3 bucket holding the release artifacts";
      };
      channel = mkOption {
        type = types.str;
        description = "Name of the S3 bucket holding the channel data";
      };
    };

    package = mkPackageOption pkgs "mirror-forkos" { };

    settings = mkOption {
      type = types.attrsOf types.anything;
      description = "Settings serialized into the forkos.toml configuration file";
    };

    nixpkgsUrl = mkOption {
      type = types.str;
      default = "https://cl.forkos.org/nixpkgs.git";
      description = "URL of the nixpkgs repository to clone from and push channel branches to";
    };

    binaryCacheUrl = mkOption {
      type = types.str;
      default = "https://cache.forkos.org";
      description = "URL of the binary cache";
    };

    baseUriForGitRevisions = mkOption {
      type = types.str;
      description = "Base URI used to generate links to specific revisions";
    };

    extraArgs = mkOption {
      type = types.listOf types.str;
      default = [ ];
      description = "Extra arguments passed to the mirroring program";
    };

    releaseBucketCredentialsFile = mkOption {
      type = types.path;
      description = ''
        Path to the release bucket credentials file, exporting S3-style environment
        variables, e.g. `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`, for the S3
        operations to work.
      '';
    };

    deployKeyFile = mkOption {
      type = types.path;
      description = ''
        Path to the private SSH key allowed to push to the protected channel
        references on the Git repository.
      '';
    };

    hydraUrl = mkOption {
      type = types.str;
      default = "https://hydra.forkos.org";
      description = "URL of the Hydra instance";
    };

    channels = mkOption {
      type = types.attrsOf (types.submodule channelOpts);
      description = "Attribute set of channels to mirror";
    };

    extraEnvironment = mkOption {
      type = types.listOf types.str;
      default = [
        "RUST_LOG=info"
      ];
      description = "Extra environment variables passed to the mirroring program";
    };
  };
  config = mkIf cfg.enable {
    bagel.nixpkgs.channel-scripts.settings = {
      hydra_uri = cfg.hydraUrl;
      binary_cache_uri = cfg.binaryCacheUrl;
      base_git_uri_for_revision = cfg.baseUriForGitRevisions;
      nixpkgs_dir = "/var/lib/channel-scripts/nixpkgs";
      s3_release_bucket_name = cfg.s3.release;
      s3_channel_bucket_name = cfg.s3.channel;
    };
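    # For reference, with the values set on the staging host above, the generated
    # forkos.toml comes out roughly as (a sketch; key order is not significant):
    #
    #   base_git_uri_for_revision = "https://cl.forkos.org/plugins/gitiles/nixpkgs/+"
    #   binary_cache_uri = "https://cache.forkos.org"
    #   hydra_uri = "https://hydra.forkos.org"
    #   nixpkgs_dir = "/var/lib/channel-scripts/nixpkgs"
    #   s3_channel_bucket_name = "bagel-channel-scripts-test"
    #   s3_release_bucket_name = "bagel-channel-scripts-test"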
    # services.alloy = {
    #   enable = cfg.otlp.enable;
    # };
    #
    # bagel.services.channel-scripts.extraEnvironment = mkIf cfg.otlp.enable {
    #   OTEL_EXPORTER_OTLP_ENDPOINT = "127.0.0.1:9000";
    #   OTEL_EXPORTER_OTLP_PROTOCOL = "grpc";
    # };
    #
    # environment.etc."alloy/config.alloy".text = ''
    #   otelcol.auth.basic "forkos" {
    #     username = "promtail"
    #     password = env("/run/credentials/alloy.service/password")
    #   }
    #
    #   otelcol.receiver.otlp "default" {
    #     grpc {
    #       endpoint = "127.0.0.1:9000"
    #     }
    #
    #     output {
    #       metrics = [otelcol.processor.batch.default.input]
    #       logs    = [otelcol.processor.batch.default.input]
    #       traces  = [otelcol.processor.batch.default.input]
    #     }
    #   }
    #
    #   otelcol.processor.batch "default" {
    #     output {
    #       metrics = [otelcol.exporter.otlp.default.input]
    #       logs    = [otelcol.exporter.otlp.default.input]
    #       traces  = [otelcol.exporter.otlp.default.input]
    #     }
    #   }
    #
    #   otelcol.exporter.otlp "default" {
    #     client {
    #       endpoint {
    #         url = "https://tempo.forkos.org"
    #         basic_auth {
    #           username = "promtail"
    #           password_file = "/run/credentials/alloy.service/password"
    #         }
    #       }
    #     }
    #   }
    # '';
    #
    users.users.channel-scripts = {
      description = "Channel scripts user";
      isSystemUser = true;
      group = "channel-scripts";
    };
    users.groups.channel-scripts = {};

    systemd.services = (lib.listToAttrs updateJobs) // {
      # No-op aggregator unit: its After/Wants pull in every per-channel update
      # job, which mkOrderedChain has already chained to run one after another.
      "update-all-channels" = {
        description = "Start all channel updates.";

        unitConfig = {
          After = map
            (service: "${service.name}.service")
            updateJobs;

          Wants = map
            (service: "${service.name}.service")
            updateJobs;
        };

        script = "true";
      };
    };

    systemd.timers."update-all-channels" = {
      description = "Start all channel updates.";
      wantedBy = [ "timers.target" ];

      timerConfig = {
        OnUnitInactiveSec = 600;
        OnBootSec = 900;
        AccuracySec = 300;
      };
    };
  };
}

@@ -0,0 +1,63 @@
# Vendored from https://raw.githubusercontent.com/NixOS/infra/master/lib/service-order.nix
# TODO: get rid of me?
# Ordering Services
#
# Given a set of services, make them run one at a time in a specific
# order, on a timer.
{ lib }:
{
  # Given a list of systemd services, give each one an After
  # attribute, so they start in a specific order. The returned
  # list can be converted into a systemd.services attrset with
  # `lib.listToAttrs`.
  #
  # Example:
  #
  #   mkOrderedChain [
  #     { name = "foo"; value = { script = "true"; }; }
  #     { name = "bar"; value = { script = "true"; }; }
  #   ]
  #
  #   => [
  #     {
  #       name = "foo";
  #       value = {
  #         script = "true";
  #         unitConfig = { After = []; };
  #       };
  #     }
  #     {
  #       name = "bar";
  #       value = {
  #         script = "true";
  #         unitConfig = { After = [ "foo.service" ]; };
  #       };
  #     }
  #   ]
  mkOrderedChain = jobs: let
    unitConfigFrom = job: job.unitConfig or {};
    afterFrom = job: (unitConfigFrom job).After or [];

    previousFrom = collector:
      if collector ? previous
      then [ collector.previous ]
      else [];

    ordered = builtins.foldl'
      (collector: item: {
        services = collector.services
          ++ [{
            inherit (item) name;
            value = item.value // {
              unitConfig = (unitConfigFrom item.value) //
                {
                  After = (afterFrom item.value) ++
                    (previousFrom collector);
                };
            };
          }];
        previous = "${item.name}.service";
      })
      { services = []; }
      jobs;
  in ordered.services;
}
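The module above feeds the output of mkOrderedChain straight into lib.listToAttrs; the same pattern in isolation looks roughly like this (a sketch, assuming lib is in scope as in any NixOS module):

  let
    orderLib = import ./service-order.nix { inherit lib; };
    ordered = orderLib.mkOrderedChain [
      { name = "update-a"; value = { script = "true"; }; }
      { name = "update-b"; value = { script = "true"; }; }
    ];
  in
  {
    # update-b ends up with After = [ "update-a.service" ], so the two oneshots run in sequence
    systemd.services = lib.listToAttrs ordered;
  }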

@@ -1,6 +1,7 @@
{
  imports = [
    ./gerrit
    ./channel-scripts
    ./hydra
    ./matrix
    ./monitoring