Compare commits

..

2 commits

Author  SHA1        Message                           Date
raito   2b90c5f551  admins: provision jade            2024-07-05 19:39:44 +02:00
        Signed-off-by: Raito Bezarius <masterancpp@gmail.com>
raito   cfc83be763  systems: add fodwatch.forkos.org  2024-07-05 19:27:33 +02:00
        Signed-off-by: Raito Bezarius <masterancpp@gmail.com>
143 changed files with 9879 additions and 13105 deletions

@@ -1,7 +0,0 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8

.gitignore (vendored): 6 lines changed
@@ -1,8 +1,4 @@
result
.gcroots
config.tf.json
.direnv
.terraform
.terraform.lock.hcl
secrets/*
!secrets/*.age
.direnv

@@ -3,14 +3,15 @@ let
in {
users.users.root.openssh.authorizedKeys.keys =
keys.users.delroth ++
keys.users.emilylange ++
keys.users.hexchen ++
keys.users.jade ++
keys.users.janik ++
keys.users.k900 ++
keys.users.lukegb ++
keys.users.maxine ++
keys.users.k900 ++
keys.users.raito ++
keys.users.thubrecht ++
keys.users.yuka;
keys.users.maxine ++
keys.users.jade ++
[
# more raito
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcEkYM1r8QVNM/G5CxJInEdoBCWjEHHDdHlzDYNSUIdHHsn04QY+XI67AdMCm8w30GZnLUIj5RiJEWXREUApby0GrfxGGcy8otforygfgtmuUKAUEHdU2MMwrQI7RtTZ8oQ0USRGuqvmegxz3l5caVU7qGvBllJ4NUHXrkZSja2/51vq80RF4MKkDGiz7xUTixI2UcBwQBCA/kQedKV9G28EH+1XfvePqmMivZjl+7VyHsgUVj9eRGA1XWFw59UPZG8a7VkxO/Eb3K9NF297HUAcFMcbY6cPFi9AaBgu3VC4eetDnoN/+xT1owiHi7BReQhGAy/6cdf7C/my5ehZwD"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE0xMwWedkKosax9+7D2OlnMxFL/eV4CvFZLsbLptpXr"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiXXYkhRh+s7ixZ8rvG8ntIqd6FELQ9hh7HoaHQJRPU"
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJFsZ7PMDt80tYXHyScQajNhqH4wuYg/o0OxfOHaZD4rXuT0VIKflKH1M9LslfHWIEH3XNeqhQOziH9r+Ny5JcM="
];
}

@@ -1,14 +1,9 @@
{ lib, pkgs, ... }: {
imports = [
./known-ssh-keys.nix
];
nixpkgs.overlays = import ../overlays;
nix.package = lib.mkDefault pkgs.lix;
services.openssh.enable = lib.mkForce true;
networking.nftables.enable = true;
networking.firewall.enable = true;
networking.firewall.logRefusedConnections = false;
networking.firewall.logReversePathDrops = true;
@@ -18,56 +13,12 @@
recommendedTlsSettings = lib.mkDefault true;
recommendedProxySettings = lib.mkDefault true;
recommendedGzipSettings = lib.mkDefault true;
eventsConfig = ''
worker_connections 8192;
'';
appendConfig = ''
worker_rlimit_nofile 16384;
'';
};
nix.gc = {
automatic = true;
persistent = true;
dates = lib.mkDefault "daily";
dates = "daily";
options = "--delete-older-than 30d";
};
services.journald.extraConfig = "SystemMaxUse=512M";
boot.kernelParams = [
"panic=30" "boot.panic_on_fail"
];
boot.kernel.sysctl = {
# Set default TCP congestion control algorithm
"net.ipv4.tcp_congestion_control" = "bbr";
# Enable ECN
"net.ipv4.tcp_ecn" = 1;
# Enable TCP fast open
"net.ipv4.tcp_fastopen" = 3;
};
# reduce closure size, feel free to add your locale here
i18n.supportedLocales = [
"en_US.UTF-8/UTF-8"
"fr_FR.UTF-8/UTF-8"
];
time.timeZone = "UTC";
security.acme.acceptTerms = true;
security.acme.defaults.email = "infra@forkos.org";
# Enable system diffs.
system.activationScripts.system-diff = {
supportsDryActivation = true; # safe: only outputs to stdout
text = ''
if [ -e /run/current-system ]; then
PATH=$PATH:${pkgs.nix}/bin ${pkgs.nvd}/bin/nvd diff /run/current-system $systemConfig
fi
'';
};
}

@@ -1,32 +0,0 @@
# Taken from https://github.com/NixOS/infra/blob/master/channels.nix
{
# "Channel name" = {
# # This should be the <value> part of
# # https://hydra.forkos.org/job/<value>/latest-finished
# job = "project/jobset/jobname";
#
# # When adding a new version, determine if it needs to be tagged as a
# # variant -- for example:
# # nixos-xx.xx => primary
# # nixos-xx.xx-small => small
# # nixos-xx.xx-darwin => darwin
# # nixos-xx.xx-aarch64 => aarch64
# variant = "primary";
#
# # Channel Status:
# # '*-unstable' channels are always "rolling"
# # Otherwise a release generally progresses through the following phases:
# #
# # - Directly after branch off => "beta"
# # - Once the channel is released => "stable"
# # - Once the next channel is released => "deprecated"
# # - N months after the next channel is released => "unmaintained"
# # (check the release notes for when this should happen)
# status = "beta";
# };
"forkos-unstable" = {
job = "forkos/nixos-main/tested";
variant = "primary";
status = "rolling";
};
}
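Aside: the header comment above maps each channel name to a Hydra job. A minimal sketch of that mapping (plain Nix, evaluable with nix-instantiate --eval; the URL shape is taken from the comment itself):

let
  channels = {
    "forkos-unstable" = { job = "forkos/nixos-main/tested"; };
  };
in
  builtins.mapAttrs
    (_: c: "https://hydra.forkos.org/job/${c.job}/latest-finished")
    channels
# => { forkos-unstable = "https://hydra.forkos.org/job/forkos/nixos-main/tested/latest-finished"; }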

@@ -1,12 +1,9 @@
{
imports = [
./admins.nix
./base-server.nix
./hardening.nix
./nix.nix
./raito-proxy-aware-nginx.nix
./raito-vm.nix
./raito-proxy-aware-nginx.nix
./base-server.nix
./sysadmin
./zsh.nix
];
}

@@ -1,23 +0,0 @@
{ config, lib, ... }:
{
nix.settings.allowed-users = [ "root" ];
boot.specialFileSystems = lib.mkIf (!config.security.rtkit.enable && !config.security.polkit.enable) {
"/proc".options = [ "hidepid=2" ];
};
boot.kernel.sysctl."kernel.dmesg_restrict" = 1;
services.openssh = {
settings.PasswordAuthentication = false;
settings.KbdInteractiveAuthentication = false;
# prevents mutable /home/$user/.ssh/authorized_keys from being loaded to ensure that all user keys are config managed
authorizedKeysFiles = lib.mkForce [
"/etc/ssh/authorized_keys.d/%u"
];
};
users.mutableUsers = false;
}

@@ -1,6 +0,0 @@
{ ... }:
{
programs.ssh.knownHosts = {
"[cl.forkos.org]:29418".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM82mJ259C8Nc+BHHNBeRWXWhL3dfirQhmFbDAwHMle3";
};
}

@@ -1,21 +0,0 @@
{ lib, pkgs, ... }:
{
nix.extraOptions = ''
experimental-features = nix-command flakes
'';
# Provision a useful nixpkgs in NIX_PATH and flake registry on infra
# machines.
nixpkgs.flake = {
source = lib.cleanSource pkgs.path;
setNixPath = true;
setFlakeRegistry = true;
};
# Use our cache and trust its signing key. Still use cache.nixos.org as
# fallback.
nix.settings.substituters = [ "https://cache.forkos.org/" ];
nix.settings.trusted-public-keys = [
"cache.forkos.org:xfXIUJO1yiEITJmYsVmNDa9BFSlgTh/YqZ+4ei1EhQg="
];
}

@@ -1,10 +1,9 @@
# This enables an IPv6-only server which is proxied by kurisu.lahfa.xyz to have proper IPv4 logs via PROXY protocol.
{ config, lib, ... }:
let
inherit (lib) mkEnableOption mkIf concatStringsSep;
inherit (lib) mkEnableOption mkIf;
cfg = config.bagel.raito.v6-proxy-awareness;
# outside of raito infra, inside of raito infra (respectively)
allowedUpstreams = [ "2001:bc8:38ee::1/128" "2001:bc8:38ee:99::1/128" ];
allowedUpstream = "2001:bc8:38ee:99::1/128";
in
{
options.bagel.raito.v6-proxy-awareness.enable = mkEnableOption "the kurisu.lahfa.xyz's sniproxy awareness for NGINX";
@@ -21,8 +20,8 @@ in
];
appendHttpConfig = ''
# Kurisu nodes
${concatStringsSep "\n" (map (up: "set_real_ip_from ${up};") allowedUpstreams)}
# Kurisu node
set_real_ip_from ${allowedUpstream};
real_ip_header proxy_protocol;
'';
};
@@ -30,7 +29,7 @@ in
# Move to nftables if firewall is enabled.
networking.nftables.enable = true;
networking.firewall.extraInputRules = ''
${concatStringsSep "\n" (map (up: "ip6 saddr ${up} tcp dport 444 accept") allowedUpstreams)}
ip6 saddr ${allowedUpstream} tcp dport 444 accept
'';
};
}
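Aside: the two variants in this hunk differ in how the nginx directives are produced. The list-based form expands to one set_real_ip_from line per upstream, while the single-string form inlines exactly one. A sketch of the list expansion (assumes <nixpkgs> resolves on the evaluating machine):

let
  lib = import <nixpkgs/lib>;
  allowedUpstreams = [ "2001:bc8:38ee::1/128" "2001:bc8:38ee:99::1/128" ];
in
  lib.concatStringsSep "\n" (map (up: "set_real_ip_from ${up};") allowedUpstreams)
# evaluates to:
#   set_real_ip_from 2001:bc8:38ee::1/128;
#   set_real_ip_from 2001:bc8:38ee:99::1/128;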

@@ -30,6 +30,8 @@ in
config = mkIf cfg.enable {
services.qemuGuest.enable = true;
systemd.network.enable = true;
security.acme.defaults.email = "bagel-acme@lahfa.xyz";
security.acme.acceptTerms = true;
networking.useDHCP = lib.mkDefault false;
systemd.network.networks."10-nat-lan" = {

@@ -2,54 +2,19 @@
machines = {
bagel-box = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAsO4bNqY04uG13Pg3ubHfRDssTphDLzZ4YUniE5/p+M";
meta01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM5t9gYorOWgpCFDJgb24pyCKIabGpeI2H/UfdvXODcT";
public01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPBy8G8rfLA6E9i+t5kjVafxU1c2NXATXKxoXTH4Kgtm";
gerrit01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA+eSZu+u9sCynrMlsmFzQHLIELQAuVg0Cs1pBvwb4+A";
fodwatch = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFRyTNfvKl5FcSyzGzw+h+bNFNOxdhvI67WdUZ2iIJ1L";
buildbot = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJgIu6ouagYqBeMLfmn1CbaDJMuZcPH9bnUhkht8GfuB";
git = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEQJcpkCUOx8+5oukMX6lxrYcIX8FyHu8Mc/3+ieKMUn";
build-coord = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINpAEJP7F+XtJBpQP1jTzwXwQgJrFxwEJjPf/rnCXkJA";
builder-0 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBHSNcDGctvlG6BHcJuYIzW9WsBJsts2vpwSketsbXoL";
builder-1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIQOGUjERK7Mx8UPM/rbOdMqVyn1sbWqYOG6CbOzH2wm";
builder-2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMKzXIqCoYElEKIYgjbSpqEcDeOvV+Wo3Agq3jba83cB";
builder-3 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGq0A5233XGt34T097KaEKBUqFvaa7a6nYZRsSO0166l";
builder-4 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB9dVo2xZhgIMDgB1rUj5ApmppL39BtYu/+OFHeduvXr";
builder-5 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE7vZTBxrVHmHpv7slQ8A8XwjjbfN+ZJA0V5C3k0wNBD";
builder-6 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOt1qR/2BRtc6PABuSBulowwJVO6wBNDyEFzh0qsTeOF";
builder-7 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFinAAw1v8TJB8/wcmTVBbHHc4LCYh6z4TO6ViwUPkoh";
builder-8 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKGSWHNeqT0kF/e4yVy2ieW98X5QMyCYIYZh9WTmQDs1";
builder-9 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOhws9zGgocVY36dMtOL+CXadpvRMffxoWMkfEcTBJm7";
builder-10 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE7sgIuTSqZiZhp8TvObSbIEhcHHsL5hcmYA22uzwxth";
wob-vpn-gw = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINVytPPW8XnXf/rD5TFzsw//CZc2lBjQLmDzlVGPZsjh";
};
users = {
delroth = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII3tjB4KYDok3KlWxdBp/yEmqhhmybd+w0VO4xUwLKKV" ];
emilylange = [ "no-touch-required sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIL7jgq3i+N3gVJhs4shm7Kmw6dIocs2OuR0GBMG1RxfKAAAABHNzaDo=" ];
hexchen = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINJ0tCxsEilAzV6LaNpUpcjzyEn4ptw8kFz3R+Z3YjEF hexchen@backup"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI3T1eFS77URHZ/HVWkMOqx7W1U54zJtn9C7QWsHOtyH72i/4EVj8SxYqLllElh1kuKUXSUipPeEzVsipFVvfH0wEuTDgFffiSQ3a8lfUgdEBuoySwceEoPgc5deapkOmiDIDeeWlrRe3nqspLRrSWU1DirMxoFPbwqJXRvpl6qJPxRg+2IolDcXlZ6yxB4Vv48vzRfVzZNUz7Pjmy2ebU8PbDoFWL/S3m7yOzQpv3L7KYBz7+rkjuF3AU2vy6CAfIySkVpspZZLtkTGCIJF228ev0e8NvhuN6ZnjzXxVTQOy32HCdPdbBbicu0uHfZ5O7JX9DjGd8kk1r2dnZwwy/ hexchen@yubi5"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4CLJ+mFfq5XiBXROKewmN9WYmj+79bj/AoaR6Iud2pirulot3tkrrLe2cMjiNWFX8CGVqrsAELKUA8EyUTJfStlcTE0/QNESTRmdDaC+lZL41pWUO9KOiD6/0axAhHXrSJ0ScvbqtD0CtpnCKKxtuOflVPoUGZsH9cLKJNRKfEka0H0GgeKb5Tp618R/WNAQOwaCcXzg/nG4Bgv3gJW4Nm9IKy/MwRZqtILi8Mtd+2diTqpMwyNRmbenmRHCQ1vRw46joYkledVqrmSlfSMFgIHI1zRSBXb/JkG2IvIyB5TGbTkC4N2fqJNpH8wnCKuOvs46xmgdiRA26P48C2em3 hexchen@yubi5c"
];
raito = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICaw9ihTG7ucB8P38XdalEWev8+q96e2yNm4B+/I9IJp" ];
k900 = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOi9vgVGs+S5kEsUqHPvyMMh1Q9gqL4TcbHoe5d73tun" ];
maxine = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILpWQfhNFdrxMTP/1DwBVuk49f3df9iH7Tbdu8ltIKjr" ];
jade = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNldAg4t13/i69TD786The+U3wbiNUdW2Kc9KNWvEhgpf4y4x4Sft0oYfkPw5cjX4H3APqfD+b7ItAG0GCbwHw6KMYPoVMNK08zBMJUqt1XExbqGeFLqBaeqDsmEAYXJRbjMTAorpOCtgQdoCKK/DvZ51zUWXxT8UBNHSl19Ryv5Ry5VVdbAE35rqs57DQ9+ma6htXnsBEmmnC+1Zv1FE956m/OpBTId50mor7nS2FguAtPZnDPpTd5zl9kZmJEuWCrmy6iinw5V4Uy1mLeZkQv+/FtozbyifCRCvps9nHpv4mBSU5ABLgnRRvXs+D41Jx7xloNADr1nNgpsNrYaTh hed-bot-ssh-tpm-rsa"
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIKYljH8iPMrH00lOb3ETxRrZimdKzPPEdsJQ5D5ovtOwAAAACnNzaDpzc2hrZXk= ssh:sshkey"
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBO4idMfdJxDJuBNOid60d4I+qxj09RHt+YkCYV2eXt6tGrEXg+S8hTQusy/SqooiXUH9pt4tea2RuBPN9+UwrH0= type-a yubikey slot 9a"
];
janik = [
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIJ4yq7oHBO2iPs4xj797a//0ypnBr27sSadKUeL2NsK6AAAABHNzaDo="
"sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIOYg513QZsVzoyVycXZjg4F3T3+OwtcY3WAhrlfyLgLTAAAABHNzaDo="
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLZxVITpJ8xbiCa/u2gjSSIupeiqOnRh+8tFIoVhCON"
];
k900 = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOi9vgVGs+S5kEsUqHPvyMMh1Q9gqL4TcbHoe5d73tun" ];
lukegb = [ ''cert-authority,principals="lukegb" ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEqNOwlR7Qa8cbGpDfSCOweDPbAGQOZIcoRgh6s/J8DR'' ];
maxine = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILpWQfhNFdrxMTP/1DwBVuk49f3df9iH7Tbdu8ltIKjr" ];
raito = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICaw9ihTG7ucB8P38XdalEWev8+q96e2yNm4B+/I9IJp"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcEkYM1r8QVNM/G5CxJInEdoBCWjEHHDdHlzDYNSUIdHHsn04QY+XI67AdMCm8w30GZnLUIj5RiJEWXREUApby0GrfxGGcy8otforygfgtmuUKAUEHdU2MMwrQI7RtTZ8oQ0USRGuqvmegxz3l5caVU7qGvBllJ4NUHXrkZSja2/51vq80RF4MKkDGiz7xUTixI2UcBwQBCA/kQedKV9G28EH+1XfvePqmMivZjl+7VyHsgUVj9eRGA1XWFw59UPZG8a7VkxO/Eb3K9NF297HUAcFMcbY6cPFi9AaBgu3VC4eetDnoN/+xT1owiHi7BReQhGAy/6cdf7C/my5ehZwD"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE0xMwWedkKosax9+7D2OlnMxFL/eV4CvFZLsbLptpXr"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiXXYkhRh+s7ixZ8rvG8ntIqd6FELQ9hh7HoaHQJRPU"
];
thubrecht = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM1jpXR7BWQa7Sed7ii3SbvIPRRlKb3G91qC0vOwfJn" ];
yuka = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKath4/fDnlv/4fzxkPrQN1ttmoPRNu/m9bEtdPJBDfY cardno:16_933_242" ];
};
}

@@ -20,10 +20,6 @@ in
bcc
tcpdump
ncdu
# Useful to invoke `coredumpctl gdb`
gdb
htop
btop
];
] ++ lib.optional (lib.hasAttr "pwru" pkgs) pkgs.pwru;
};
}
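Aside: the lib.optional line in this hunk appends pwru only when the attribute exists, so the module keeps evaluating on nixpkgs revisions that predate pwru. A minimal sketch of the pattern (pkgs stubbed out for illustration only):

let
  lib = import <nixpkgs/lib>;
  pkgs = { pwru = "pwru-drv"; };  # stand-in attrset, not the real package set
in
  [ "bcc" "tcpdump" ] ++ lib.optional (lib.hasAttr "pwru" pkgs) pkgs.pwru
# => [ "bcc" "tcpdump" "pwru-drv" ]; with no pwru attribute it stays [ "bcc" "tcpdump" ]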

@@ -1,16 +0,0 @@
{ lib, pkgs, config, ... }: {
users.defaultUserShell = pkgs.zsh;
programs.zsh = {
enable = true;
enableCompletion = true;
autosuggestions.enable = true;
interactiveShellInit = ''
${lib.getExe pkgs.nix-your-shell} zsh | source /dev/stdin
'';
promptInit = ''
# https://grml.org/zsh/grml-zsh-refcard.pdf
source ${pkgs.grml-zsh-config}/etc/zsh/zshrc
PS1='%n@${config.networking.fqdn} %/ \$ '
'';
};
}

@@ -1,43 +0,0 @@
{ gerrit-dashboard, stdenv, symlinkJoin, jsonnet, fetchFromGitHub, lib, ... }:
let
inherit (lib) concatMapStringsSep;
datasource-id = "mimir";
in
rec {
grafonnet = fetchFromGitHub {
owner = "grafana";
repo = "grafonnet-lib";
# TODO: figure out how to read the jsonnet lockfile
# and propagate this a bit cleverly.
rev = "a1d61cce1da59c71409b99b5c7568511fec661ea";
hash = "sha256-fs5JZJbcL6sQXBjYhp5eeRtjTFw0J1O/BcwBC8Vm9EM=";
};
buildJsonnetDashboards = dashboardSrc: targets: stdenv.mkDerivation {
name = "jsonnet-grafana-dashboards";
src = dashboardSrc;
buildInputs = [ jsonnet ];
buildPhase = ''
runHook preBuild
mkdir -p $out
${concatMapStringsSep "\n" (target: "jsonnet -J ${grafonnet} --ext-str datasource=${datasource-id} --ext-code publish=true $src/${target} > $out/${baseNameOf target}.json") targets}
runHook postBuild
'';
};
allDashboards = symlinkJoin {
name = "all-jsonnet-dashboards";
paths = [
(buildJsonnetDashboards gerrit-dashboard [
"dashboards/gerrit/caches/gerrit-caches.jsonnet"
"dashboards/gerrit/fetch-clone/gerrit-fetch-clone.jsonnet"
"dashboards/gerrit/fetch-clone/gerrit-phases.jsonnet"
"dashboards/gerrit/healthcheck/gerrit-healthcheck.jsonnet"
"dashboards/gerrit/latency/gerrit-push-latency.jsonnet"
"dashboards/gerrit/latency/gerrit-ui-actions-latency.jsonnet"
"dashboards/gerrit/overview/gerrit-overview.jsonnet"
"dashboards/gerrit/process/gerrit-process.jsonnet"
"dashboards/gerrit/queues/gerrit-queues.jsonnet"
])
];
};
}

@@ -10,11 +10,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1723293904,
"narHash": "sha256-b+uqzj+Wa6xgMS9aNbX4I+sXeb5biPDi39VgvSFqFvU=",
"lastModified": 1718371084,
"narHash": "sha256-abpBi61mg0g+lFFU0zY4C6oP6fBwPzbHPKBGw676xsA=",
"owner": "ryantm",
"repo": "agenix",
"rev": "f6291c5935fdc4e0bef208cfc0dcab7e3f7a1c41",
"rev": "3a56735779db467538fb2e577eda28a9daacaca6",
"type": "github"
},
"original": {
@@ -23,29 +23,6 @@
"type": "github"
}
},
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat_2",
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1711742460,
"narHash": "sha256-0O4v6e4a1toxXZ2gf5INhg4WPE5C5T+SVvsBt+45Mcc=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "4dbdbee45728d8ce5788db6461aaaa89d98081f0",
"type": "github"
},
"original": {
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"bats-assert": {
"flake": false,
"locked": {
@@ -78,49 +55,6 @@
"type": "github"
}
},
"buildbot-nix": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": [
"nixpkgs"
],
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1722939563,
"narHash": "sha256-lMe8aXgF550iQLRaoU+yn8yYQ4x2qiyqANgsFyjfWwA=",
"ref": "refs/heads/non-flakes",
"rev": "4a162a8aa5dad6cecdb33bd8534e67e0bdaeb13f",
"revCount": 295,
"type": "git",
"url": "https://git.lix.systems/lix-project/buildbot-nix.git"
},
"original": {
"ref": "refs/heads/non-flakes",
"type": "git",
"url": "https://git.lix.systems/lix-project/buildbot-nix.git"
}
},
"channel-scripts": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1725128016,
"narHash": "sha256-4TvaXELsl+1OcGNgqB/5HVXVxBvdIQkhJsY4FyiDcNU=",
"ref": "refs/heads/main",
"rev": "23b6c38ed7e11417bf624f6e4fb6cde0d2be6400",
"revCount": 261,
"type": "git",
"url": "https://git.lix.systems/the-distro/channel-scripts.git"
},
"original": {
"type": "git",
"url": "https://git.lix.systems/the-distro/channel-scripts.git"
}
},
"colmena": {
"inputs": {
"flake-compat": "flake-compat",
@@ -144,50 +78,6 @@
"type": "github"
}
},
"crane": {
"inputs": {
"nixpkgs": [
"grapevine",
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1702918879,
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
"owner": "ipetkov",
"repo": "crane",
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"crane_2": {
"inputs": {
"nixpkgs": [
"grapevine",
"nixpkgs"
]
},
"locked": {
"lastModified": 1716569590,
"narHash": "sha256-5eDbq8TuXFGGO3mqJFzhUbt5zHVTf5zilQoyW5jnJwo=",
"owner": "ipetkov",
"repo": "crane",
"rev": "109987da061a1bf452f435f1653c47511587d919",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"type": "github"
}
},
"darwin": {
"inputs": {
"nixpkgs": [
@@ -210,29 +100,6 @@
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"grapevine",
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1716359173,
"narHash": "sha256-pYcjP6Gy7i6jPWrjiWAVV0BCQp+DdmGaI/k65lBb/kM=",
"owner": "nix-community",
"repo": "fenix",
"rev": "b6fc5035b28e36a98370d0eac44f4ef3fd323df6",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "main",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
@@ -250,39 +117,6 @@
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"ref": "master",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_4": {
"flake": false,
"locked": {
"lastModified": 1696426674,
@@ -298,49 +132,6 @@
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"buildbot-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1706830856,
"narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": [
"hydra",
"nix-eval-jobs",
"nixpkgs"
]
},
"locked": {
"lastModified": 1722555600,
"narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1659877975,
@@ -357,40 +148,6 @@
}
},
"flake-utils_2": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_4": {
"locked": {
"lastModified": 1634851050,
"narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=",
@@ -405,51 +162,6 @@
"type": "github"
}
},
"gerrit-dashboard": {
"flake": false,
"locked": {
"lastModified": 1724509518,
"narHash": "sha256-fwYXZVddxfzrlDa3QnFCwHqrbEX+3PrWy0QOlbO+8jk=",
"ref": "refs/heads/master",
"rev": "e544abac81c581558d68abb2a8dd583049073939",
"revCount": 75,
"type": "git",
"url": "https://git.lix.systems/the-distro/gerrit-monitoring.git"
},
"original": {
"type": "git",
"url": "https://git.lix.systems/the-distro/gerrit-monitoring.git"
}
},
"grapevine": {
"inputs": {
"attic": "attic",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils_3",
"nix-filter": "nix-filter",
"nixpkgs": [
"nixpkgs"
],
"rust-manifest": "rust-manifest"
},
"locked": {
"host": "gitlab.computer.surgery",
"lastModified": 1723576377,
"narHash": "sha256-sTa4XT5xMQkhhLknOfVd433YS1TvkMrE45qAsI1ZB6U=",
"owner": "matrix",
"repo": "grapevine-fork",
"rev": "3b99032456700d06dd937db6a85976a8be9d4fa7",
"type": "gitlab"
},
"original": {
"host": "gitlab.computer.surgery",
"owner": "matrix",
"repo": "grapevine-fork",
"type": "gitlab"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
@@ -473,18 +185,17 @@
},
"hydra": {
"inputs": {
"lix": "lix",
"nix-eval-jobs": "nix-eval-jobs",
"nix": "nix",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1724616313,
"narHash": "sha256-9syppf9Gm/6F4wQQAbsf7rGY1DooMsprnsEY/0eaewg=",
"lastModified": 1719258100,
"narHash": "sha256-Eu8ausj0RsXV5MraCPezwX+j51iZD0ukif110Yj2+6k=",
"ref": "refs/heads/main",
"rev": "44b9a7b95d23e7a8587cb963f00382046707f2db",
"revCount": 4202,
"rev": "a9a2679793a17325c966dec4cbb27d44b0531694",
"revCount": 4172,
"type": "git",
"url": "https://git.lix.systems/lix-project/hydra.git"
},
@@ -493,9 +204,9 @@
"url": "https://git.lix.systems/lix-project/hydra.git"
}
},
"lix": {
"nix": {
"inputs": {
"flake-compat": "flake-compat_4",
"flake-compat": "flake-compat_2",
"nix2container": "nix2container",
"nixpkgs": [
"hydra",
@@ -505,113 +216,27 @@
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1723919517,
"narHash": "sha256-D6+zmRXzr85p7riphuIrJQqangoJe70XM5jHhMWwXws=",
"lastModified": 1719211568,
"narHash": "sha256-oIgmvhe3CV/36LC0KXgqWnKXma39wabks8U9JBMDfO4=",
"ref": "refs/heads/main",
"rev": "278fddc317cf0cf4d3602d0ec0f24d1dd281fadb",
"revCount": 16138,
"rev": "4c3d93611f2848c56ebc69c85f2b1e18001ed3c7",
"revCount": 15877,
"type": "git",
"url": "https://git.lix.systems/lix-project/lix"
"url": "https://git@git.lix.systems/lix-project/lix"
},
"original": {
"type": "git",
"url": "https://git.lix.systems/lix-project/lix"
}
},
"nix-eval-jobs": {
"inputs": {
"flake-parts": "flake-parts_2",
"lix": [
"hydra",
"lix"
],
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"hydra",
"nixpkgs"
],
"treefmt-nix": "treefmt-nix_2"
},
"locked": {
"lastModified": 1723579251,
"narHash": "sha256-xnHtfw0gRhV+2S9U7hQwvp2klTy1Iv7FlMMO0/WiMVc=",
"ref": "refs/heads/main",
"rev": "42a160bce2fd9ffebc3809746bc80cc7208f9b08",
"revCount": 609,
"type": "git",
"url": "https://git.lix.systems/lix-project/nix-eval-jobs"
},
"original": {
"type": "git",
"url": "https://git.lix.systems/lix-project/nix-eval-jobs"
}
},
"nix-filter": {
"locked": {
"lastModified": 1710156097,
"narHash": "sha256-1Wvk8UP7PXdf8bCCaEoMnOT1qe5/Duqgj+rL8sRQsSM=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "3342559a24e85fc164b295c3444e8a139924675b",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "nix-filter",
"type": "github"
}
},
"nix-gerrit": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1720891381,
"narHash": "sha256-bdZRPgnkROSejmwMOrlcqHMWmuPIVIzjk6r5FbS+fqU=",
"ref": "refs/heads/main",
"rev": "23dd318e6741ff686d3069c53ecf475eac8a0565",
"revCount": 5,
"type": "git",
"url": "https://git.lix.systems/the-distro/nix-gerrit.git"
},
"original": {
"type": "git",
"url": "https://git.lix.systems/the-distro/nix-gerrit.git"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"hydra",
"nix-eval-jobs",
"nixpkgs"
]
},
"locked": {
"lastModified": 1720066371,
"narHash": "sha256-uPlLYH2S0ACj0IcgaK9Lsf4spmJoGejR9DotXiXSBZQ=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "622f829f5fe69310a866c8a6cd07e747c44ef820",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
"url": "https://git@git.lix.systems/lix-project/lix"
}
},
"nix2container": {
"flake": false,
"locked": {
"lastModified": 1720642556,
"narHash": "sha256-qsnqk13UmREKmRT7c8hEnz26X3GFFyIQrqx4EaRc1Is=",
"lastModified": 1712990762,
"narHash": "sha256-hO9W3w7NcnYeX8u8cleHiSpK2YJo7ecarFTUlbybl7k=",
"owner": "nlewo",
"repo": "nix2container",
"rev": "3853e5caf9ad24103b13aa6e0e8bcebb47649fe4",
"rev": "20aad300c925639d5d6cbe30013c8357ce9f2a2e",
"type": "github"
},
"original": {
@@ -622,11 +247,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1711401922,
"narHash": "sha256-QoQqXoj8ClGo0sqD/qWKFWezgEwUL0SUh37/vY2jNhc=",
"lastModified": 1719082008,
"narHash": "sha256-jHJSUH619zBQ6WdC21fFAlDxHErKVDJ5fpN0Hgx4sjs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "07262b18b97000d16a4bdb003418bd2fb067a932",
"rev": "9693852a2070b398ee123a329e68f0dab5526681",
"type": "github"
},
"original": {
@@ -652,34 +277,17 @@
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1711460390,
"narHash": "sha256-akSgjDZL6pVHEfSE6sz1DNSXuYX6hq+P/1Z5IoYWs7E=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "44733514b72e732bd49f5511bd0203dea9b9a434",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1723221148,
"narHash": "sha256-7pjpeQlZUNQ4eeVntytU3jkw9dFK3k1Htgk2iuXjaD8=",
"owner": "NixOS",
"lastModified": 1636823747,
"narHash": "sha256-oWo1nElRAOZqEf90Yek2ixdHyjD+gqtS/pAgwaQ9UhQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "154bcb95ad51bc257c2ce4043a725de6ca700ef6",
"rev": "f6a2ed2082d9a51668c86ba27d0b5496f7a2ea93",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
@@ -687,11 +295,11 @@
"pre-commit-hooks": {
"flake": false,
"locked": {
"lastModified": 1721042469,
"narHash": "sha256-6FPUl7HVtvRHCCBQne7Ylp4p+dpP3P/OYuzjztZ4s70=",
"lastModified": 1712055707,
"narHash": "sha256-4XLvuSIDZJGS17xEwSrNuJLL7UjDYKGJSbK1WWX2AK8=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "f451c19376071a90d8c58ab1a953c6e9840527fd",
"rev": "e35aed5fda3cc79f88ed7f1795021e559582093a",
"type": "github"
},
"original": {
@@ -703,50 +311,16 @@
"root": {
"inputs": {
"agenix": "agenix",
"buildbot-nix": "buildbot-nix",
"channel-scripts": "channel-scripts",
"colmena": "colmena",
"gerrit-dashboard": "gerrit-dashboard",
"grapevine": "grapevine",
"hydra": "hydra",
"lix": [
"hydra",
"lix"
"nix"
],
"nix-gerrit": "nix-gerrit",
"nixpkgs": "nixpkgs_2",
"nixpkgs": "nixpkgs",
"terranix": "terranix"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1716107283,
"narHash": "sha256-NJgrwLiLGHDrCia5AeIvZUHUY7xYGVryee0/9D3Ir1I=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "21ec8f523812b88418b2bfc64240c62b3dd967bd",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"rust-manifest": {
"flake": false,
"locked": {
"narHash": "sha256-aZFye4UrtlcvLHrISldx4g9uGt3thDbVlLMK5keBSj0=",
"type": "file",
"url": "https://static.rust-lang.org/dist/channel-rust-1.78.0.toml"
},
"original": {
"type": "file",
"url": "https://static.rust-lang.org/dist/channel-rust-1.78.0.toml"
}
},
"stable": {
"locked": {
"lastModified": 1696039360,
@@ -778,29 +352,12 @@
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"terranix": {
"inputs": {
"bats-assert": "bats-assert",
"bats-support": "bats-support",
"flake-utils": "flake-utils_4",
"nixpkgs": [
"nixpkgs"
],
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_2",
"terranix-examples": "terranix-examples"
},
"locked": {
@@ -831,49 +388,6 @@
"repo": "terranix-examples",
"type": "github"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"buildbot-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1708897213,
"narHash": "sha256-QECZB+Hgz/2F/8lWvHNk05N6NU/rD9bWzuNn6Cv8oUk=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "e497a9ddecff769c2a7cbab51e1ed7a8501e7a3a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
},
"treefmt-nix_2": {
"inputs": {
"nixpkgs": [
"hydra",
"nix-eval-jobs",
"nixpkgs"
]
},
"locked": {
"lastModified": 1723454642,
"narHash": "sha256-S0Gvsenh0II7EAaoc9158ZB4vYyuycvMGKGxIbERNAM=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "349de7bc435bdff37785c2466f054ed1766173be",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
}
},
"root": "root",

flake.nix: 201 lines changed
@@ -3,9 +3,7 @@
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
terranix.url = "github:terranix/terranix";
terranix.inputs.nixpkgs.follows = "nixpkgs";
agenix.url = "github:ryantm/agenix";
agenix.inputs.nixpkgs.follows = "nixpkgs";
@@ -16,124 +14,111 @@
hydra.url = "git+https://git.lix.systems/lix-project/hydra.git";
hydra.inputs.nixpkgs.follows = "nixpkgs";
nix-gerrit.url = "git+https://git.lix.systems/the-distro/nix-gerrit.git";
nix-gerrit.inputs.nixpkgs.follows = "nixpkgs";
gerrit-dashboard.url = "git+https://git.lix.systems/the-distro/gerrit-monitoring.git";
gerrit-dashboard.flake = false;
buildbot-nix.url = "git+https://git.lix.systems/lix-project/buildbot-nix.git?ref=refs/heads/non-flakes";
buildbot-nix.inputs.nixpkgs.follows = "nixpkgs";
channel-scripts.url = "git+https://git.lix.systems/the-distro/channel-scripts.git";
channel-scripts.inputs.nixpkgs.follows = "nixpkgs";
lix.follows = "hydra/lix";
grapevine = {
type = "gitlab";
host = "gitlab.computer.surgery";
owner = "matrix";
repo = "grapevine-fork";
inputs.nixpkgs.follows = "nixpkgs";
};
lix.follows = "hydra/nix";
};
outputs = { self, nixpkgs, terranix, colmena, ... } @ inputs:
outputs = { self, nixpkgs, terranix, ... } @ inputs:
let
supportedSystems = [ "x86_64-linux" "aarch64-linux" ];
forEachSystem = f: builtins.listToAttrs (map (system: {
name = system;
value = f system;
}) supportedSystems);
systemBits = forEachSystem (system: rec {
system = "x86_64-linux";
pkgs = import nixpkgs {
localSystem = system;
overlays = [
inputs.hydra.overlays.default
inputs.lix.overlays.default
];
};
lib = pkgs.lib;
terraform = pkgs.opentofu;
terraformCfg = terranix.lib.terranixConfiguration {
inherit system;
pkgs = import nixpkgs {
modules = [ ];
};
in
{
apps.${system} = {
apply = {
type = "app";
program = toString (pkgs.writers.writeBash "apply" ''
set -eo pipefail
rm -f config.tf.json
cp ${terraformCfg} config.tf.json
${lib.getExe terraform} init
${lib.getExe terraform} apply
'');
};
# nix run ".#destroy"
destroy = {
type = "app";
program = toString (pkgs.writers.writeBash "destroy" ''
set -eo pipefail
ln -snf ${terraformCfg} config.tf.json
${lib.getExe terraform} init
${lib.getExe terraform} destroy
'');
};
};
apps.${system}.default = self.apps.${system}.apply;
devShells.${system}.default = pkgs.mkShell {
packages = [
inputs.agenix.packages.${system}.agenix
inputs.colmena.packages.${system}.colmena
];
};
colmena = {
meta.nixpkgs = import nixpkgs {
localSystem = system;
overlays = [
inputs.hydra.overlays.default
inputs.lix.overlays.default
inputs.nix-gerrit.overlays.default
inputs.channel-scripts.overlays.default
];
};
terraform = pkgs.opentofu;
terraformCfg = terranix.lib.terranixConfiguration {
inherit system;
modules = [
./terraform
{
bagel.dnsimple.enable = true;
bagel.gandi.enable = true;
bagel.hydra.enable = true;
}
];
};
});
forEachSystem' = f: forEachSystem (system: (f systemBits.${system}));
inherit (nixpkgs) lib;
in
{
apps = forEachSystem' ({ system, pkgs, terraformCfg, terraform, ... }: {
tf = {
type = "app";
program = toString (pkgs.writers.writeBash "tf" ''
set -eo pipefail
ln -snf ${terraformCfg} config.tf.json
exec ${lib.getExe terraform} "$@"
'');
};
default = self.apps.${system}.tf;
});
devShells = forEachSystem' ({ system, pkgs, ... }: {
default = pkgs.mkShell {
packages = [
inputs.agenix.packages.${system}.agenix
pkgs.opentofu
(pkgs.callPackage ./lib/colmena-wrapper.nix { })
];
};
});
nixosConfigurations = (colmena.lib.makeHive self.outputs.colmena).nodes;
colmena = let
commonModules = [
inputs.agenix.nixosModules.default
inputs.hydra.nixosModules.hydra
inputs.buildbot-nix.nixosModules.buildbot-coordinator
inputs.buildbot-nix.nixosModules.buildbot-worker
./services
./common
];
makeBuilder = i: lib.nameValuePair "builder-${toString i}" {
imports = commonModules;
bagel.baremetal.builders = { enable = true; num = i; netboot = i >= 6; };
};
builders = lib.listToAttrs (lib.genList makeBuilder 11);
in {
meta.nixpkgs = systemBits.x86_64-linux.pkgs;
meta.specialArgs.inputs = inputs;
bagel-box.imports = commonModules ++ [ ./hosts/bagel-box ];
meta01.imports = commonModules ++ [ ./hosts/meta01 ];
gerrit01.imports = commonModules ++ [ ./hosts/gerrit01 ];
fodwatch.imports = commonModules ++ [ ./hosts/fodwatch ];
git.imports = commonModules ++ [ ./hosts/git ];
wob-vpn-gw.imports = commonModules ++ [ ./hosts/wob-vpn-gw ];
buildbot.imports = commonModules ++ [ ./hosts/buildbot ];
public01.imports = commonModules ++ [ ./hosts/public01 ];
build-coord.imports = commonModules ++ [ ./hosts/build-coord ];
} // builders;
bagel-box = {
imports = [
inputs.agenix.nixosModules.default
inputs.hydra.nixosModules.hydra
hydraJobs = builtins.mapAttrs (n: v: v.config.system.build.netbootDir or v.config.system.build.toplevel) self.nixosConfigurations;
buildbotJobs = builtins.mapAttrs (_: v: v.config.system.build.toplevel) self.nixosConfigurations;
./services
./common
./hosts/bagel-box
];
};
meta01 = {
imports = [
inputs.agenix.nixosModules.default
inputs.hydra.nixosModules.hydra
./services
./common
./hosts/meta01.nixpkgs.lahfa.xyz
];
};
gerrit01 = {
imports = [
inputs.agenix.nixosModules.default
inputs.hydra.nixosModules.hydra
./services
./common
./hosts/cl.forkos.org
];
};
fodwatch = {
imports = [
inputs.agenix.nixosModules.default
inputs.hydra.nixosModules.hydra
./services
./common
./hosts/fodwatch.forkos.org
];
};
};
};
}
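Aside: the makeBuilder/genList block in this hunk derives all eleven builder nodes from an index instead of writing them out by hand. A standalone sketch of the same pattern (assumes <nixpkgs> resolves):

let
  lib = import <nixpkgs/lib>;
  makeBuilder = i: lib.nameValuePair "builder-${toString i}" {
    bagel.baremetal.builders = { enable = true; num = i; netboot = i >= 6; };
  };
  builders = lib.listToAttrs (lib.genList makeBuilder 11);
in
  builtins.attrNames builders
# => eleven attributes, builder-0 through builder-10; netboot is true from builder-6 onward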

@@ -20,7 +20,6 @@
useHostResolvConf = false;
hostName = "bagel-box";
domain = "infra.forkos.org";
nameservers = [ "2001:4860:4860::8844" ];
interfaces.host0.ipv6.addresses = [
@@ -37,13 +36,16 @@
bagel.services = {
postgres.enable = true;
ofborg.enable = true;
};
bagel.sysadmin.enable = true;
hydra.enable = true;
hydra.dbi = "dbi:Pg:dbname=hydra;user=hydra";
};
bagel.meta.monitoring.address = "bagel-box.delroth.net";
security.acme.acceptTerms = true;
security.acme.defaults.email = "bagel@delroth.net";
services.openssh.enable = true;
system.stateVersion = "24.11";
deployment.targetHost = "bagel-box.infra.forkos.org";
deployment.targetHost = "bagel-box.delroth.net";
}

@@ -1,21 +0,0 @@
{ lib, ... }:
{
imports = [ ./hardware.nix ];
networking.hostName = "build-coord";
networking.domain = "wob01.infra.forkos.org";
bagel.sysadmin.enable = true;
bagel.services = {
hydra.enable = true;
# Takes 10 builders (0 → 9).
hydra.builders = lib.genList (i: "builder-${builtins.toString i}") 10;
};
# Hydra is proxied.
bagel.raito.v6-proxy-awareness.enable = true;
system.stateVersion = "24.05";
deployment.targetHost = "build-coord.wob01.infra.forkos.org";
}

@@ -1,87 +0,0 @@
{
boot.initrd.availableKernelModules = [ "ahci" "ehci_pci" "usb_storage" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ "dm-snapshot" ];
nixpkgs.hostPlatform = "x86_64-linux";
hardware.cpu.intel.updateMicrocode = true;
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.initrd.systemd.enable = true;
boot.initrd.services.lvm.enable = true;
boot.kernelParams = [
"console=tty1"
"console=ttyS0,115200"
];
fileSystems = {
"/" = {
device = "/dev/disk/by-label/root";
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-label/BOOT";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
};
swapDevices = [
{
device = "/swapfile";
size = 20 * 1024; # 20GiB
}
];
zramSwap = {
enable = true;
memoryPercent = 25;
};
networking.useNetworkd = true;
systemd.network = {
netdevs = {
"40-uplink" = {
netdevConfig = {
Kind = "bond";
Name = "uplink";
};
bondConfig = {
Mode = "802.3ad";
TransmitHashPolicy = "layer3+4";
};
};
};
networks = {
"40-eno1" = {
name = "eno1";
bond = [ "uplink" ];
};
"40-eno2" = {
name = "eno2";
bond = [ "uplink" ];
};
};
};
networking.interfaces.uplink.ipv6.addresses = [
{ address = "2a01:584:11::1:11"; prefixLength = 64; }
];
networking.defaultGateway6 = { interface = "uplink"; address = "2a01:584:11::1"; };
services.coredns = {
enable = true;
config = ''
. {
bind lo
forward . 2001:4860:4860::6464
template ANY A { rcode NOERROR }
}
'';
};
services.resolved.enable = false;
networking.resolvconf.useLocalResolver = true;
}

@@ -1,36 +0,0 @@
{
config,
lib,
pkgs,
...
}:
{
networking.hostName = "buildbot";
# TODO: make it the default
networking.domain = "infra.forkos.org";
bagel.sysadmin.enable = true;
# Buildbot is proxied.
bagel.raito.v6-proxy-awareness.enable = true;
bagel.hardware.raito-vm = {
enable = true;
networking = {
nat-lan-mac = "BC:24:11:E7:42:8B";
wan = {
address = "2001:bc8:38ee:100:1000::50/64";
mac = "BC:24:11:C9:BA:6C";
};
};
};
bagel.services.buildbot = {
enable = true;
domain = "buildbot.forkos.org";
builders = [ "builder-10" ];
};
i18n.defaultLocale = "en_US.UTF-8";
system.stateVersion = "24.05";
deployment.targetHost = "buildbot.infra.forkos.org";
}

hosts/cl.forkos.org/default.nix (new executable file): 46 lines changed
@@ -0,0 +1,46 @@
{
config,
lib,
pkgs,
...
}:
{
networking.hostName = "gerrit01";
# TODO: make it the default
networking.domain = "infra.forkos.org";
time.timeZone = "Europe/Paris";
bagel.sysadmin.enable = true;
# Gerrit is proxied.
bagel.raito.v6-proxy-awareness.enable = true;
bagel.hardware.raito-vm = {
enable = true;
networking = {
nat-lan-mac = "bc:24:11:f7:29:6c";
wan = {
address = "2001:bc8:38ee:100:1000::10/64";
mac = "bc:24:11:4a:9d:32";
};
};
};
bagel.meta.monitoring.address = "gerrit01.infra.forkos.org";
fileSystems."/gerrit-data" = {
device = "/dev/disk/by-uuid/d1062305-0dea-4740-9a27-b6b1691862a4";
fsType = "ext4";
};
bagel.services.gerrit = {
enable = true;
domains = [
"cl.forkos.org"
];
data = "/gerrit-data";
};
i18n.defaultLocale = "fr_FR.UTF-8";
system.stateVersion = "24.05";
deployment.targetHost = "gerrit01.infra.forkos.org";
}

@@ -8,6 +8,8 @@
networking.hostName = "fodwatch";
networking.domain = "infra.forkos.org";
time.timeZone = "Europe/Paris";
bagel.sysadmin.enable = true;
# Fodwatch will be proxied.
bagel.raito.v6-proxy-awareness.enable = true;
@@ -22,6 +24,8 @@
};
};
bagel.meta.monitoring.address = "fodwatch.infra.forkos.org";
i18n.defaultLocale = "en_US.UTF-8";
system.stateVersion = "24.05";

@@ -1,148 +0,0 @@
{
config,
lib,
pkgs,
...
}:
{
networking.hostName = "gerrit01";
# TODO: make it the default
networking.domain = "infra.forkos.org";
bagel.sysadmin.enable = true;
# Gerrit is proxied.
bagel.raito.v6-proxy-awareness.enable = true;
bagel.hardware.raito-vm = {
enable = true;
networking = {
nat-lan-mac = "bc:24:11:f7:29:6c";
wan = {
address = "2001:bc8:38ee:100:1000::10/64";
mac = "bc:24:11:4a:9d:32";
};
};
};
fileSystems."/gerrit-data" = {
device = "/dev/disk/by-uuid/d1062305-0dea-4740-9a27-b6b1691862a4";
fsType = "ext4";
};
bagel.services.gerrit = {
enable = true;
pyroscope.enable = true;
domains = [
"cl.forkos.org"
];
canonicalDomain = "cl.forkos.org";
data = "/gerrit-data";
};
age.secrets.ows-deploy-key = {
file = ../../secrets/ows-deploy-key.age;
mode = "0600";
owner = "git";
group = "git";
};
bagel.nixpkgs.one-way-sync =
let
mkNixpkgsJob = { timer, fromRefspec, localRefspec ? fromRefspec }: {
fromUri = "https://github.com/NixOS/nixpkgs";
inherit fromRefspec localRefspec timer;
};
mkLocalJob = { timer, fromRefspec, localRefspec }: {
fromUri = "https://cl.forkos.org/nixpkgs";
inherit fromRefspec localRefspec timer;
};
in
{
enable = true;
pushUrl = "ssh://ows_bot@cl.forkos.org:29418/nixpkgs";
deployKeyPath = config.age.secrets.ows-deploy-key.path;
# Sync main -> staging-next -> staging
branches."main-to-staging-next" = mkLocalJob {
timer = "00/8:20:00"; # every 8 hours, 20 minutes past the full hour
fromRefspec = "main";
localRefspec = "staging-next";
};
branches."staging-next-to-staging" = mkLocalJob {
timer = "00/8:40:00"; # every 8 hours, 40 minutes past the full hour
fromRefspec = "staging-next";
localRefspec = "staging";
};
# Sync nixpkgs -> fork
branches."nixpkgs-master" = mkNixpkgsJob {
timer = "hourly";
fromRefspec = "master";
localRefspec = "main";
};
branches."nixpkgs-staging" = mkNixpkgsJob {
timer = "hourly";
fromRefspec = "staging";
};
branches."nixpkgs-release-24.05" = mkNixpkgsJob {
timer = "hourly";
fromRefspec = "release-24.05";
};
branches."nixpkgs-staging-24.05" = mkNixpkgsJob {
timer = "hourly";
fromRefspec = "staging-24.05";
};
branches."nixpkgs-release-23.11" = mkNixpkgsJob {
timer = "hourly";
fromRefspec = "release-23.11";
};
branches."nixpkgs-staging-23.11" = mkNixpkgsJob {
timer = "hourly";
fromRefspec = "staging-23.11";
};
# Testing jobs for personal sandbox branches
branches."raito-unstable-sync" = {
fromUri = "https://github.com/NixOS/nixpkgs";
fromRefspec = "nixos-unstable-small";
localRefspec = "sandbox/raito/raito-unstable-small";
timer = "*-*-* 12:00:00";
};
branches."raito-release-sync" = {
fromUri = "https://github.com/NixOS/nixpkgs";
fromRefspec = "nixos-24.05";
localRefspec = "sandbox/raito/raito-nixos-24.05";
timer = "daily";
};
};
age.secrets.s3-channel-staging-keys.file = ../../secrets/s3-channel-staging-keys.age;
bagel.nixpkgs.channel-scripts = {
enable = true;
otlp.enable = true;
nixpkgsUrl = "https://cl.forkos.org/nixpkgs.git";
hydraUrl = "https://hydra.forkos.org";
binaryCacheUrl = "https://cache.forkos.org";
baseUriForGitRevisions = "https://cl.forkos.org/plugins/gitiles/nixpkgs/+";
s3 = {
release = "bagel-channel-scripts-test";
channel = "bagel-channel-scripts-test";
};
releaseBucketCredentialsFile = config.age.secrets.s3-channel-staging-keys.path;
deployKeyFile = config.age.secrets.priv-ssh-key.path;
extraArgs = [
"--bypass-preflight-checks"
];
channels = import ../../common/channels.nix;
};
i18n.defaultLocale = "fr_FR.UTF-8";
system.stateVersion = "24.05";
deployment.targetHost = "gerrit01.infra.forkos.org";
}
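Aside: mkNixpkgsJob above is plain attribute-set templating; note the localRefspec ? fromRefspec default, which is why the "nixpkgs-staging" job (no explicit localRefspec) syncs staging to staging. A quick evaluation sketch:

let
  mkNixpkgsJob = { timer, fromRefspec, localRefspec ? fromRefspec }: {
    fromUri = "https://github.com/NixOS/nixpkgs";
    inherit fromRefspec localRefspec timer;
  };
in
  mkNixpkgsJob { timer = "hourly"; fromRefspec = "staging"; }
# => { fromUri = "https://github.com/NixOS/nixpkgs"; fromRefspec = "staging";
#      localRefspec = "staging"; timer = "hourly"; }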

@@ -1,47 +0,0 @@
let
ipv6 = {
openssh ="2001:bc8:38ee:100:1000::41";
forgejo = "2001:bc8:38ee:100:1000::40";
};
in
{
networking.hostName = "git";
networking.domain = "infra.forkos.org";
bagel.sysadmin.enable = true;
# Forgejo will be proxied.
bagel.raito.v6-proxy-awareness.enable = true;
bagel.hardware.raito-vm = {
enable = true;
networking = {
nat-lan-mac = "BC:24:11:83:71:56";
wan = {
address = "${ipv6.forgejo}/64";
mac = "BC:24:11:0B:8A:81";
};
};
};
# Add one additional IPv6, so we can have both OpenSSH and
# Forgejo's built-in server bind on port :22.
systemd.network.networks."10-wan".networkConfig.Address = [ "${ipv6.openssh}/64" ];
services.openssh.listenAddresses = [{
addr = "[${ipv6.openssh}]";
}];
# Defaults to network.target, but networkd may take a while to settle and set up
# the required (additional) IPv6 address, leading to sshd not being able to
# bind to the requested IP, crashing 5 times and running into the default
# restart counter limit (5).
systemd.services.sshd.wants = [ "network-online.target" ];
systemd.services.sshd.after = [ "network-online.target" ];
bagel.services.forgejo = {
enable = true;
sshBindAddr = ipv6.forgejo;
};
i18n.defaultLocale = "en_US.UTF-8";
system.stateVersion = "24.05";
deployment.targetHost = "git.infra.forkos.org";
}

@@ -2,6 +2,8 @@
networking.hostName = "meta01";
networking.domain = "infra.forkos.org";
time.timeZone = "Europe/Paris";
bagel.sysadmin.enable = true;
# netbox is proxied.
bagel.raito.v6-proxy-awareness.enable = true;
@@ -19,18 +21,10 @@
enable = true;
domain = "netbox.forkos.org";
};
bagel.meta.monitoring.address = "meta01.infra.forkos.org";
bagel.services.prometheus.enable = true;
bagel.services.loki.enable = true;
bagel.services.grafana.enable = true;
bagel.services.grapevine.enable = true;
bagel.services.pyroscope.enable = true;
bagel.services.tempo.enable = true;
bagel.services.hookshot = {
enable = true;
admins = [
"@k900:0upti.me"
];
};
i18n.defaultLocale = "fr_FR.UTF-8";

@@ -1,45 +0,0 @@
{
config,
lib,
pkgs,
...
}:
{
networking.hostName = "public01";
# TODO: make it the default
networking.domain = "infra.forkos.org";
bagel.sysadmin.enable = true;
# Newsletter is proxied.
bagel.raito.v6-proxy-awareness.enable = true;
bagel.newsletter = {
enable = true;
domain = "news.forkos.org";
};
bagel.hardware.raito-vm = {
enable = true;
networking = {
nat-lan-mac = "BC:24:11:A4:F7:D3";
wan = {
address = "2001:bc8:38ee:100:1000::60/64";
mac = "BC:24:11:DB:B8:10";
};
};
};
bagel.services.s3-revproxy = {
enable = true;
domain = "forkos.org";
s3.apiUrl = "s3.delroth.net";
targets = {
channels = "bagel-channels";
releases = "bagel-releases";
channel-scripts-test = "bagel-channel-scripts-test";
};
};
i18n.defaultLocale = "en_US.UTF-8";
system.stateVersion = "24.05";
deployment.targetHost = "public01.infra.forkos.org";
}

@@ -1,126 +0,0 @@
{ pkgs, lib, ... }:
{
imports = [
./netboot.nix
];
###### Hardware ######
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "ehci_pci" "sd_mod" "sdhci_pci" ];
boot.kernelModules = [ "kvm-amd" ];
boot.loader.grub.device = "/dev/sda";
fileSystems."/" =
{ device = "/dev/disk/by-uuid/58688a5c-e3ce-4868-804b-4e34d1370f36";
fsType = "f2fs";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/38caa628-3b6d-4fb4-8767-beee09a196a6";
fsType = "ext2";
};
nixpkgs.hostPlatform = "x86_64-linux";
hardware.cpu.amd.updateMicrocode = true;
# Enable serial output
boot.loader.grub.extraConfig = ''
serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1
terminal_input serial
terminal_output serial
'';
boot.kernelParams = [
"console=ttyS0,115200"
"console=tty1"
];
###### Config #######
boot.initrd.systemd.enable = true;
networking.useNetworkd = true;
systemd.network = {
netdevs = {
"40-uplink" = {
netdevConfig = {
Kind = "bond";
Name = "uplink";
};
bondConfig = {
Mode = "802.3ad";
TransmitHashPolicy = "layer3+4";
};
};
"40-oob" = {
netdevConfig = {
Kind = "bond";
Name = "oob";
};
bondConfig = {
Mode = "802.3ad";
TransmitHashPolicy = "layer3+4";
};
};
};
networks = {
"40-enp1s0" = {
name = "enp1s0";
bond = [ "uplink" ];
};
"40-enp2s0" = {
name = "enp2s0";
bond = [ "uplink" ];
};
"40-enp3s0" = {
name = "enp3s0";
bond = [ "oob" ];
};
"40-enp4s0" = {
name = "enp4s0";
bond = [ "oob" ];
};
} // lib.listToAttrs (map (x: lib.nameValuePair "40-bmc${toString x}" {
name = "bmc${toString x}";
address = [ "192.168.1.${toString (x*4 + 1)}/30" ];
#address = [ "192.168.${toString x}.1/24" ];
networkConfig.DHCPServer = true;
}) (lib.genList lib.id 12));
};
networking.nftables.enable = true;
networking.firewall.extraInputRules = ''
iifname { "bmc*" } meta nfproto ipv4 udp dport 67 accept comment "DHCP server"
'';
networking.vlans = lib.listToAttrs (map (x: lib.nameValuePair "bmc${toString x}" {
interface = "oob";
id = 101 + x;
}) (lib.genList lib.id 12));
networking.interfaces = {
uplink = {
ipv6.addresses = [
{
address = "2a01:584:11::2";
prefixLength = 64;
}
];
};
};
networking.defaultGateway6 = { interface = "uplink"; address = "2a01:584:11::1"; };
networking.hostName = "vpn-gw";
networking.domain = "wob01.infra.forkos.org";
deployment.targetHost = "2a01:584:11::2";
bagel.sysadmin.enable = true;
environment.systemPackages = [ pkgs.ipmitool ];
system.stateVersion = "24.05";
}

@@ -1,61 +0,0 @@
{ config, lib, pkgs, nodes, modulesPath, ... }:
# The way the connection is established is specific to the wob01 site and the Intel S2600KPR blades.
# Proper netboot is not possible, because while the blades and the APU board (which is the netboot
# server here) are in the same L2 network, the uplink connection of each blade is an LACP LAG,
# meaning that the switch on the other side will only enable the port if it sees valid LACP packets.
# We work around this by presenting a virtual floppy drive using the "IUSB" protocol of the BMC.
# This virtual floppy drive contains a per-blade customized initramfs which will initialize the
# network connection including IP configuration and load the actual image off hydra.
let
netboot-server-ip = "2a01:584:11::2";
netbootNodes = lib.filterAttrs (_: node: node.config.bagel.baremetal.builders.enable && node.config.bagel.baremetal.builders.netboot) nodes;
in {
assertions = [
{
assertion = !(lib.elem 443 config.networking.firewall.allowedTCPPorts);
message = ''
Port 443 is in networking.firewalls.allowedTCPPorts, but should be only manually
allowed for specific IPs and source ports in ${builtins.toJSON __curPos}
'';
}
];
systemd.services = lib.mapAttrs' (nodename: node: let
bmcIp = "192.168.1.${toString (node.config.bagel.baremetal.builders.num * 4 + 2)}";
notipxe = node.config.system.build.notipxe.config.system.build.usbImage;
in lib.nameValuePair "iusb-spoof-${nodename}" {
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Restart = "always";
};
script = ''
AUTH_TOKEN=$(${pkgs.iusb-spoof}/bin/make-token ${bmcIp})
exec ${pkgs.iusb-spoof}/bin/iusb-spoof -r ${bmcIp} 5123 $AUTH_TOKEN ${notipxe}
'';
}) netbootNodes;
# Since the builders are stateless, they can not store their ssh hostkeys
networking.firewall.allowedTCPPorts = [ 80 ]; # for ACME
networking.firewall.extraInputRules = ''
ip6 saddr 2a01:584:11::/64 tcp sport < 1024 tcp dport 443 accept;
'';
services.nginx = {
enable = true;
virtualHosts."vpn-gw.wob01.infra.forkos.org" = {
enableACME = true;
forceSSL = true;
locations = lib.mapAttrs' (nodename: node: let
ip = "2a01:584:11::1:${toString node.config.bagel.baremetal.builders.num}";
in lib.nameValuePair "/${nodename}/" {
root = "/var/www";
extraConfig = ''
allow ${ip};
deny all;
'';
}) netbootNodes;
};
};
}
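Aside: the address arithmetic here pairs up with the previous file. The gateway holds 192.168.1.(num*4+1)/30 on each BMC VLAN, and this service reaches the matching BMC at (num*4+2), i.e. the second usable address of the same /30. A quick check of the scheme (assumes <nixpkgs> resolves):

let
  lib = import <nixpkgs/lib>;
in
  map (num: {
    vlan    = "bmc${toString num}";
    gateway = "192.168.1.${toString (num * 4 + 1)}/30"; # held by wob-vpn-gw
    bmc     = "192.168.1.${toString (num * 4 + 2)}";    # targeted by iusb-spoof
  }) (lib.genList lib.id 3)
# => bmc0 uses .1/.2, bmc1 uses .5/.6, bmc2 uses .9/.10; each pair fills one /30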

@@ -1,14 +0,0 @@
# A wrapper for colmena that prevents accidentally deploying changes without
# having pulled.
{ colmena, runCommandNoCC }:
runCommandNoCC "colmena-wrapper"
{
env.colmena = "${colmena}/bin/colmena";
} ''
mkdir -p $out
ln -s ${colmena}/share $out/share
mkdir $out/bin
substituteAll ${./colmena-wrapper.sh.in} $out/bin/colmena
chmod +x $out/bin/colmena
''

@@ -1,29 +0,0 @@
#!/usr/bin/env bash
doChecks() {
# creates refs in the refs/prefetch/remotes/origin namespace
echo "Prefetching repo changes..." >&2
git fetch --quiet --prefetch --no-write-fetch-head origin
diffs=$(git rev-list --left-right --count HEAD...refs/prefetch/remotes/origin/main)
only_in_local=$(echo "$diffs" | cut -f1)
only_in_main=$(echo "$diffs" | cut -f2)
if [[ $only_in_main -gt 0 && ! -v FOOTGUN_ME_UWU ]]; then
echo >&2
echo "Attempting to deploy when main has $only_in_main commits not in your branch!" >&2
echo "This will probably revert someone's changes. Consider merging them." >&2
echo "If you really mean it, set the environment variable FOOTGUN_ME_UWU" >&2
exit 1
fi
if [[ $only_in_local -gt 0 ]]; then
echo "You have $only_in_local commits not yet pushed to main. Reminder to push them after :)" >&2
fi
}
if [[ $1 == 'apply' ]]; then
doChecks
fi
exec @colmena@ "$@"

@@ -1,9 +1,3 @@
[
(final: prev: {
iusb-spoof = final.callPackage ./iusb-spoof.nix {};
u-root = final.callPackage ./u-root {};
pyroscope = final.callPackage ./pyroscope {};
s3-revproxy = final.callPackage ./s3-revproxy {};
git-gc-preserve = final.callPackage ./git-gc-preserve {};
})
(import ./gerrit.nix)
]

overlays/gerrit.nix (new file): 11 lines changed
@@ -0,0 +1,11 @@
self: super: {
buildGerrit = self.callPackage ../pkgs/gerrit { };
gerrit = self.buildGerrit { };
buildGerritBazelPlugin = self.callPackage ../pkgs/gerrit_plugins/builder.nix {
inherit (self) buildGerrit;
};
gerritPlugins = {
code-owners = self.callPackage ../pkgs/gerrit_plugins/code-owners { };
oauth = self.callPackage ../pkgs/gerrit_plugins/oauth { };
};
}
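Aside: these overlay attributes become visible wherever the overlay list is applied (see the nixpkgs.overlays = import ../overlays; line in the common module hunk near the top). A minimal consumption sketch, with the import path being an assumption about the checkout layout:

let
  pkgs = import <nixpkgs> {
    overlays = [ (import ./overlays/gerrit.nix) ];  # hypothetical checkout-relative path
  };
in
  pkgs.gerritPlugins.code-owners  # resolved via buildGerritBazelPlugin from this overlay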

@@ -1,9 +0,0 @@
{ writeShellApplication, git, nettools }:
writeShellApplication {
name = "git-gc-preserve";
runtimeInputs = [ git nettools ];
text = (builtins.readFile ./script.sh);
}

@@ -1,132 +0,0 @@
#!/usr/bin/env bash
set +o errexit
# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
usage() { # exit code
cat <<-EOF
NAME
git-gc-preserve - Run git gc and preserve old packs to avoid races for JGit
SYNOPSIS
git gc-preserve
DESCRIPTION
Runs git gc and can preserve old packs to avoid races with concurrently
executed commands in JGit.
This command uses custom git config options to configure if preserved packs
from the last run of git gc should be pruned and if packs should be preserved.
This is similar to the implementation in JGit [1] which is used by
JGit to avoid errors [2] in such situations.
The command prevents concurrent runs of the command on the same repository
by acquiring an exclusive file lock on the file
"\$repopath/gc-preserve.pid"
If it cannot acquire the lock it fails immediately with exit code 3.
Failure Exit Codes
1: General failure
2: Couldn't determine repository path. If the current working directory
is outside of the working tree of the git repository use git option
--git-dir to pass the root path of the repository.
E.g.
$ git --git-dir ~/git/foo gc-preserve
3: Another process already runs $0 on the same repository
[1] https://git.eclipse.org/r/c/jgit/jgit/+/87969
[2] https://git.eclipse.org/r/c/jgit/jgit/+/122288
CONFIGURATION
"pack.prunepreserved": if set to "true" preserved packs from the last gc run
are pruned before current packs are preserved.
"pack.preserveoldpacks": if set to "true" current packs will be hard linked
to objects/pack/preserved before git gc is executed. JGit will
fallback to the preserved packs in this directory in case it comes
across missing objects which might be caused by a concurrent run of
git gc.
EOF
exit "$1"
}
# acquire file lock, unlock when the script exits
lock() { # repo
readonly LOCKFILE="$1/gc-preserve.pid"
test -f "$LOCKFILE" || touch "$LOCKFILE"
exec 9> "$LOCKFILE"
if flock -nx 9; then
echo -n "$$ $USER@$(hostname)" >&9
trap unlock EXIT
else
echo "$0 is already running"
exit 3
fi
}
unlock() {
# only delete if the file descriptor 9 is open
if { : >&9 ; } &> /dev/null; then
rm -f "$LOCKFILE"
fi
# close the file handle to release file lock
exec 9>&-
}
# prune preserved packs if pack.prunepreserved == true
prune_preserved() { # repo
configured=$(git --git-dir="$1" config --get pack.prunepreserved)
if [ "$configured" != "true" ]; then
return 0
fi
local preserved=$1/objects/pack/preserved
if [ -d "$preserved" ]; then
printf "Pruning old preserved packs: "
count=$(find "$preserved" -name "*.old-pack" | wc -l)
rm -rf "$preserved"
echo "$count, done."
fi
}
# preserve packs if pack.preserveoldpacks == true
preserve_packs() { # repo
configured=$(git --git-dir="$1" config --get pack.preserveoldpacks)
if [ "$configured" != "true" ]; then
return 0
fi
local packdir=$1/objects/pack
pushd "$packdir" >/dev/null || exit 1
mkdir -p preserved
printf "Preserving packs: "
count=0
for file in pack-*{.pack,.idx} ; do
ln -f "$file" preserved/"$(get_preserved_packfile_name "$file")"
if [[ "$file" == pack-*.pack ]]; then
((count++))
fi
done
echo "$count, done."
popd >/dev/null || exit 1
}
# pack-0...2.pack to pack-0...2.old-pack
# pack-0...2.idx to pack-0...2.old-idx
get_preserved_packfile_name() { # packfile > preserved_packfile
local old=${1/%\.pack/.old-pack}
old=${old/%\.idx/.old-idx}
echo "$old"
}
# main
while [ $# -gt 0 ] ; do
case "$1" in
-u|-h) usage 0 ;;
esac
shift
done
args=$(git rev-parse --sq-quote "$@")
repopath=$(git rev-parse --git-dir)
if [ -z "$repopath" ]; then
usage 2
fi
lock "$repopath"
prune_preserved "$repopath"
preserve_packs "$repopath"
git gc ${args:+"$args"} || { EXIT_CODE="$?"; echo "git gc failed"; exit "$EXIT_CODE"; }
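For reference, a minimal sketch of how this script is driven once it is on PATH as git-gc-preserve; the repository path below is a made-up example, and the config keys are the ones documented under CONFIGURATION in the usage text above:
# enable both behaviours described under CONFIGURATION, then run the wrapper
git -C /srv/git/example.git config pack.preserveoldpacks true
git -C /srv/git/example.git config pack.prunepreserved true
git --git-dir /srv/git/example.git gc-preserve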

View file

@ -1,23 +0,0 @@
{ rustPlatform, python3, makeWrapper }:
let
pythonEnv = python3.withPackages (p: with p; [ requests ]);
in
rustPlatform.buildRustPackage rec {
pname = "iusb-spoof";
version = "0.1.0";
src = builtins.fetchGit {
url = "https://git.lix.systems/the-distro/iusb-spoof/";
rev = "fafd47986239cc2f4dfbbae74b17555608806581";
};
cargoLock.lockFile = src + "/Cargo.lock";
nativeBuildInputs = [ makeWrapper ];
postInstall = ''
install -Dm644 $src/make-token.py $out/opt/make-token.py
makeWrapper ${pythonEnv.interpreter} $out/bin/make-token --add-flags "$out/opt/make-token.py"
'';
}

View file

@ -1,42 +0,0 @@
{ lib
, buildGoModule
, fetchFromGitHub
}:
buildGoModule rec {
pname = "pyroscope";
version = "1.7.1";
src = fetchFromGitHub {
owner = "grafana";
repo = "pyroscope";
rev = "v${version}";
hash = "sha256-iMP67J0Q8Cgo52iImMzAM3PEkk6uLF7r6v9TyXZVaIE=";
};
env.GOWORK = "off";
vendorHash = "sha256-ggntpnU9s2rpkv6S0LnZNexrdkBsdsUrGPc93SVrK4M=";
subPackages = [ "cmd/profilecli" "cmd/pyroscope" ];
ldflags = [
"-extldflags"
"-static"
"-s"
"-w"
"-X=github.com/grafana/pyroscope/pkg/util/build.Branch=${src.rev}"
"-X=github.com/grafana/pyroscope/pkg/util/build.Version=${version}"
"-X=github.com/grafana/pyroscope/pkg/util/build.Revision=${src.rev}"
"-X=github.com/grafana/pyroscope/pkg/util/build.BuildDate=1970-01-01T00:00:00Z"
];
meta = with lib; {
description = "Continuous profiling platform";
homepage = "https://github.com/grafana/pyroscope";
changelog = "https://github.com/grafana/pyroscope/blob/${src.rev}/CHANGELOG.md";
license = licenses.agpl3Only;
maintainers = with maintainers; [ raitobezarius ];
mainProgram = "pyroscope";
};
}

View file

@ -1,39 +0,0 @@
# Originally written by Jade Lovelace for Lix.
{ lib, buildGoModule, fetchFromGitHub }:
buildGoModule rec {
pname = "s3-revproxy";
version = "4.15.0";
src = fetchFromGitHub {
owner = "oxyno-zeta";
repo = "s3-proxy";
rev = "v${version}";
hash = "sha256-q0cfAo8Uz7wtKljmSDaJ320bjg2yXydvvxubAsMKzbc=";
};
vendorHash = "sha256-dOwNQtTfOCQcjgNBV/FeWdwbW9xi1OK5YD7PBPPDKOQ=";
ldflags = [
"-X github.com/oxyno-zeta/s3-proxy/pkg/s3-proxy/version.Version=${version}"
"-X github.com/oxyno-zeta/s3-proxy/pkg/s3-proxy/version.Metadata="
];
postPatch = ''
# Refer to the included templates in the package instead of cwd-relative
sed -i "s#Path = \"templates/#Path = \"$out/share/s3-revproxy/templates/#" pkg/s3-proxy/config/config.go
'';
postInstall = ''
mkdir -p $out/share/s3-revproxy
cp -r templates/ $out/share/s3-revproxy/templates
'';
meta = {
description = "S3 Reverse Proxy with GET, PUT and DELETE methods and authentication (OpenID Connect and Basic Auth)";
homepage = "https://oxyno-zeta.github.io/s3-proxy";
# hm, not having a maintainers entry is kind of inconvenient
maintainers = [ ];
licenses = lib.licenses.asl20;
mainProgram = "s3-proxy";
};
}

View file

@ -1,20 +0,0 @@
{ buildGoModule, fetchFromGitHub }:
buildGoModule rec {
pname = "u-root";
version = "0.14.0";
src = fetchFromGitHub {
owner = "u-root";
repo = "u-root";
rev = "v${version}";
hash = "sha256-8zA3pHf45MdUcq/MA/mf0KCTxB1viHieU/oigYwIPgo=";
};
patches = [
./u-root-allow-https.patch
];
vendorHash = null;
doCheck = false;
}

View file

@ -1,12 +0,0 @@
diff --git a/pkg/curl/schemes.go b/pkg/curl/schemes.go
index 8bac3bc0..cd396cbc 100644
--- a/pkg/curl/schemes.go
+++ b/pkg/curl/schemes.go
@@ -81,6 +81,7 @@ var (
DefaultSchemes = Schemes{
"tftp": DefaultTFTPClient,
"http": DefaultHTTPClient,
+ "https": DefaultHTTPClient,
"file": &LocalFileClient{},
}
)

View file

@ -1,59 +0,0 @@
diff --git a/services/repository/branch.go b/services/repository/branch.go
index e1a313749f..5a8d823eef 100644
--- a/services/repository/branch.go
+++ b/services/repository/branch.go
@@ -26,7 +26,6 @@ import (
"code.gitea.io/gitea/modules/timeutil"
webhook_module "code.gitea.io/gitea/modules/webhook"
notify_service "code.gitea.io/gitea/services/notify"
- files_service "code.gitea.io/gitea/services/repository/files"
"xorm.io/builder"
)
@@ -129,21 +128,7 @@ func loadOneBranch(ctx context.Context, repo *repo_model.Repository, dbBranch *g
p := protectedBranches.GetFirstMatched(branchName)
isProtected := p != nil
- var divergence *git.DivergeObject
-
- // it's not default branch
- if repo.DefaultBranch != dbBranch.Name && !dbBranch.IsDeleted {
- var err error
- divergence, err = files_service.CountDivergingCommits(ctx, repo, git.BranchPrefix+branchName)
- if err != nil {
- return nil, fmt.Errorf("CountDivergingCommits: %v", err)
- }
- }
-
- if divergence == nil {
- // tolerate the error that we cannot get divergence
- divergence = &git.DivergeObject{Ahead: -1, Behind: -1}
- }
+ divergence := &git.DivergeObject{Ahead: -1, Behind: -1}
pr, err := issues_model.GetLatestPullRequestByHeadInfo(ctx, repo.ID, branchName)
if err != nil {
diff --git a/templates/repo/branch/list.tmpl b/templates/repo/branch/list.tmpl
index a577fed450..e102796315 100644
--- a/templates/repo/branch/list.tmpl
+++ b/templates/repo/branch/list.tmpl
@@ -102,19 +102,6 @@
{{end}}
</td>
<td class="two wide ui">
- {{if and (not .DBBranch.IsDeleted) $.DefaultBranchBranch}}
- <div class="commit-divergence">
- <div class="bar-group">
- <div class="count count-behind">{{.CommitsBehind}}</div>
- {{/* old code bears 0/0.0 = NaN output, so it might output invalid "width: NaNpx", it just works and doesn't caues any problem. */}}
- <div class="bar bar-behind" style="width: {{Eval 100 "*" .CommitsBehind "/" "(" .CommitsBehind "+" .CommitsAhead "+" 0.0 ")"}}%"></div>
- </div>
- <div class="bar-group">
- <div class="count count-ahead">{{.CommitsAhead}}</div>
- <div class="bar bar-ahead" style="width: {{Eval 100 "*" .CommitsAhead "/" "(" .CommitsBehind "+" .CommitsAhead "+" 0.0 ")"}}%"></div>
- </div>
- </div>
- {{end}}
</td>
<td class="two wide right aligned">
{{if not .LatestPullRequest}}

View file

@ -1,32 +0,0 @@
diff --git a/routers/web/repo/commit.go b/routers/web/repo/commit.go
index 718454e063..8fa299710c 100644
--- a/routers/web/repo/commit.go
+++ b/routers/web/repo/commit.go
@@ -408,12 +408,6 @@ func Diff(ctx *context.Context) {
}
}
- ctx.Data["BranchName"], err = commit.GetBranchName()
- if err != nil {
- ctx.ServerError("commit.GetBranchName", err)
- return
- }
-
ctx.HTML(http.StatusOK, tplCommitPage)
}
diff --git a/templates/repo/commit_page.tmpl b/templates/repo/commit_page.tmpl
index c37fb46975..18c9cf18f8 100644
--- a/templates/repo/commit_page.tmpl
+++ b/templates/repo/commit_page.tmpl
@@ -71,8 +71,8 @@
"branchForm" "branch-dropdown-form"
"branchURLPrefix" (printf "%s/_cherrypick/%s/" $.RepoLink .CommitID) "branchURLSuffix" ""
"setAction" true "submitForm" true}}
- <form method="get" action="{{$.RepoLink}}/_cherrypick/{{.CommitID}}/{{if $.BranchName}}{{PathEscapeSegments $.BranchName}}{{else}}{{PathEscapeSegments $.Repository.DefaultBranch}}{{end}}" id="branch-dropdown-form">
- <input type="hidden" name="ref" value="{{if $.BranchName}}{{$.BranchName}}{{else}}{{$.Repository.DefaultBranch}}{{end}}">
+ <form method="get" action="{{$.RepoLink}}/_cherrypick/{{.CommitID}}/{{PathEscapeSegments $.Repository.DefaultBranch}}" id="branch-dropdown-form">
+ <input type="hidden" name="ref" value="{{$.Repository.DefaultBranch}}">
<input type="hidden" name="refType" value="branch">
<input type="hidden" id="cherry-pick-type" name="cherry-pick-type"><br>
<button type="submit" id="cherry-pick-submit" class="ui primary button"></button>

View file

@ -1,40 +0,0 @@
{ forgejo-lts }:
forgejo-lts.overrideAttrs (prev: {
patches = [
# Branch divergence calculations for a single branch may take 100-200ms on something as big
# as nixpkgs. The branch view defaults to 20 branches for each page, taking roughly 3s to
# calculate each branch sequentially and render, while consuming a single core at 100%.
# The idea is to look into making this less expensive or async.
# But for now, to get this going, we will simply drop that metric.
./branch-view_remove-expensive-commit-divergence-metric.patch
# This is literally broken and eats resources for nothing of value.
# We should upstream this.
# The tl;dr is: it calculates the nearest branch for the requested commit at
# /:owner/:repo/commit/:commit to use it as the default cherry-pick target branch
# in a drop-down that only users with commit perms can actually view and use.
# It's expensive to calculate and happens on every request to /commit/:commit.
# To add insult to injury, it's hardly of any use: the nearest branch of a commit
# will almost always be a branch that already carries the commit, i.e. the one
# branch you most likely don't want to cherry-pick to.
./commit-view_fix-broken-and-expensive-cherry-pick-default-branch-selection.patch
# Disable various /:owner/:repo/activity/ sub-views. They are expensive, which is
# totally fine and expected. There is even proper caching in place.
# However, on a scale of nixpkgs, those calculations take ages, while, of course,
# pinning a single CPU core at 100%.
# For now, we will simply disable this feature.
# Due to the 501 status code it returns, the frontend prints a "Not implemented"
# error, saving us from patching the frontend while still providing helpful
# user-facing error text.
# It should be noted that this particular status code has the downside of being
# in the 5xx range, meaning it will show up as such in our prometheus metrics.
./disable-expensive-repository-activity-stats.patch
# Migrations and pull-mirrors are easily abused to bring a public instance to a complete halt.
# Both features can be disabled via repository.DISABLE_MIGRATIONS and mirror.ENABLE, but we want to keep
# this functionality for admins.
./limit-migrations-and-pull-mirrors-to-admins.patch
];
})
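A quick smoke test for the 501 behaviour introduced by the activity-stats patch; the hostname and repo are placeholders, and the path comes from the routes patched in web.go below:
# expect 501 once the patch is applied
curl -s -o /dev/null -w '%{http_code}\n' \
  'https://git.example.org/some-owner/some-repo/activity/contributors/data'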

View file

@ -1,34 +0,0 @@
diff --git a/routers/web/web.go b/routers/web/web.go
index ee9694f41c..f55b8d6f62 100644
--- a/routers/web/web.go
+++ b/routers/web/web.go
@@ -57,6 +57,10 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+func endpointNotImplemented(ctx *context.Context) {
+ ctx.JSON(http.StatusNotImplemented, "This endpoint has been removed due to performance issues with it and as such is no longer implemented.")
+}
+
// optionsCorsHandler return a http handler which sets CORS options if enabled by config, it blocks non-CORS OPTIONS requests.
func optionsCorsHandler() func(next http.Handler) http.Handler {
var corsHandler func(next http.Handler) http.Handler
@@ -1425,15 +1429,15 @@ func registerRoutes(m *web.Route) {
m.Get("/{period}", repo.Activity)
m.Group("/contributors", func() {
m.Get("", repo.Contributors)
- m.Get("/data", repo.ContributorsData)
+ m.Get("/data", endpointNotImplemented)
}, repo.MustBeNotEmpty, context.RequireRepoReaderOr(unit.TypeCode))
m.Group("/code-frequency", func() {
m.Get("", repo.CodeFrequency)
- m.Get("/data", repo.CodeFrequencyData)
+ m.Get("/data", endpointNotImplemented)
}, repo.MustBeNotEmpty, context.RequireRepoReaderOr(unit.TypeCode))
m.Group("/recent-commits", func() {
m.Get("", repo.RecentCommits)
- m.Get("/data", repo.RecentCommitsData)
+ m.Get("/data", endpointNotImplemented)
}, repo.MustBeNotEmpty, context.RequireRepoReaderOr(unit.TypeCode))
}, context.RepoRef(), context.RequireRepoReaderOr(unit.TypeCode, unit.TypePullRequests, unit.TypeIssues, unit.TypeReleases))

View file

@ -1,53 +0,0 @@
diff --git a/routers/api/v1/repo/migrate.go b/routers/api/v1/repo/migrate.go
index 2caaa130e8..455e89e93e 100644
--- a/routers/api/v1/repo/migrate.go
+++ b/routers/api/v1/repo/migrate.go
@@ -12,7 +12,6 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/models/db"
- "code.gitea.io/gitea/models/organization"
"code.gitea.io/gitea/models/perm"
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
@@ -86,22 +85,7 @@ func Migrate(ctx *context.APIContext) {
}
if !ctx.Doer.IsAdmin {
- if !repoOwner.IsOrganization() && ctx.Doer.ID != repoOwner.ID {
- ctx.Error(http.StatusForbidden, "", "Given user is not an organization.")
- return
- }
-
- if repoOwner.IsOrganization() {
- // Check ownership of organization.
- isOwner, err := organization.OrgFromUser(repoOwner).IsOwnedBy(ctx, ctx.Doer.ID)
- if err != nil {
- ctx.Error(http.StatusInternalServerError, "IsOwnedBy", err)
- return
- } else if !isOwner {
- ctx.Error(http.StatusForbidden, "", "Given user is not owner of organization.")
- return
- }
- }
+ ctx.Error(http.StatusForbidden, "", "You need to be an administrator of this Forgejo instance to be able to create mirrors.")
}
remoteAddr, err := forms.ParseRemoteAddr(form.CloneAddr, form.AuthUsername, form.AuthPassword)
diff --git a/routers/web/repo/migrate.go b/routers/web/repo/migrate.go
index 97b0c425ea..554a470eab 100644
--- a/routers/web/repo/migrate.go
+++ b/routers/web/repo/migrate.go
@@ -150,6 +150,12 @@ func handleMigrateRemoteAddrError(ctx *context.Context, err error, tpl base.TplN
// MigratePost response for migrating from external git repository
func MigratePost(ctx *context.Context) {
form := web.GetForm(ctx).(*forms.MigrateRepoForm)
+
+ if !ctx.Doer.IsAdmin {
+ ctx.Error(http.StatusForbidden, "MigratePost: you need to be site administrator to use migrations and mirrors")
+ return
+ }
+
if setting.Repository.DisableMigrations {
ctx.Error(http.StatusForbidden, "MigratePost: the site administrator has disabled migrations")
return

View file

@ -0,0 +1,37 @@
From 084e4f92fb58f7cd85303ba602fb3c40133c8fcc Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown <git@lukegb.com>
Date: Thu, 2 Jul 2020 23:02:32 +0100
Subject: [PATCH 1/3] Syntax highlight nix
---
.../app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts | 1 +
resources/com/google/gerrit/server/mime/mime-types.properties | 1 +
2 files changed, 2 insertions(+)
diff --git a/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts b/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts
index a9f88bdd81..385249f280 100644
--- a/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts
+++ b/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts
@@ -93,6 +93,7 @@ const LANGUAGE_MAP = new Map<string, string>([
['text/x-vhdl', 'vhdl'],
['text/x-yaml', 'yaml'],
['text/vbscript', 'vbscript'],
+ ['text/x-nix', 'nix'],
]);
const CLASS_PREFIX = 'gr-diff gr-syntax gr-syntax-';
diff --git a/resources/com/google/gerrit/server/mime/mime-types.properties b/resources/com/google/gerrit/server/mime/mime-types.properties
index 2f9561ba2e..739818ec05 100644
--- a/resources/com/google/gerrit/server/mime/mime-types.properties
+++ b/resources/com/google/gerrit/server/mime/mime-types.properties
@@ -149,6 +149,7 @@ mscin = text/x-mscgen
msgenny = text/x-msgenny
nb = text/x-mathematica
nginx.conf = text/x-nginx-conf
+nix = text/x-nix
nsh = text/x-nsis
nsi = text/x-nsis
nt = text/n-triples
--
2.37.3

View file

@ -0,0 +1,37 @@
From aedf8ac8fa5113843bcd83ff85e2d9f3bffdb16c Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown <git@lukegb.com>
Date: Thu, 2 Jul 2020 23:02:43 +0100
Subject: [PATCH 2/3] Syntax highlight rules.pl
---
.../app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts | 1 +
resources/com/google/gerrit/server/mime/mime-types.properties | 1 +
2 files changed, 2 insertions(+)
diff --git a/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts b/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts
index 385249f280..7cb3068494 100644
--- a/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts
+++ b/polygerrit-ui/app/embed/diff/gr-syntax-layer/gr-syntax-layer-worker.ts
@@ -68,6 +68,7 @@ const LANGUAGE_MAP = new Map<string, string>([
['text/x-perl', 'perl'],
['text/x-pgsql', 'pgsql'], // postgresql
['text/x-php', 'php'],
+ ['text/x-prolog', 'prolog'],
['text/x-properties', 'properties'],
['text/x-protobuf', 'protobuf'],
['text/x-puppet', 'puppet'],
diff --git a/resources/com/google/gerrit/server/mime/mime-types.properties b/resources/com/google/gerrit/server/mime/mime-types.properties
index 739818ec05..58eb727bf9 100644
--- a/resources/com/google/gerrit/server/mime/mime-types.properties
+++ b/resources/com/google/gerrit/server/mime/mime-types.properties
@@ -200,6 +200,7 @@ rq = application/sparql-query
rs = text/x-rustsrc
rss = application/xml
rst = text/x-rst
+rules.pl = text/x-prolog
README.md = text/x-gfm
s = text/x-gas
sas = text/x-sas
--
2.37.3

View file

@ -0,0 +1,215 @@
From f49c50ca9a84ca374b7bd91c171bbea0457f2c7a Mon Sep 17 00:00:00 2001
From: Luke Granger-Brown <git@lukegb.com>
Date: Thu, 2 Jul 2020 23:03:02 +0100
Subject: [PATCH 3/3] Add titles to CLs over HTTP
---
.../gerrit/httpd/raw/IndexHtmlUtil.java | 13 +++-
.../google/gerrit/httpd/raw/IndexServlet.java | 8 ++-
.../google/gerrit/httpd/raw/StaticModule.java | 5 +-
.../gerrit/httpd/raw/TitleComputer.java | 67 +++++++++++++++++++
.../gerrit/httpd/raw/PolyGerritIndexHtml.soy | 4 +-
5 files changed, 89 insertions(+), 8 deletions(-)
create mode 100644 java/com/google/gerrit/httpd/raw/TitleComputer.java
diff --git a/java/com/google/gerrit/httpd/raw/IndexHtmlUtil.java b/java/com/google/gerrit/httpd/raw/IndexHtmlUtil.java
index 72bfe40c3b..439bd73b44 100644
--- a/java/com/google/gerrit/httpd/raw/IndexHtmlUtil.java
+++ b/java/com/google/gerrit/httpd/raw/IndexHtmlUtil.java
@@ -41,6 +41,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
@@ -62,13 +63,14 @@ public class IndexHtmlUtil {
String faviconPath,
Map<String, String[]> urlParameterMap,
Function<String, SanitizedContent> urlInScriptTagOrdainer,
- String requestedURL)
+ String requestedURL,
+ TitleComputer titleComputer)
throws URISyntaxException, RestApiException {
ImmutableMap.Builder<String, Object> data = ImmutableMap.builder();
data.putAll(
staticTemplateData(
canonicalURL, cdnPath, faviconPath, urlParameterMap, urlInScriptTagOrdainer))
- .putAll(dynamicTemplateData(gerritApi, requestedURL));
+ .putAll(dynamicTemplateData(gerritApi, requestedURL, titleComputer));
Set<String> enabledExperiments = new HashSet<>();
enabledExperiments.addAll(experimentFeatures.getEnabledExperimentFeatures());
// Add all experiments enabled through url
@@ -81,7 +83,8 @@ public class IndexHtmlUtil {
/** Returns dynamic parameters of {@code index.html}. */
public static ImmutableMap<String, Object> dynamicTemplateData(
- GerritApi gerritApi, String requestedURL) throws RestApiException, URISyntaxException {
+ GerritApi gerritApi, String requestedURL, TitleComputer titleComputer)
+ throws RestApiException, URISyntaxException {
ImmutableMap.Builder<String, Object> data = ImmutableMap.builder();
Map<String, SanitizedContent> initialData = new HashMap<>();
Server serverApi = gerritApi.config().server();
@@ -129,6 +132,10 @@ public class IndexHtmlUtil {
}
data.put("gerritInitialData", initialData);
+
+ Optional<String> title = titleComputer.computeTitle(requestedURL);
+ title.ifPresent(s -> data.put("title", s));
+
return data.build();
}
diff --git a/java/com/google/gerrit/httpd/raw/IndexServlet.java b/java/com/google/gerrit/httpd/raw/IndexServlet.java
index fcb821e5ae..e1464b992b 100644
--- a/java/com/google/gerrit/httpd/raw/IndexServlet.java
+++ b/java/com/google/gerrit/httpd/raw/IndexServlet.java
@@ -48,13 +48,15 @@ public class IndexServlet extends HttpServlet {
private final ExperimentFeatures experimentFeatures;
private final SoySauce soySauce;
private final Function<String, SanitizedContent> urlOrdainer;
+ private TitleComputer titleComputer;
IndexServlet(
@Nullable String canonicalUrl,
@Nullable String cdnPath,
@Nullable String faviconPath,
GerritApi gerritApi,
- ExperimentFeatures experimentFeatures) {
+ ExperimentFeatures experimentFeatures,
+ TitleComputer titleComputer) {
this.canonicalUrl = canonicalUrl;
this.cdnPath = cdnPath;
this.faviconPath = faviconPath;
@@ -69,6 +71,7 @@ public class IndexServlet extends HttpServlet {
(s) ->
UnsafeSanitizedContentOrdainer.ordainAsSafe(
s, SanitizedContent.ContentKind.TRUSTED_RESOURCE_URI);
+ this.titleComputer = titleComputer;
}
@Override
@@ -86,7 +89,8 @@ public class IndexServlet extends HttpServlet {
faviconPath,
parameterMap,
urlOrdainer,
- getRequestUrl(req));
+ getRequestUrl(req),
+ titleComputer);
renderer = soySauce.renderTemplate("com.google.gerrit.httpd.raw.Index").setData(templateData);
} catch (URISyntaxException | RestApiException e) {
throw new IOException(e);
diff --git a/java/com/google/gerrit/httpd/raw/StaticModule.java b/java/com/google/gerrit/httpd/raw/StaticModule.java
index 15dcf42e0e..9f56bf33ce 100644
--- a/java/com/google/gerrit/httpd/raw/StaticModule.java
+++ b/java/com/google/gerrit/httpd/raw/StaticModule.java
@@ -241,10 +241,11 @@ public class StaticModule extends ServletModule {
@CanonicalWebUrl @Nullable String canonicalUrl,
@GerritServerConfig Config cfg,
GerritApi gerritApi,
- ExperimentFeatures experimentFeatures) {
+ ExperimentFeatures experimentFeatures,
+ TitleComputer titleComputer) {
String cdnPath = options.devCdn().orElse(cfg.getString("gerrit", null, "cdnPath"));
String faviconPath = cfg.getString("gerrit", null, "faviconPath");
- return new IndexServlet(canonicalUrl, cdnPath, faviconPath, gerritApi, experimentFeatures);
+ return new IndexServlet(canonicalUrl, cdnPath, faviconPath, gerritApi, experimentFeatures, titleComputer);
}
@Provides
diff --git a/java/com/google/gerrit/httpd/raw/TitleComputer.java b/java/com/google/gerrit/httpd/raw/TitleComputer.java
new file mode 100644
index 0000000000..8fd2053ad0
--- /dev/null
+++ b/java/com/google/gerrit/httpd/raw/TitleComputer.java
@@ -0,0 +1,67 @@
+package com.google.gerrit.httpd.raw;
+
+import com.google.common.flogger.FluentLogger;
+import com.google.gerrit.entities.Change;
+import com.google.gerrit.extensions.restapi.ResourceConflictException;
+import com.google.gerrit.extensions.restapi.ResourceNotFoundException;
+import com.google.gerrit.server.change.ChangeResource;
+import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.gerrit.server.restapi.change.ChangesCollection;
+import com.google.inject.Inject;
+import com.google.inject.Provider;
+import com.google.inject.Singleton;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+@Singleton
+public class TitleComputer {
+ private static final FluentLogger logger = FluentLogger.forEnclosingClass();
+
+ @Inject
+ public TitleComputer(Provider<ChangesCollection> changes) {
+ this.changes = changes;
+ }
+
+ public Optional<String> computeTitle(String requestedURI) {
+ URL url = null;
+ try {
+ url = new URL(requestedURI);
+ } catch (MalformedURLException e) {
+ logger.atWarning().log("Failed to turn %s into a URL.", requestedURI);
+ return Optional.empty();
+ }
+
+ // Try to turn this into a change.
+ Optional<Change.Id> changeId = tryExtractChange(url.getPath());
+ if (changeId.isPresent()) {
+ return titleFromChangeId(changeId.get());
+ }
+
+ return Optional.empty();
+ }
+
+ private static final Pattern extractChangeIdRegex = Pattern.compile("^/(?:c/.*/\\+/)?(?<changeId>[0-9]+)(?:/[0-9]+)?(?:/.*)?$");
+ private final Provider<ChangesCollection> changes;
+
+ private Optional<Change.Id> tryExtractChange(String path) {
+ Matcher m = extractChangeIdRegex.matcher(path);
+ if (!m.matches()) {
+ return Optional.empty();
+ }
+ return Change.Id.tryParse(m.group("changeId"));
+ }
+
+ private Optional<String> titleFromChangeId(Change.Id changeId) {
+ ChangesCollection changesCollection = changes.get();
+ try {
+ ChangeResource changeResource = changesCollection.parse(changeId);
+ return Optional.of(changeResource.getChange().getSubject());
+ } catch (ResourceConflictException | ResourceNotFoundException | PermissionBackendException e) {
+ return Optional.empty();
+ }
+ }
+}
diff --git a/resources/com/google/gerrit/httpd/raw/PolyGerritIndexHtml.soy b/resources/com/google/gerrit/httpd/raw/PolyGerritIndexHtml.soy
index dbfef44dfe..347ee75aab 100644
--- a/resources/com/google/gerrit/httpd/raw/PolyGerritIndexHtml.soy
+++ b/resources/com/google/gerrit/httpd/raw/PolyGerritIndexHtml.soy
@@ -33,10 +33,12 @@
{@param? defaultDashboardHex: ?}
{@param? dashboardQuery: ?}
{@param? userIsAuthenticated: ?}
+ {@param? title: ?}
<!DOCTYPE html>{\n}
<html lang="en">{\n}
<meta charset="utf-8">{\n}
- <meta name="description" content="Gerrit Code Review">{\n}
+ {if $title}<title>{$title} · Gerrit Code Review</title>{\n}{/if}
+ <meta name="description" content="{if $title}{$title} · {/if}Gerrit Code Review">{\n}
<meta name="referrer" content="never">{\n}
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=0">{\n}
--
2.37.3
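To make the title lookup concrete, these are illustrative request paths and the change id that extractChangeIdRegex above would pull out of them (paths invented for illustration; only the path portion of the URL is matched):
#   /123                            -> change 123
#   /c/some/project/+/456           -> change 456
#   /c/some/project/+/456/7/file.go -> change 456 (patchset and file suffix are ignored)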

152
pkgs/gerrit/default.nix Normal file
View file

@ -0,0 +1,152 @@
{ buildFHSUserEnv, writeShellScriptBin, buildBazelPackage, fetchgit, unzip }:
{ name ? "gerrit-${version}", version ? "3.9.1", src ? (fetchgit {
url = "https://gerrit.googlesource.com/gerrit";
rev = "620a819cbf3c64fff7a66798822775ad42c91d8e";
branchName = "v${version}";
sha256 = "sha256:1mdxbgnx3mpxand4wq96ic38bb4yh45q271h40jrk7dk23sgmz02";
fetchSubmodules = true;
}), bazelTargets ? [ "release" "api-skip-javadoc" ]
}:
let
bazelRunScript = writeShellScriptBin "bazel-run" ''
yarn config set cache-folder "$bazelOut/external/yarn_cache"
export HOME="$bazelOut/external/home"
mkdir -p "$bazelOut/external/home"
exec /bin/bazel "$@"
'';
bazelTop = buildFHSUserEnv {
name = "bazel";
targetPkgs = pkgs: [
(pkgs.bazel_5.override { enableNixHacks = true; })
pkgs.jdk17_headless
pkgs.zlib
pkgs.python3
pkgs.curl
pkgs.nodejs
pkgs.yarn
pkgs.git
bazelRunScript
];
runScript = "/bin/bazel-run";
};
bazel = bazelTop // { override = x: bazelTop; };
in
buildBazelPackage {
inherit name version src;
patches = [
./0001-Syntax-highlight-nix.patch
./0002-Syntax-highlight-rules.pl.patch
./0003-Add-titles-to-CLs-over-HTTP.patch
];
inherit bazel bazelTargets;
bazelFlags = [
"--repository_cache="
"--disk_cache="
];
removeRulesCC = false;
fetchConfigured = true;
fetchAttrs = {
sha256 = "sha256-rsYQR6/RO5NM3/fnB3lEmbz876B59QWxWpE3M/Z4rK4=";
preBuild = ''
rm .bazelversion
'';
installPhase = ''
runHook preInstall
# Remove all built-in external workspaces, Bazel will recreate them when building
rm -rf $bazelOut/external/{bazel_tools,\@bazel_tools.marker}
rm -rf $bazelOut/external/{embedded_jdk,\@embedded_jdk.marker}
rm -rf $bazelOut/external/{local_config_cc,\@local_config_cc.marker}
rm -rf $bazelOut/external/{local_*,\@local_*.marker}
# Clear markers
find $bazelOut/external -name '@*\.marker' -exec sh -c 'echo > {}' \;
# Remove all vcs files
rm -rf $(find $bazelOut/external -type d -name .git)
rm -rf $(find $bazelOut/external -type d -name .svn)
rm -rf $(find $bazelOut/external -type d -name .hg)
# Removing top-level symlinks along with their markers.
# This is needed because they sometimes point to temporary paths (?).
# For example, in Tensorflow-gpu build:
# platforms -> NIX_BUILD_TOP/tmp/install/35282f5123611afa742331368e9ae529/_embedded_binaries/platforms
find $bazelOut/external -maxdepth 1 -type l | while read symlink; do
name="$(basename "$symlink")"
rm -rf "$symlink" "$bazelOut/external/@$name.marker"
done
# Patching symlinks to remove build directory reference
find $bazelOut/external -type l | while read symlink; do
new_target="$(readlink "$symlink" | sed "s,$NIX_BUILD_TOP,NIX_BUILD_TOP,")"
rm "$symlink"
ln -sf "$new_target" "$symlink"
done
echo '${bazel.name}' > $bazelOut/external/.nix-bazel-version
# Gerrit fixups:
# Normalize permissions on .yarn-{tarball,metadata} files
test -d $bazelOut/external/yarn_cache && find $bazelOut/external/yarn_cache \( -name .yarn-tarball.tgz -or -name .yarn-metadata.json \) -exec chmod 644 {} +
mkdir $bazelOut/_bits/
find . -name node_modules -prune -print | while read d; do
echo "$d" "$(dirname $d)"
mkdir -p $bazelOut/_bits/$(dirname $d)
cp -R "$d" "$bazelOut/_bits/$(dirname $d)/node_modules"
done
(cd $bazelOut/ && tar czf $out --sort=name --mtime='@1' --owner=0 --group=0 --numeric-owner external/ _bits/)
runHook postInstall
'';
};
buildAttrs = {
preConfigure = ''
rm .bazelversion
[ "$(ls -A $bazelOut/_bits)" ] && cp -R $bazelOut/_bits/* ./ || true
'';
postPatch = ''
# Disable all errorprone checks, since we might be using a different version.
sed -i \
-e '/-Xep:/d' \
-e '/-XepExcludedPaths:/a "-XepDisableAllChecks",' \
tools/BUILD
'';
installPhase = ''
mkdir -p "$out"/webapps/ "$out"/share/api/
cp bazel-bin/release.war "$out"/webapps/gerrit-${version}.war
unzip bazel-bin/api-skip-javadoc.zip -d "$out"/share/api
'';
nativeBuildInputs = [
unzip
];
};
passthru = {
# A list of plugins that are part of the gerrit.war file.
# Use `java -jar gerrit.war ls | grep -Po '(?<=plugins/)[^.]+' | sed -e 's,^,",' -e 's,$,",' | sort` to generate that list.
plugins = [
"codemirror-editor"
"commit-message-length-validator"
"delete-project"
"download-commands"
"gitiles"
"hooks"
"plugin-manager"
"replication"
"reviewnotes"
"singleusergroup"
"webhooks"
];
};
}
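As a sketch, the expression above is a curried function: the first argument set is dependency wiring, the second the build parameters. A hypothetical callsite, with both sets left at their defaults:
# hypothetical invocation from the repo root; callPackage fills in the first argument set
nix-build -E 'with import <nixpkgs> { }; (callPackage ./pkgs/gerrit/default.nix { }) { }'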

97
pkgs/gerrit/detzip.go Normal file
View file

@ -0,0 +1,97 @@
package main
import (
"archive/zip"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
)
var (
exclude = flag.String("exclude", "", "comma-separated list of filenames to exclude (in any directory)")
)
func init() {
flag.Usage = func() {
fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s [zip file] [directory]:\n", os.Args[0])
flag.PrintDefaults()
}
}
func listToMap(ss []string) map[string]bool {
m := make(map[string]bool)
for _, s := range ss {
m[s] = true
}
return m
}
func main() {
flag.Parse()
if flag.NArg() != 2 {
flag.Usage()
os.Exit(1)
}
outPath := flag.Arg(0)
dirPath := flag.Arg(1)
excludeFiles := listToMap(strings.Split(*exclude, ","))
// Aggregate all files first.
var files []string
if err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
if excludeFiles[info.Name()] {
return nil
}
files = append(files, path)
return nil
}); err != nil {
log.Fatalf("walking %q: %v", dirPath, err)
}
// Create zip
outW, err := os.Create(outPath)
if err != nil {
log.Fatalf("Create(%q): %v", outPath, err)
}
zipW := zip.NewWriter(outW)
// Output files in alphabetical order
sort.Strings(files)
for _, f := range files {
fw, err := zipW.CreateHeader(&zip.FileHeader{
Name: f,
Method: zip.Store,
})
if err != nil {
log.Fatalf("creating %q in zip: %v", f, err)
}
ff, err := os.Open(f)
if err != nil {
log.Fatalf("opening %q: %v", f, err)
}
if _, err := io.Copy(fw, ff); err != nil {
log.Fatalf("copying %q to zip: %v", f, err)
}
ff.Close()
}
if err := zipW.Close(); err != nil {
log.Fatalf("writing ZIP central directory: %v", err)
}
if err := outW.Close(); err != nil {
log.Fatalf("closing ZIP file: %v", err)
}
}
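A usage sketch for the tool above; the file names are made up, and the flags follow the flag.Usage text in this file:
# deterministic zip of ./site, skipping any file named .DS_Store or Thumbs.db
go run detzip.go -exclude '.DS_Store,Thumbs.db' out.zip ./site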

View file

@ -0,0 +1,36 @@
{ buildGerrit, gerrit, runCommandLocal, lib }:
{ name
, src
, depsOutputHash
, overlayPluginCmd ? ''
cp -R "${src}" "$out/plugins/${name}"
''
, postPatch ? ""
, patches ? [ ]
}: (buildGerrit {
name = "${name}.jar";
src = runCommandLocal "${name}-src" { } ''
cp -R "${gerrit.src}" "$out"
chmod +w "$out/plugins"
${overlayPluginCmd}
'';
bazelTargets = [ "//plugins/${name}" ];
}).overrideAttrs (super: {
deps = super.deps.overrideAttrs (superDeps: {
outputHash = depsOutputHash;
});
installPhase = ''
cp "bazel-bin/plugins/${name}/${name}.jar" "$out"
'';
postPatch = ''
${super.postPatch or ""}
pushd "plugins/${name}"
${lib.concatMapStringsSep "\n" (patch: ''
patch -p1 < ${patch}
'') patches}
popd
${postPatch}
'';
})

View file

@ -0,0 +1,14 @@
{ fetchgit, buildGerritBazelPlugin, lib }:
buildGerritBazelPlugin {
name = "code-owners";
depsOutputHash = "sha256-Ee2n7R/vi91drR+dNYB0QnGiiqcmz9/pynHhV9yDxdE=";
src = fetchgit {
url = "https://gerrit.googlesource.com/plugins/code-owners";
rev = "e654ae5bda2085bce9a99942bec440e004a114f3";
sha256 = "sha256:14d3x3iqskgw16pvyaa0swh252agj84p9pzlf24l8lgx9d7y4biz";
};
patches = [
./using-usernames.patch
];
}

View file

@ -0,0 +1,472 @@
commit 29ace6c38ac513f7ec56ca425230d5712c081043
Author: Luke Granger-Brown <git@lukegb.com>
Date: Wed Sep 21 03:15:38 2022 +0100
Add support for usernames and groups
Change-Id: I3ba8527f66216d08e555a6ac4451fe0d1e090de5
diff --git a/java/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolver.java b/java/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolver.java
index 70009591..6dc596c9 100644
--- a/java/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolver.java
+++ b/java/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolver.java
@@ -17,6 +17,8 @@ package com.google.gerrit.plugins.codeowners.backend;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
+import static com.google.common.collect.ImmutableSetMultimap.flatteningToImmutableSetMultimap;
+import static com.google.common.collect.ImmutableSetMultimap.toImmutableSetMultimap;
import static com.google.gerrit.plugins.codeowners.backend.CodeOwnersInternalServerErrorException.newInternalServerError;
import static java.util.Objects.requireNonNull;
@@ -25,6 +27,7 @@ import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Streams;
import com.google.common.flogger.FluentLogger;
@@ -33,17 +36,24 @@ import com.google.gerrit.entities.Project;
import com.google.gerrit.metrics.Timer0;
import com.google.gerrit.plugins.codeowners.backend.config.CodeOwnersPluginConfiguration;
import com.google.gerrit.plugins.codeowners.metrics.CodeOwnerMetrics;
+import com.google.gerrit.server.AnonymousUser;
import com.google.gerrit.server.CurrentUser;
import com.google.gerrit.server.IdentifiedUser;
import com.google.gerrit.server.account.AccountCache;
import com.google.gerrit.server.account.AccountControl;
import com.google.gerrit.server.account.AccountState;
+import com.google.gerrit.server.account.GroupBackend;
+import com.google.gerrit.server.account.GroupBackends;
+import com.google.gerrit.server.account.InternalGroupBackend;
import com.google.gerrit.server.account.externalids.ExternalId;
import com.google.gerrit.server.account.externalids.ExternalIdCache;
import com.google.gerrit.server.permissions.GlobalPermission;
import com.google.gerrit.server.permissions.PermissionBackend;
import com.google.gerrit.server.permissions.PermissionBackendException;
+import com.google.gerrit.server.util.RequestContext;
+import com.google.gerrit.server.util.ThreadLocalRequestContext;
import com.google.inject.Inject;
+import com.google.inject.OutOfScopeException;
import com.google.inject.Provider;
import java.io.IOException;
import java.nio.file.Path;
@@ -102,6 +112,8 @@ public class CodeOwnerResolver {
@VisibleForTesting public static final String ALL_USERS_WILDCARD = "*";
+ public static final String GROUP_PREFIX = "group:";
+
private final CodeOwnersPluginConfiguration codeOwnersPluginConfiguration;
private final PermissionBackend permissionBackend;
private final Provider<CurrentUser> currentUser;
@@ -112,6 +124,8 @@ public class CodeOwnerResolver {
private final CodeOwnerMetrics codeOwnerMetrics;
private final UnresolvedImportFormatter unresolvedImportFormatter;
private final TransientCodeOwnerCache transientCodeOwnerCache;
+ private final InternalGroupBackend groupBackend;
+ private final ThreadLocalRequestContext context;
// Enforce visibility by default.
private boolean enforceVisibility = true;
@@ -132,7 +146,9 @@ public class CodeOwnerResolver {
PathCodeOwners.Factory pathCodeOwnersFactory,
CodeOwnerMetrics codeOwnerMetrics,
UnresolvedImportFormatter unresolvedImportFormatter,
- TransientCodeOwnerCache transientCodeOwnerCache) {
+ TransientCodeOwnerCache transientCodeOwnerCache,
+ InternalGroupBackend groupBackend,
+ ThreadLocalRequestContext context) {
this.codeOwnersPluginConfiguration = codeOwnersPluginConfiguration;
this.permissionBackend = permissionBackend;
this.currentUser = currentUser;
@@ -143,6 +159,8 @@ public class CodeOwnerResolver {
this.codeOwnerMetrics = codeOwnerMetrics;
this.unresolvedImportFormatter = unresolvedImportFormatter;
this.transientCodeOwnerCache = transientCodeOwnerCache;
+ this.groupBackend = groupBackend;
+ this.context = context;
}
/**
@@ -361,6 +379,12 @@ public class CodeOwnerResolver {
"cannot resolve code owner email %s: no account with this email exists",
CodeOwnerResolver.ALL_USERS_WILDCARD));
}
+ if (codeOwnerReference.email().startsWith(GROUP_PREFIX)) {
+ return OptionalResultWithMessages.createEmpty(
+ String.format(
+ "cannot resolve code owner email %s: this is a group",
+ codeOwnerReference.email()));
+ }
ImmutableList.Builder<String> messageBuilder = ImmutableList.builder();
AtomicBoolean ownedByAllUsers = new AtomicBoolean(false);
@@ -405,9 +429,53 @@ public class CodeOwnerResolver {
ImmutableMultimap<CodeOwnerReference, CodeOwnerAnnotation> annotations) {
requireNonNull(codeOwnerReferences, "codeOwnerReferences");
+ ImmutableSet<String> groupsToResolve =
+ codeOwnerReferences.stream()
+ .map(CodeOwnerReference::email)
+ .filter(ref -> ref.startsWith(GROUP_PREFIX))
+ .map(ref -> ref.substring(GROUP_PREFIX.length()))
+ .collect(toImmutableSet());
+
+ // When we call GroupBackends.findExactSuggestion we need to ensure that we
+ // have a user in context. This is because the suggestion backend is
+ // likely to want to try to check that we can actually see the group it's
+ // returning (which we also check for explicitly, because I have trust
+ // issues).
+ RequestContext oldCtx = context.getContext();
+ // Check if we have a user in the context at all...
+ try {
+ oldCtx.getUser();
+ } catch (OutOfScopeException | NullPointerException e) {
+ // Nope.
+ RequestContext newCtx = () -> {
+ return new AnonymousUser();
+ };
+ context.setContext(newCtx);
+ }
+ ImmutableSetMultimap<String, CodeOwner> resolvedGroups = null;
+ try {
+ resolvedGroups =
+ groupsToResolve.stream()
+ .map(groupName -> GroupBackends.findExactSuggestion(groupBackend, groupName))
+ .filter(groupRef -> groupRef != null)
+ .filter(groupRef -> groupBackend.isVisibleToAll(groupRef.getUUID()))
+ .map(groupRef -> groupBackend.get(groupRef.getUUID()))
+ .collect(flatteningToImmutableSetMultimap(
+ groupRef -> GROUP_PREFIX + groupRef.getName(),
+ groupRef -> accountCache
+ .get(ImmutableSet.copyOf(groupRef.getMembers()))
+ .values().stream()
+ .map(accountState -> CodeOwner.create(accountState.account().id()))));
+ } finally {
+ context.setContext(oldCtx);
+ }
+ ImmutableSetMultimap<CodeOwner, String> usersToGroups =
+ resolvedGroups.inverse();
+
ImmutableSet<String> emailsToResolve =
codeOwnerReferences.stream()
.map(CodeOwnerReference::email)
+ .filter(ref -> !ref.startsWith(GROUP_PREFIX))
.filter(filterOutAllUsersWildCard(ownedByAllUsers))
.collect(toImmutableSet());
@@ -442,7 +510,8 @@ public class CodeOwnerResolver {
ImmutableMap<String, CodeOwner> codeOwnersByEmail =
accountsByEmail.map(mapToCodeOwner()).collect(toImmutableMap(Pair::key, Pair::value));
- if (codeOwnersByEmail.keySet().size() < emailsToResolve.size()) {
+ if (codeOwnersByEmail.keySet().size() < emailsToResolve.size() ||
+ resolvedGroups.keySet().size() < groupsToResolve.size()) {
hasUnresolvedCodeOwners.set(true);
}
@@ -456,7 +525,9 @@ public class CodeOwnerResolver {
cachedCodeOwnersByEmail.entrySet().stream()
.filter(e -> e.getValue().isPresent())
.map(e -> Pair.of(e.getKey(), e.getValue().get()));
- Streams.concat(newlyResolvedCodeOwnersStream, cachedCodeOwnersStream)
+ Stream<Pair<String, CodeOwner>> resolvedGroupsCodeOwnersStream =
+ resolvedGroups.entries().stream().map(e -> Pair.of(e.getKey(), e.getValue()));
+ Streams.concat(Streams.concat(newlyResolvedCodeOwnersStream, cachedCodeOwnersStream), resolvedGroupsCodeOwnersStream)
.forEach(
p -> {
ImmutableSet.Builder<CodeOwnerAnnotation> annotationBuilder = ImmutableSet.builder();
@@ -467,6 +538,12 @@ public class CodeOwnerResolver {
annotationBuilder.addAll(
annotations.get(CodeOwnerReference.create(ALL_USERS_WILDCARD)));
+ // annotations for the groups this user is in apply as well
+ for (String group : usersToGroups.get(p.value())) {
+ annotationBuilder.addAll(
+ annotations.get(CodeOwnerReference.create(group)));
+ }
+
if (!codeOwnersWithAnnotations.containsKey(p.value())) {
codeOwnersWithAnnotations.put(p.value(), new HashSet<>());
}
@@ -570,7 +647,7 @@ public class CodeOwnerResolver {
}
messages.add(String.format("email %s has no domain", email));
- return false;
+ return true; // TVL: we allow domain-less strings which are treated as usernames.
}
/**
@@ -585,11 +662,29 @@ public class CodeOwnerResolver {
*/
private ImmutableMap<String, Collection<ExternalId>> lookupExternalIds(
ImmutableList.Builder<String> messages, ImmutableSet<String> emails) {
+ String[] actualEmails = emails.stream()
+ .filter(email -> email.contains("@"))
+ .toArray(String[]::new);
+ ImmutableSet<String> usernames = emails.stream()
+ .filter(email -> !email.contains("@"))
+ .collect(ImmutableSet.toImmutableSet());
try {
- ImmutableMap<String, Collection<ExternalId>> extIdsByEmail =
- externalIdCache.byEmails(emails.toArray(new String[0])).asMap();
+ ImmutableMap<String, Collection<ExternalId>> extIds =
+ new ImmutableMap.Builder<String, Collection<ExternalId>>()
+ .putAll(externalIdCache.byEmails(actualEmails).asMap())
+ .putAll(externalIdCache.allByAccount().entries().stream()
+ .map(entry -> entry.getValue())
+ .filter(externalId ->
+ externalId.key().scheme() != null &&
+ externalId.key().isScheme(ExternalId.SCHEME_USERNAME) &&
+ usernames.contains(externalId.key().id()))
+ .collect(toImmutableSetMultimap(
+ externalId -> externalId.key().id(),
+ externalId -> externalId))
+ .asMap())
+ .build();
emails.stream()
- .filter(email -> !extIdsByEmail.containsKey(email))
+ .filter(email -> !extIds.containsKey(email))
.forEach(
email -> {
transientCodeOwnerCache.cacheNonResolvable(email);
@@ -598,7 +693,7 @@ public class CodeOwnerResolver {
"cannot resolve code owner email %s: no account with this email exists",
email));
});
- return extIdsByEmail;
+ return extIds;
} catch (IOException e) {
throw newInternalServerError(
String.format("cannot resolve code owner emails: %s", emails), e);
@@ -815,6 +910,15 @@ public class CodeOwnerResolver {
user != null ? user.getLoggableName() : currentUser.get().getLoggableName()));
return true;
}
+ if (!email.contains("@")) {
+ // the email is the username of the account, or a group, or something else.
+ messages.add(
+ String.format(
+ "account %s is visible to user %s",
+ accountState.account().id(),
+ user != null ? user.getLoggableName() : currentUser.get().getLoggableName()));
+ return true;
+ }
if (user != null) {
if (user.hasEmailAddress(email)) {
diff --git a/java/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParser.java b/java/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParser.java
index 5f350998..7977ba55 100644
--- a/java/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParser.java
+++ b/java/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParser.java
@@ -149,7 +149,8 @@ public class FindOwnersCodeOwnerConfigParser implements CodeOwnerConfigParser {
private static final String EOL = "[\\s]*(#.*)?$"; // end-of-line
private static final String GLOB = "[^\\s,=]+"; // a file glob
- private static final String EMAIL_OR_STAR = "([^\\s<>@,]+@[^\\s<>@#,]+|\\*)";
+ // Also allows usernames, and group:$GROUP_NAME.
+ private static final String EMAIL_OR_STAR = "([^\\s<>@,]+@[^\\s<>@#,]+?|\\*|[a-zA-Z0-9_\\-]+|group:[a-zA-Z0-9_\\-]+)";
private static final String EMAIL_LIST =
"(" + EMAIL_OR_STAR + "(" + COMMA + EMAIL_OR_STAR + ")*)";
diff --git a/javatests/com/google/gerrit/plugins/codeowners/backend/AbstractFileBasedCodeOwnerBackendTest.java b/javatests/com/google/gerrit/plugins/codeowners/backend/AbstractFileBasedCodeOwnerBackendTest.java
index 7ec92959..59cf7e05 100644
--- a/javatests/com/google/gerrit/plugins/codeowners/backend/AbstractFileBasedCodeOwnerBackendTest.java
+++ b/javatests/com/google/gerrit/plugins/codeowners/backend/AbstractFileBasedCodeOwnerBackendTest.java
@@ -424,7 +424,7 @@ public abstract class AbstractFileBasedCodeOwnerBackendTest extends AbstractCode
.commit()
.parent(head)
.message("Add invalid test code owner config")
- .add(JgitPath.of(codeOwnerConfigKey.filePath(getFileName())).get(), "INVALID"));
+ .add(JgitPath.of(codeOwnerConfigKey.filePath(getFileName())).get(), "INVALID!"));
}
// Try to update the code owner config.
diff --git a/javatests/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolverTest.java b/javatests/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolverTest.java
index 6171aca9..37699012 100644
--- a/javatests/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolverTest.java
+++ b/javatests/com/google/gerrit/plugins/codeowners/backend/CodeOwnerResolverTest.java
@@ -24,8 +24,10 @@ import com.google.gerrit.acceptance.TestAccount;
import com.google.gerrit.acceptance.TestMetricMaker;
import com.google.gerrit.acceptance.config.GerritConfig;
import com.google.gerrit.acceptance.testsuite.account.AccountOperations;
+import com.google.gerrit.acceptance.testsuite.group.GroupOperations;
import com.google.gerrit.acceptance.testsuite.request.RequestScopeOperations;
import com.google.gerrit.entities.Account;
+import com.google.gerrit.entities.AccountGroup;
import com.google.gerrit.plugins.codeowners.acceptance.AbstractCodeOwnersTest;
import com.google.gerrit.server.ServerInitiated;
import com.google.gerrit.server.account.AccountsUpdate;
@@ -51,6 +53,7 @@ public class CodeOwnerResolverTest extends AbstractCodeOwnersTest {
@Inject private RequestScopeOperations requestScopeOperations;
@Inject @ServerInitiated private Provider<AccountsUpdate> accountsUpdate;
@Inject private AccountOperations accountOperations;
+ @Inject private GroupOperations groupOperations;
@Inject private ExternalIdNotes.Factory externalIdNotesFactory;
@Inject private TestMetricMaker testMetricMaker;
@Inject private ExternalIdFactory externalIdFactory;
@@ -112,6 +115,18 @@ public class CodeOwnerResolverTest extends AbstractCodeOwnersTest {
.contains(String.format("account %s is visible to user %s", admin.id(), admin.username()));
}
+ @Test
+ public void resolveCodeOwnerReferenceForUsername() throws Exception {
+ OptionalResultWithMessages<CodeOwner> result =
+ codeOwnerResolverProvider
+ .get()
+ .resolveWithMessages(CodeOwnerReference.create(admin.username()));
+ assertThat(result.get()).hasAccountIdThat().isEqualTo(admin.id());
+ assertThat(result)
+ .hasMessagesThat()
+ .contains(String.format("account %s is visible to user %s", admin.id(), admin.username()));
+ }
+
@Test
public void cannotResolveCodeOwnerReferenceForStarAsEmail() throws Exception {
OptionalResultWithMessages<CodeOwner> result =
@@ -127,6 +142,18 @@ public class CodeOwnerResolverTest extends AbstractCodeOwnersTest {
CodeOwnerResolver.ALL_USERS_WILDCARD));
}
+ @Test
+ public void cannotResolveCodeOwnerReferenceForGroup() throws Exception {
+ OptionalResultWithMessages<CodeOwner> result =
+ codeOwnerResolverProvider
+ .get()
+ .resolveWithMessages(CodeOwnerReference.create("group:Administrators"));
+ assertThat(result).isEmpty();
+ assertThat(result)
+ .hasMessagesThat()
+ .contains("cannot resolve code owner email group:Administrators: this is a group");
+ }
+
@Test
public void resolveCodeOwnerReferenceForAmbiguousEmailIfOtherAccountIsInactive()
throws Exception {
@@ -397,6 +424,64 @@ public class CodeOwnerResolverTest extends AbstractCodeOwnersTest {
assertThat(result.hasUnresolvedCodeOwners()).isFalse();
}
+ @Test
+ public void resolvePathCodeOwnersWhenNonVisibleGroupIsUsed() throws Exception {
+ CodeOwnerConfig codeOwnerConfig =
+ CodeOwnerConfig.builder(CodeOwnerConfig.Key.create(project, "master", "/"), TEST_REVISION)
+ .addCodeOwnerSet(
+ CodeOwnerSet.createWithoutPathExpressions("group:Administrators"))
+ .build();
+
+ CodeOwnerResolverResult result =
+ codeOwnerResolverProvider
+ .get()
+ .resolvePathCodeOwners(codeOwnerConfig, Paths.get("/README.md"));
+ assertThat(result.codeOwnersAccountIds()).isEmpty();
+ assertThat(result.ownedByAllUsers()).isFalse();
+ assertThat(result.hasUnresolvedCodeOwners()).isTrue();
+ }
+
+ @Test
+ public void resolvePathCodeOwnersWhenVisibleGroupIsUsed() throws Exception {
+ AccountGroup.UUID createdGroupUUID = groupOperations
+ .newGroup()
+ .name("VisibleGroup")
+ .visibleToAll(true)
+ .addMember(admin.id())
+ .create();
+
+ CodeOwnerConfig codeOwnerConfig =
+ CodeOwnerConfig.builder(CodeOwnerConfig.Key.create(project, "master", "/"), TEST_REVISION)
+ .addCodeOwnerSet(
+ CodeOwnerSet.createWithoutPathExpressions("group:VisibleGroup"))
+ .build();
+
+ CodeOwnerResolverResult result =
+ codeOwnerResolverProvider
+ .get()
+ .resolvePathCodeOwners(codeOwnerConfig, Paths.get("/README.md"));
+ assertThat(result.codeOwnersAccountIds()).containsExactly(admin.id());
+ assertThat(result.ownedByAllUsers()).isFalse();
+ assertThat(result.hasUnresolvedCodeOwners()).isFalse();
+ }
+
+ @Test
+ public void resolvePathCodeOwnersWhenUsernameIsUsed() throws Exception {
+ CodeOwnerConfig codeOwnerConfig =
+ CodeOwnerConfig.builder(CodeOwnerConfig.Key.create(project, "master", "/"), TEST_REVISION)
+ .addCodeOwnerSet(
+ CodeOwnerSet.createWithoutPathExpressions(admin.username()))
+ .build();
+
+ CodeOwnerResolverResult result =
+ codeOwnerResolverProvider
+ .get()
+ .resolvePathCodeOwners(codeOwnerConfig, Paths.get("/README.md"));
+ assertThat(result.codeOwnersAccountIds()).containsExactly(admin.id());
+ assertThat(result.ownedByAllUsers()).isFalse();
+ assertThat(result.hasUnresolvedCodeOwners()).isFalse();
+ }
+
@Test
public void resolvePathCodeOwnersNonResolvableCodeOwnersAreFilteredOut() throws Exception {
CodeOwnerConfig codeOwnerConfig =
@@ -655,7 +740,7 @@ public class CodeOwnerResolverTest extends AbstractCodeOwnersTest {
"domain example.com of email foo@example.org@example.com is allowed");
assertIsEmailDomainAllowed(
"foo@example.org", false, "domain example.org of email foo@example.org is not allowed");
- assertIsEmailDomainAllowed("foo", false, "email foo has no domain");
+ assertIsEmailDomainAllowed("foo", true, "email foo has no domain");
assertIsEmailDomainAllowed(
"foo@example.com@example.org",
false,
diff --git a/javatests/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParserTest.java b/javatests/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParserTest.java
index 260e635e..7aab99d0 100644
--- a/javatests/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParserTest.java
+++ b/javatests/com/google/gerrit/plugins/codeowners/backend/findowners/FindOwnersCodeOwnerConfigParserTest.java
@@ -158,16 +158,42 @@ public class FindOwnersCodeOwnerConfigParserTest extends AbstractCodeOwnerConfig
codeOwnerConfigParser.parse(
TEST_REVISION,
CodeOwnerConfig.Key.create(project, "master", "/"),
- getCodeOwnerConfig(EMAIL_1, "INVALID", "NOT_AN_EMAIL", EMAIL_2)));
+ getCodeOwnerConfig(EMAIL_1, "INVALID!", "NOT!AN_EMAIL", EMAIL_2)));
assertThat(exception.getFullMessage(FindOwnersBackend.CODE_OWNER_CONFIG_FILE_NAME))
.isEqualTo(
String.format(
"invalid code owner config file '/OWNERS' (project = %s, branch = master):\n"
- + " invalid line: INVALID\n"
- + " invalid line: NOT_AN_EMAIL",
+ + " invalid line: INVALID!\n"
+ + " invalid line: NOT!AN_EMAIL",
project));
}
+ @Test
+ public void codeOwnerConfigWithUsernames() throws Exception {
+ assertParseAndFormat(
+ getCodeOwnerConfig(EMAIL_1, "USERNAME", EMAIL_2),
+ codeOwnerConfig ->
+ assertThat(codeOwnerConfig)
+ .hasCodeOwnerSetsThat()
+ .onlyElement()
+ .hasCodeOwnersEmailsThat()
+ .containsExactly(EMAIL_1, "USERNAME", EMAIL_2),
+ getCodeOwnerConfig(EMAIL_1, "USERNAME", EMAIL_2));
+ }
+
+ @Test
+ public void codeOwnerConfigWithGroups() throws Exception {
+ assertParseAndFormat(
+ getCodeOwnerConfig(EMAIL_1, "group:tvl-employees", EMAIL_2),
+ codeOwnerConfig ->
+ assertThat(codeOwnerConfig)
+ .hasCodeOwnerSetsThat()
+ .onlyElement()
+ .hasCodeOwnersEmailsThat()
+ .containsExactly(EMAIL_1, "group:tvl-employees", EMAIL_2),
+ getCodeOwnerConfig(EMAIL_1, "group:tvl-employees", EMAIL_2));
+ }
+
@Test
public void codeOwnerConfigWithComment() throws Exception {
assertParseAndFormat(

View file

@ -0,0 +1,16 @@
{ buildGerritBazelPlugin, fetchgit }:
buildGerritBazelPlugin rec {
name = "oauth";
depsOutputHash = "sha256-4/+E0BwkA+rYYCy7y3G9xF86DJj+CFzPZUNXC5HN5wc=";
src = fetchgit {
url = "https://gerrit.googlesource.com/plugins/oauth";
rev = "b27cf3ea820eec2ddd22d217fc839261692ccdb0";
sha256 = "1m654ibgzprrhcl0wpzqrmq8drpgx6rzlw0ha16l1fi2zv5idkk2";
};
overlayPluginCmd = ''
chmod +w "$out" "$out/plugins/external_plugin_deps.bzl"
cp -R "${src}" "$out/plugins/${name}"
cp "${src}/external_plugin_deps.bzl" "$out/plugins/external_plugin_deps.bzl"
'';
}

View file

@ -1,46 +1,19 @@
let
keys = import common/ssh-keys.nix;
commonKeys = keys.users.delroth ++ keys.users.raito;
commonKeys = keys.users.delroth;
secrets = with keys; {
hydra-postgres-key = [ machines.build-coord ];
hydra-s3-credentials = [ machines.build-coord ];
hydra-signing-priv = [ machines.build-coord ];
hydra-ssh-key-priv = [ machines.build-coord ];
hydra-s3-credentials = [ machines.bagel-box ];
hydra-ssh-key-priv = [ machines.bagel-box ];
netbox-environment = [ machines.meta01 ];
mimir-environment = [ machines.meta01 ];
mimir-webhook-url = [ machines.meta01 ];
grafana-oauth-secret = [ machines.meta01 ];
loki-environment = [ machines.meta01 ];
gerrit-prometheus-bearer-token = [ machines.gerrit01 machines.meta01 ];
pyroscope-secrets = [ machines.meta01 ];
tempo-environment = [ machines.meta01 ];
buildbot-worker-password = [ machines.buildbot ];
buildbot-oauth-secret = [ machines.buildbot ];
buildbot-workers = [ machines.buildbot ];
# Private SSH key to Gerrit
# ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHx52RUPWzTa2rBA96xcnGjjzAboNN/hm6gW+Q6JiSos
buildbot-service-key = [ machines.buildbot ];
# Signing key for Buildbot's specific cache
buildbot-signing-key = [ machines.buildbot ];
buildbot-remote-builder-key = [ machines.buildbot ];
# These are the same password, but nginx wants it in htpasswd format
metrics-push-htpasswd = [ machines.meta01 ];
metrics-push-password = builtins.attrValues machines;
ows-deploy-key = [ machines.gerrit01 ];
s3-channel-staging-keys = [ machines.gerrit01 ];
s3-channel-keys = [ machines.gerrit01 ];
postgres-ca-priv = [ machines.bagel-box ];
postgres-tls-priv = [ machines.bagel-box ];
newsletter-secrets = [ machines.public01 ];
s3-revproxy-api-keys = [ machines.public01 ];
loki-htpasswd = [ machines.meta01 ];
promtail-password = builtins.attrValues machines;
};
in
builtins.listToAttrs (

View file

@ -1,20 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 87T2Ig tzPD1x6XKuDfgJ8jkQnwW/ALp2pkANCeNoO8xdUqq30
QSsuO6Dwc8QJuY92gXRnWB5aJ2SU9X2uFh01GmLVaQE
-> ssh-ed25519 K3b7BA 9G9Uw1xY8hq//xphNWrPn5y7vG2o8/kwkC8cJGuf/mI
Ip0019OUaFq2ZDFI3i77hdsp9IqFV2qqYIB/TnDSXgo
-> ssh-ed25519 +qVung dx22ef+x9X5mr73L8NUzxYQa640M2XViELjJcpgF3go
CXyit7pk8SPNHBgULlMQUAasGAn4C36zcwOBDI46nU4
-> ssh-rsa krWCLQ
NlGh0hM10NOuek7MbrFo0iul0kQQtDFmZIhgpyqaATMdCDRBXJOyhASHU5N0zDDJ
MLaJUV0l2o1ghBF9RhSKdoUPVEn8Cce/nfQepYzMlfc4UG3qWXwabwR6EtqqCZCJ
jAEWZ8taTKDmzoXwuygCW+bRBuoMMrcfzu7V90N+mQpZWtOScatb6E7d5VRqjlar
st1ZQu5ccghufyQSUmOC7GpojOyutX5EvbMGn84X4ouZRHRX/8fTgaqicV+aeAIb
QyXisOrO6C+Jle5qfxzMSe8c/TCyF2574kD6F1BQ9Kpkinn8v7OWcIXtkNmZ5hzK
vs0Bej8yZVsoBkj1vWAM0A
-> ssh-ed25519 /vwQcQ n+hr1cV1zRs1S86YnA+0oRB8SCaPKtkoMNe15ZsVVwM
fdFtUqno07ik6FpW5zMImIjd8wM8dMgwU+RqjeT2PiI
-> ssh-ed25519 0R97PA ddPILw57gkuKvAqlmpa+MnV/LSEdyQzQaAarCUqQ1xE
ozK5a6uXZDc17OrX0OZun9hmZwP3H3rYQiNuKnukqsg
--- f7yGgKQpCPj64Ps0HfMcToYircGH5SPqMzVZrUMB8ZI
føv[iY\ÅšMP,¯Ùh°Èxb—Ðÿ«J<C2AB>*ºË" ”+¬ÒA0T˜?KmˆPÈË2¹'2±µ³Ø=¯êÚÏŸj”

Binary file not shown.

Binary file not shown.

View file

@ -1,20 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 87T2Ig p8lEB5da4fIfLH/HKBsghzq5mvQLB69UB4+uAi3DGCw
NeZ3jPTUKa7MiqjrFPrYuP4VneytQPdBNqf+omPZJYM
-> ssh-ed25519 K3b7BA uP2K1hU7uLmiHXmmoUdsB7CHQq61ZkEAjG/aK863RDw
0chTczEMXASdYiwqNxDQ+vMXXhjOf64oIQ2ULZmQI8Y
-> ssh-ed25519 +qVung jUgEqz3+ypL7mwJ1R7lfeOMhkon/aRrNSJUJT3X7vmU
pgOiwrp9JiA20yw9bsxi8eiQ9/23CYXKRBGF1pea9eI
-> ssh-rsa krWCLQ
snCHrLHzkjimwIxKO90IjnHwOArlozO9kd/aCdZZnYNgh/QG3rUSceSn9yTHbtMV
izv0SU51LrRU+JyE+a524AxKhyPBvGDig20j7hMy5fVxZqeunztqtlha5gaYYaQg
Tbfs9tDP+pCIgzMVNqYf6EJ4MK7qjNf9DE5I490Eta5YZxAi/3To3BmZmIYtCz6l
1kNRiSmWCbZqE25keFgPCgRMFXAFK9W6NmL+HamqCUhjPoJg/Gd4sf39EONT0PYg
7BpCOAnwwfECHPxpM3qv0h2kJXTb4DZ715cFReSVyQe5fvKv8hoWhl/S+++pEYT8
u/LKBx/o7e3Kd7cm2RGnBw
-> ssh-ed25519 /vwQcQ 4+IQPRsMMHmuSGL7T7IbRkTTuL+TTqgdQp5FSbyt8Dw
KOI0LKQ0oA5XtxaW7wftlEJB0BGVnx41HUJMG92SRUA
-> ssh-ed25519 0R97PA l1aWUEv8nLEtYnpY1gjTJqk5UYm51NDqOjYmL83rZ10
B7qDZwCpolkIajqCXeOepwmF6ciJfKvr+AN7VouMUvA
--- lz/IMMPxBpD3Bzuv9Wl23+swBQHlblhlAO/ZXAgN0hU
µoÍüÌ<EFBFBD>²-Īr °eó|Í?ït èìÎZ<C38E>¬sÒì!ŸƒÁ<>@Ï'ìèz6UöÎgJøÑOµs13<31>š8<î’%-·Ô‡Eÿ }Šdm9¿å¢Óoæ

Binary file not shown.

View file

@ -1,20 +1,7 @@
age-encryption.org/v1
-> ssh-ed25519 j2r2qQ vwcaLpvGJ9swXnV8idDwi9jdRPSj38As9p2QFkIJ1Xc
FLnZeblHDQQcWjFm1iaghbvuFgOG3miwtkRE5sz1+X0
-> ssh-ed25519 K3b7BA 9VRe2rBwg3G9lxxfxL/yLob2NZmLJTBMxzx0Ew8VwmY
/I2W80UykNvll5o98OPeMpIsddOel9B7uQlio0X3gcs
-> ssh-ed25519 +qVung VsqKzMD85aps4PIx2zqae2Dj7YWibiaKYb5z7ws8ggM
Y9dRd/hOz8h4avlutBQ1YZgHIAf/AuTr5WaByKlFbLE
-> ssh-rsa krWCLQ
gjyaUFrIIbZnFTGVw4XEZzkTIP/+qXV6/q0W8Wb4EtqQXDRISFT+bwxQU/S2p5hf
7+JGcn4BZg6puOJ5BBABWtpn6gcX5OFfga5azIdioF/R19XByT+0SK5njw8g1VPS
R7o8kQt2yvKWayoq9Cis5XRg+4KANkwOQaNTO8AdiCwgq9nc0Cd9avk8QhaFoR74
D5cf8jPsufp744rQqwhWDoG533LS1WUUuYZqRmtp2Vz+r583RhSscaNyA7ddr7o6
e9ZQJyL5bKiN8qe3Xm76lLypf/wg7+aGn8HHnO6GA65g+VYfjLMODEqCN/+uDJtB
g8v2wzKIGYlZiV1hEjH8nw
-> ssh-ed25519 /vwQcQ 4pU5JGK5vpZbFgq01a9YY8VmSJvPSHPSZD50TLJwKHc
L46UA/p+bNSR8cLmL8G7VpmAcZ+sy5AROc4yj2ABOWg
-> ssh-ed25519 0R97PA Tk00kYLhsEy1HJcmKLgaLWTdNP8XV/cdKHMLzyK6glk
kwyQZr/h6MutROJmjVfPWGcf9xN5Uc5w5mVyuKcK64g
--- E0vVtBqbjNkZY0/1dFJ53uVAR7IGPO+OMmXkpJcKmlw
{ÐQê%èõY•B,isr¥1<31>yLÕ'7?¶iŠM…¶MU]×ê/d2¸I1u¶2hZjHåh &¥
-> ssh-ed25519 j2r2qQ Xl0fSOuF0xNTJrtVGdRLRIszd15LFrG5KCFNvSBK4Go
qSEMBBw90jz4j8elpoUeyS4CTLBhZtNDhLNigesJq+0
-> ssh-ed25519 K3b7BA cKI0twKiuuTKv1Js4jqt5v8cOqpxEMY9dmVghgJtbzw
K5o31XP/nLsswsrMaxnIzCXVUtJqmJWoFglWFsV7+AQ
--- X8pvqCHeCQ0LjzcjIHThkqp6YeOOT8dBMLuktgdgeY4
sZÓ¸ŠíØ[þ²X<C2B2>“¡èÅ®Š5°=÷6)ÇT¿Q†N{•x³I1ƒ!ÓÜøB ƒzš*×íåL~K

Binary file not shown.

Binary file not shown.

View file

@ -1,20 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 +uvEmw TNYFQxSUv5yMmlTWoIxCOlv6UR+RA50cb5aJbo0yEE0
Yw3sTPqYf7A33RI87CqoPWe2gh0FuvdBGGKqHV55Atc
-> ssh-ed25519 K3b7BA vMlHenY6jSIfnxQD6xh09cwwV+YVBkLuSMHcyKD+dCk
heXkAEqRawBlHqcr6ldmhWmk7qPtGLMDFC3QT79vdMM
-> ssh-ed25519 +qVung fgimLW5X0z4Eh2u3fIr5bgR5/c1SKam9CKW/2mqtTik
8VKJJr+FRE0j5YvjfdMXugNA4UwUebKrkeAe+9LYBnQ
-> ssh-rsa krWCLQ
sa3fA4GglovY8H6jimpTvQPW/axun8WADPlIXzpX/Zeshkzem+pQoQqptzDlnmH8
8AngqXgFYrmHgNNAylavgcrxbjNrtlJU24ldF1YIubz7VsU1678F27LCd9B0c2dn
X+0CccH19lM8Q+zVI2Wrq9R83MEP/5uOOc+eXXnvNSGqfKgZ2OplG/HUllFS13j6
uiQy5zwJJKkII7KUThcGteux7NONoLeUqRE8CW2uSeY9fXBWKgxeENKgiT7PEAAo
nvwWa+GatEYf6eUz8Lph8lETorgP+7JS2VQRAkmhDbjQLTYzfFmiJGE/mzyobslf
ZEq6Oj5UNgnzdWmK5ZYKPg
-> ssh-ed25519 /vwQcQ 9EG/cydlzlLd6cFed7DzmwzubzJUXvD9mX3WKDyFD1s
3Emj+tVZmnsC/YZdChvyaxeObbBsri347vZl0ff9kH4
-> ssh-ed25519 0R97PA kcIYyWKxpJmjcrel+YodZQiR2zGPqfjzMyJXsz2XOzM
SUlgGGs2BVRzTHT/ULNo1AiN5SY1BETFtJRY6LDr4JI
--- l87sO6IuwSeCeQ8ktvYFI0xr4Utcl8KfpAV7WePc1y4
÷ÚÖ~÷J3¦Œ§ÍÇè²ù<º?%×ý<0E>Á‡ÉÒ\—\Ï7\»Ú¨åU-&W'd”{իɼ½u "Û#Ž}¯õ…xìšz¥®Nj„!éfUqDG<é<7F>Ñca<63> '´+ ¸Ï±]Pó»ö€DÕ¸´þ<C2B4>£¼­ŽÿÂç Ì\¦<>

Binary file not shown.

View file

@ -1,20 +1,9 @@
age-encryption.org/v1
-> ssh-ed25519 j2r2qQ w0lLquFUUcmEZ/Fh1YSt85tAJkBwavORQbwMr7gMqF4
J4T+EHm1uHbCZkAUNoNcB9uGSz082mFL8+dkCnvYQnM
-> ssh-ed25519 K3b7BA 28bJZgBPPc2KIE5+b8LJuQ5L4YAiRAJzucEuOqXHdVM
7hKENFr8QX0jpwuuQEjGFrUywJuhL1Tdi2V4/gR8JWE
--- GSPZxz39TMMWv0qhotNgnXa5679Q7VK8JGjQjI7A8oM
J˛\@F“N• łĺ2®ô¨w×!Ż1Vf»§<C2BB>Ž·ŢO˛CÓw®®V°ŁšĚş.^݆ w‡n4äŕdW-Öľ"@0¨úąEĎż·°ck,]M}xŤřĚťˇŰy°[×ÁJ:!č‘ !ř螀c¬
BëąR
nřę€ţŔáĆ^9í¤M<ú

View file

@ -0,0 +1,7 @@
age-encryption.org/v1
-> ssh-ed25519 j2r2qQ nLWy3WcVJWCl3rXkhcSbp1joqmkk06QnxhCZ4UtSvmw
iQ+Hx/vhiFgkWfbxHwGjxMBEqzyGww4/9do3W7V/y1Y
-> ssh-ed25519 K3b7BA RkF2ADcjOGtivl9MrhO/HFwxlTAkbFHWL3iinUldMiM
7q/zdVTMLevukZjkHtcN88iYzfTLvq2s3QdkgsFSO9M
--- 1b2HiK06vJPqBgHVDD0QELOtfkl7/rlgGS9uI1mSbus
„uܧoL;õå¬" 4¦Û»Z¼˜@§öãƒÐ3+93Q4óÄ o•ŒØwé“„6<>M-²DkJn´;ñ*g <0A>Yœ75ËSò)Ù°©

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -1,20 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 j2r2qQ uYhcMpOER5j/SWUX1mNvkOU9Rumr0CgVBuGv9EHGpFY
6kAgrwjgB7C1cMd410EpUegcxxGRcNOwCMJPXppepvE
-> ssh-ed25519 K3b7BA 57GDNt5nwxgzCV5bnMPEPUeyZNG1U+zajCIjeoHjLAE
rFCbfodjXHZ0aVLtW6xtoh6e/VH/HwFdFzjnQ2QEEXQ
-> ssh-ed25519 +qVung DnLKAJPnUDpZ2+wXDZWpxwZkvv8oDyu3xxObTMT9W1I
vh59DYoQLpiro5eBjwgNH2YHRsGY/i6TB7zPfQicOEU
-> ssh-rsa krWCLQ
ekvGooB5sCmAniHU7hlk+iCkYMQ7Rw2SJx8tp4FnpfAWJbRMH8CpTFYFiDvlHfFy
Ce1OpkNkkipzBge0OCrfn6Y5iVz2CZHYHf8Ul5ueHwmb5fS7seT3yMoWhhSw/zE/
G3snrBORT9S9+KTRnVnKiy+O3CaMZY+q41RR35Fs3mmVc/of2ILc/Jj3a3t+uBTX
axkOMU6z6R6i3Ps5SbwJTaB9q2kMPvZFOO9Nmku1wohjetz64wvm+fDx0XVRPe4A
jDQRPKAMIZK68SYHk/9azmlBtJSJnvxcxyj3IaU9MBskUCldWi8CQ9jQ+1XAIuHX
0Etcsx7MhzBpuhx2xZ+dyg
-> ssh-ed25519 /vwQcQ uW41w2RAtfMaOm1wJktMcbVporqKgdGA5SY03OcPmlM
WgL8DWPU735Ysowq0HtvbrT6Tc3XEpwws3AycqpBgtM
-> ssh-ed25519 0R97PA 59AFQx8ngDwQUdmfOeOFUARQQqaAdLA5WH67Wsld4yM
o6jSWtlidZssWsJsI8xAaASi8p1sirLJFJwizzPXIBM
--- scUnldbU89ICZYlniDbGEqeUF7QUoO1kcZLl8abyttk
öR{p@IµþOlKKõŒ§!<01>œWÎÅœ[R<-AÐbÔ<¯·÷0õÐu¹øµñUgBÏF~µ«=ÊõeQò}î4Ø:ô²¢5ƯŠtaØ™û”<C3BB>æ·<C3A6>§°x±Më?Ew0<77>8.

Binary file not shown.

View file

@ -1,21 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 CyxfgQ fWH/o2+Uf0i/JFIuVjCnkhDIfYndtL8EeDcxSxhKVH8
ShSPmdwnxzDuUe/kCx8e61JJAoHMwguNydn+5OIGuAg
-> ssh-ed25519 K3b7BA p+wXAGvPqTX63dlZNCTIq3F4QFMWEJH1R+Ex4SJ5UTk
1sFqFqnUM8YvZy7BEBArg3eLxCCsLXq2jNI7XLKq/Ww
-> ssh-ed25519 +qVung rcpgzVQ1PmoNF2i0K0nAknzZwPXICBggzqhIZwO+8xY
9rjsTwLm5u1GOJmnJYriXXAY1unG7y+WJ4G2ltxX34U
-> ssh-rsa krWCLQ
seXsQjs62kxn/agyKda2l19PI4xzDl1gM7rEnaEBV8UNLOPNxh41HTnP2etgDXSc
4eyS3ntHXIOHmN4+JBn+Q/wuhzMGQmAcoFWbjqVVPOrpPYjgCG7q/iUD8kULxLB9
UpF0gLsg1TnvrkTwlpxr8rP/PM+ZgyQAA84S96j9TW0coyTUoH/ZX1wWGtS4aalm
aTrOMZGScZu7onTg+tYvR+aBKlFL28h08I5nqbA39srnCNuU68+OUhLgLUfiTscl
umwNh/C4BP2Tmc6gxQiY8o3tGqGBssGH5+WqKzbK151vJjq80RKAS1HCaSSfmxkP
vWkXWN3NQkJyqCBpuPYilg
-> ssh-ed25519 /vwQcQ eUH0B+cCoUubIKbG+bA25kRj0TnZabB6t8jVK04NrFs
ovkI0C4W5CJXMZIZdpaTtQNc+TGkQ3Yq87Dei3BMUsA
-> ssh-ed25519 0R97PA u/I45pxH3Bnja/Jw/6IukINRuC0e1IKu8UVygVgIomc
xyHuiHf1/nJirnhXbGHJnextGQa95tDo/RPRRnDCkIg
--- LGqO4Bsa8bofD1W5YrQp75SlGLNg1XaFZ0rPUuvLPTo
Êçã ‹ÜmlW£{@I3…*¹ŒÇ™@ÞªL7Wª ¤ÝŒY
n õö~Tb\V•ÜvPÙpPôïoÌS"ôm/Ûµ/bÝpÄžêq¸£¦šeDj6ÆþTì)

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,12 @@
age-encryption.org/v1
-> ssh-ed25519 +HUDfA ZUM0ACC/NIekvX1PkCiXTHaTeE3ybudmY3piHw2iekQ
cHj94FIR6gNJ3Hw9FI7K15OYgxbjkajGtCftD+2Mr8c
-> ssh-ed25519 2D+APA tzlyOnAXnLxXO/47n45sFPiJF3FXd98UU5ajPhD2wSs
P8ZdUiBeME17SU2BpMgOq4plyAqgzLOQWHa1+Q7cjYo
-> ssh-ed25519 j2r2qQ 3OikD9JOmug7kdPAPz+JT/ryB6xBQhu2+cwS9h5sKGI
XiIuxOyey2I6hmqabUCPzLc85q/1r9OwVGjHWYNQsp0
-> ssh-ed25519 K3b7BA Bdqcqt4GgLzuSiEnIyImDiOQGwyIhhozRXMmNrp7glI
65joZcnl0Hqe90Th2EdVgbcxUJFpy3fOgk6oPiSHh2A
--- 6x6BFNypc+u3DpsHX3SajwEy1TqsAtbFei0ddRpEoBg
äªUG¾xj4»®Îþ‡b=óžóñ¼Rd<52>3sHYÝ Ô<>*Qµ9Ã6n34&äw»~h!§ ^„[êš

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -1,20 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 j2r2qQ kbi4mciOrjd7/X86xfmkDaMZhvZakoSJ6qjqLF3ljkE
Q2BsgMLJ8AmjhnggRi+wkICj18NCA2HW1t8clemReUw
-> ssh-ed25519 K3b7BA wNGmX9S9bJgd2JDte9QoNDfyycgmq4JMu2bc5nyYYik
uUiutxAI3nI0M51W97aPRVE/l4dV2PEjph8eWOMLHIE
-> ssh-ed25519 +qVung raYJ5vwMP9JopSdfa+ofkLY/gc0zcW4wTNBFTca+MXw
sa/rWGSYrI4y6rn4JSboldWKUGvx6HbtsYo78AFOkBo
-> ssh-rsa krWCLQ
FLq8NwkiGw2gXptVVY393f0p9hFom57xHWPxtAlzOcRT8gvWu/uwgV+0raOcOcJa
xxr5Sib+2D3UnUhprVPmH5Os9bI2seFAiej1MVVWLqvMtQHLFwnrzZTyZpxsXpQq
5qQhNEADuQc4uD/ELVjGHKt6nF1Cl/GbgNLIOF/ITZ0pm1O1MjtT6MYJhQJhc6sb
sno/wQyTXjj7rC06nyLX/rgOWrJSOeaz9eVp0A8k8/I0TXu/vRCW9gqWtv2m8sbh
1uUHIm0l8f3z+zrL6OlZnpMFw4jpiiGoCYKPzD17I0onDYIjtdVS5iO9BsckxV/a
wQWbyONUwbGCfeNSVAzZbg
-> ssh-ed25519 /vwQcQ jwf7fwy4wKz7q761DNu8SyFHGgFlwq4P/Pn44Nido3E
1q/jvt/vtD4ziY3eCDqk1XwMPpNUd80POTV2VVsumCE
-> ssh-ed25519 0R97PA XeuziQ+wsoh0KSHXk5Qkl1kQOsAu1Ax1zTg13+XWd3M
B1KHKm3tx/EsnE6hY+w7ya1ilhYiUs9AbwARHNkJi90
--- JgQA6gCYZu8xcbXEl9VypccEIBO6uAJIdhBefr4doRQ
V3ZðõÚ<EFBFBD>ç-·Ý.ê«sòÀ³3 ÎiS‰a5#¿Ð{åÔÈ®Dý˜YêNèãëù«ýoL+ÔÝ#M<sws P»¢+í¢Ó‰ïBDoÊξÆÏuFí”Ç^Â¥•<C2A5>ÝG@ÍM×ÛãÐØì q¦ºG^Qb s<;ÂÒnC+ÖÊxª_­Úì]S<16>Ð

View file

@ -1,29 +0,0 @@
# This file records how many job slots each builder provides
# and which nix features it supports
let
genBuilders = { offset ? 0, count, f }: builtins.genList (x: rec { name = "builder-${toString (offset + x)}"; value = f name; }) count;
in builtins.listToAttrs (
# The first 8 builders are general purpose hydra builders
genBuilders { count = 8; f = name: {
cores = 8;
max-jobs = 8;
supported-features = [ "kvm" "nixos-test" ];
required-features = [ ];
}; }
++
# The last 2 builders are exclusively for big-parallel
genBuilders { offset = 8; count = 2; f = name: {
cores = 20;
max-jobs = 1;
supported-features = [ "kvm" "nixos-test" "big-parallel" ];
required-features = [ "big-parallel" ];
}; }
++
# These are not currently used for hydra
genBuilders { offset = 10; count = 1; f = name: {
cores = 8;
max-jobs = 8;
supported-features = [ "kvm" "nixos-test" "big-parallel" ];
required-features = [ ];
}; }
)
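
For illustration, a sketch of what one genBuilders call evaluates to (hypothetical f, not one of the assignments above):

  genBuilders { offset = 8; count = 2; f = name: { inherit name; }; }
  # => [ { name = "builder-8"; value = { name = "builder-8"; }; }
  #      { name = "builder-9"; value = { name = "builder-9"; }; } ]
  # builtins.listToAttrs then turns this list into { builder-8 = ...; builder-9 = ...; }.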

View file

@ -1,189 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.bagel.baremetal.builders;
in
{
imports = [ ./netboot.nix ];
options = {
bagel.baremetal.builders = {
enable = lib.mkEnableOption "baremetal bagel oven";
netboot = lib.mkEnableOption "netboot";
num = lib.mkOption {
type = lib.types.int;
};
};
};
config = lib.mkIf cfg.enable {
boot.initrd.availableKernelModules = [ "ahci" "ehci_pci" "usb_storage" "usbhid" "sd_mod" ];
boot.initrd.kernelModules = [ "dm-snapshot" ];
users.users.builder = {
isSystemUser = true;
group = "nogroup";
home = "/var/empty";
shell = "/bin/sh";
openssh.authorizedKeys.keys = [
# Do not hardcode Hydra's public key, selectively
# add the keys of the coordinators that require us.
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAvUT9YBig9LQPHgypIBHQuC32XqDKxlFZ2CfgDi0ZKx"
];
};
users.users.buildbot = {
isSystemUser = true;
group = "nogroup";
home = "/var/empty";
shell = "/bin/sh";
openssh.authorizedKeys.keys = [
# Do not hardcode Buildbot's public key, selectively
# add the keys of the coordinators that require us.
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGMnOLLX0vGTZbSJrUmF9ZFXt/NIId/MUrEpXmL2vxod"
];
};
nix.settings = {
trusted-users = [ "builder" "buildbot" ];
inherit ((import ./assignments.nix).${config.networking.hostName}) max-jobs cores;
};
nixpkgs.hostPlatform = "x86_64-linux";
hardware.cpu.intel.updateMicrocode = true;
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.initrd.systemd.enable = true;
boot.initrd.services.lvm.enable = true;
boot.kernel.sysctl."fs.xfs.xfssyncd_centisecs" = "12000";
fileSystems = lib.mkMerge [
(lib.mkIf (!cfg.netboot) {
"/" = {
device = "/dev/disk/by-label/root";
fsType = "xfs";
};
"/boot" = {
device = "/dev/disk/by-label/BOOT";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
})
{
"/mnt" = {
device = "/dev/disk/by-label/hydra";
fsType = "xfs";
options = ["logbsize=256k"];
};
# We want the tmp filesystem on the same filesystem as the hydra store, so that builds can use reflinks
"/tmp" = {
device = "/mnt/tmp";
options = [ "bind" ];
};
}
];
swapDevices = lib.optionals (!cfg.netboot) [
{
device = "/swapfile";
size = 50 * 1024; # 50GiB
}
];
zramSwap = {
enable = true;
memoryPercent = 25;
};
boot.kernelParams = [
"console=tty1"
"console=ttyS0,115200"
];
networking.useNetworkd = true;
networking.hostName = "builder-${toString cfg.num}";
networking.domain = "wob01.infra.forkos.org";
systemd.network = {
netdevs = {
"40-uplink" = {
netdevConfig = {
Kind = "bond";
Name = "uplink";
};
bondConfig = {
Mode = "802.3ad";
TransmitHashPolicy = "layer3+4";
};
};
};
networks = {
"40-eno1" = {
name = "eno1";
bond = [ "uplink" ];
};
"40-eno2" = {
name = "eno2";
bond = [ "uplink" ];
};
};
};
networking.interfaces.uplink.ipv6.addresses = [
{ address = "2a01:584:11::1:${toString cfg.num}"; prefixLength = 64; }
];
networking.defaultGateway6 = { interface = "uplink"; address = "2a01:584:11::1"; };
deployment.targetHost = "2a01:584:11::1:${toString cfg.num}";
deployment.tags = [ "builders" ];
# Why can't we have nice things? https://bugs.openjdk.org/browse/JDK-8170568
services.coredns = {
enable = true;
config = ''
. {
bind lo
forward . 2001:4860:4860::6464
template ANY A { rcode NOERROR }
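# Assumption: the forward target is Google's DNS64 resolver, and the empty
# NOERROR answer for A queries keeps software from stalling on IPv4 lookups
# on these IPv6-only builders.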
}
'';
};
services.resolved.enable = false;
networking.resolvconf.useLocalResolver = true;
# Hydra blasts ssh connections and does not multiplex. Loosen some of the
# rate limiting.
services.openssh.settings = {
MaxStartups = "500:30:1000";
};
systemd.services.hydra-gc = {
wantedBy = [ "multi-user.target" ];
description = "Nix Garbage Collector";
script = ''
while : ; do
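# stat -f prints "100-(100*%a/%b)" with %a (free blocks) and %b (total blocks)
# substituted; the outer $((...)) then evaluates it to the used-space percentage.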
percent_filled=$(($(stat -f --format="100-(100*%a/%b)" /mnt)))
if [ "$percent_filled" -gt "85" ]; then
${config.nix.package.out}/bin/nix-store --gc --max-freed 80G --store /mnt
else
break
fi
done
'';
serviceConfig.Type = "oneshot";
serviceConfig.User = "builder";
};
systemd.timers.hydra-gc = {
timerConfig.OnUnitInactiveSec = "10min";
wantedBy = [ "timers.target" ];
};
systemd.timers.hydra-gc.timerConfig.Persistent = true;
bagel.sysadmin.enable = true;
environment.systemPackages = [ pkgs.ipmitool ];
system.stateVersion = "24.05";
};
}

View file

@ -1,169 +0,0 @@
{ modulesPath, pkgs, lib, config, extendModules, ... }@node:
let
cfg = config.bagel.baremetal.builders;
in
{
config = lib.mkIf (cfg.enable && cfg.netboot) {
systemd.services.sshd.after = [ "provision-ssh-hostkey.service" ];
systemd.services.provision-ssh-hostkey = {
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
mkdir -p /etc/ssh
umask 0077
until ${pkgs.iputils}/bin/ping -c 1 vpn-gw.wob01.infra.forkos.org; do sleep 1; done
${pkgs.curl}/bin/curl --local-port 25-1024 https://vpn-gw.wob01.infra.forkos.org/${config.networking.hostName}/ssh_host_ed25519_key > /etc/ssh/ssh_host_ed25519_key
# Run the activation script again to trigger agenix decryption
/run/current-system/activate
'';
};
system.build = {
# Build a kernel and initramfs which will download the IPXE script from hydra using
# u-root pxeboot tool and kexec into the final netbooted system.
notipxe = import (modulesPath + "/..") {
system = "x86_64-linux";
configuration =
{ pkgs, config, ... }:
{
system.stateVersion = "24.11";
boot.initrd.availableKernelModules = [ "ahci" "ehci_pci" "usb_storage" "usbhid" "sd_mod" "igb" "bonding" ];
boot.kernelParams = [ "console=ttyS0,115200" "panic=1" "boot.panic_on_fail" ];
#boot.initrd.systemd.emergencyAccess = true;
networking.hostName = "${node.config.networking.hostName}-boot";
nixpkgs.overlays = import ../../overlays;
boot.loader.grub.enable = false;
fileSystems."/".device = "bogus"; # this config will never be booted
boot.initrd.systemd.enable = true;
boot.initrd.systemd.network = {
enable = true;
networks = node.config.systemd.network.networks;
netdevs = node.config.systemd.network.netdevs;
};
boot.initrd.systemd.storePaths = [
"${pkgs.u-root}/bin/pxeboot"
"${pkgs.iputils}/bin/ping"
];
boot.initrd.systemd.services.kexec = {
serviceConfig.Restart = "on-failure";
serviceConfig.Type = "oneshot";
wantedBy = [ "initrd-root-fs.target" ];
before = [ "sysroot.mount" ];
script = ''
ln -sf /dev/console /dev/tty
until ${pkgs.iputils}/bin/ping -c 1 hydra.forkos.org; do sleep 1; done
${pkgs.u-root}/bin/pxeboot -v -ipv4=false -file https://hydra.forkos.org/job/infra/main/${node.config.networking.hostName}/latest/download-by-type/file/ipxe
'';
};
boot.initrd.systemd.contents."/etc/ssl/certs/ca-certificates.crt".source = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
boot.initrd.services.resolved.enable = false;
boot.initrd.systemd.contents."/etc/resolv.conf".text = ''
nameserver 2001:4860:4860::6464
'';
boot.initrd.systemd.contents."/etc/systemd/journald.conf".text = ''
[Journal]
ForwardToConsole=yes
MaxLevelConsole=debug
'';
# Provide a bootable USB drive image
system.build.usbImage = pkgs.callPackage ({ stdenv, runCommand, dosfstools, e2fsprogs, mtools, libfaketime, util-linux, nukeReferences }:
runCommand "boot-img-${node.config.networking.hostName}" {
nativeBuildInputs = [ dosfstools e2fsprogs libfaketime mtools util-linux ];
outputs = [ "out" "firmware_part" ];
} ''
export img=$out
truncate -s 40M $img
sfdisk $img <<EOF
label: gpt
label-id: F222513B-DED1-49FA-B591-20CE86A2FE7F
type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, bootable
EOF
# Create a FAT32 /boot/firmware partition of suitable size into firmware_part.img
eval $(partx $img -o START,SECTORS --nr 1 --pairs)
truncate -s $((2081 * 512 + SECTORS * 512)) firmware_part.img
mkfs.vfat --invariant -i 2e24ec82 -n BOOT firmware_part.img
# Populate the files intended for /boot/firmware
mkdir -p firmware/EFI/BOOT firmware/loader/entries
cp ${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot*.efi firmware/EFI/BOOT/BOOT${lib.toUpper stdenv.hostPlatform.efiArch}.EFI
cat > firmware/loader/loader.conf << EOF
default foo
EOF
cat > firmware/loader/entries/default.conf << EOF
title Default
linux /EFI/${pkgs.stdenv.hostPlatform.linux-kernel.target}
initrd /EFI/initrd
options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
EOF
cp ${config.system.build.kernel}/${pkgs.stdenv.hostPlatform.linux-kernel.target} firmware/EFI/${pkgs.stdenv.hostPlatform.linux-kernel.target}
cp ${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile} firmware/EFI/initrd
find firmware -exec touch --date=2000-01-01 {} +
# Copy the populated /boot/firmware into the SD image
cd firmware
# Force a fixed order in mcopy for better determinism, and avoid file globbing
for d in $(find . -type d -mindepth 1 | sort); do
faketime "2000-01-01 00:00:00" mmd -i ../firmware_part.img "::/$d"
done
for f in $(find . -type f | sort); do
mcopy -pvm -i ../firmware_part.img "$f" "::/$f"
done
cd ..
# Verify the FAT partition before copying it.
fsck.vfat -vn firmware_part.img
dd conv=notrunc if=firmware_part.img of=$img seek=$START count=$SECTORS
cp firmware_part.img $firmware_part
''
) {};
}
;
};
# This is the config which will actually be booted
netbootVariant = extendModules {
modules = [
(
{ modulesPath, ... }:
{
imports = [ (modulesPath + "/installer/netboot/netboot.nix") ];
}
)
];
};
# A derivation combining all the artifacts required for netbooting for the hydra job
netbootDir = let
kernelTarget = pkgs.stdenv.hostPlatform.linux-kernel.target;
build = config.system.build.netbootVariant.config.system.build;
in
pkgs.symlinkJoin {
name = "netboot";
paths = [
build.netbootRamdisk
build.kernel
build.netbootIpxeScript
];
postBuild = ''
mkdir -p $out/nix-support
echo "file ${kernelTarget} $out/${kernelTarget}" >> $out/nix-support/hydra-build-products
echo "file initrd $out/initrd" >> $out/nix-support/hydra-build-products
echo "file ipxe $out/netboot.ipxe" >> $out/nix-support/hydra-build-products
'';
preferLocalBuild = true;
};
};
};
}

View file

@ -1,142 +0,0 @@
{
nodes,
config,
lib,
pkgs,
...
}:
let
cfg = config.bagel.services.buildbot;
cfgGerrit = nodes.gerrit01.config.bagel.services.gerrit;
ssh-keys = import ../../common/ssh-keys.nix;
inherit (lib) mkEnableOption mkOption mkIf types;
in
{
options.bagel.services.buildbot = {
enable = mkEnableOption "Buildbot";
domain = mkOption {
type = types.str;
};
builders = mkOption {
type = types.listOf types.str;
description = "List of builders to configure for Buildbot";
example = [ "builder-2" "builder-3" ];
};
};
config = mkIf cfg.enable {
networking.firewall.allowedTCPPorts = [ 80 443 ];
age.secrets.buildbot-worker-password.file = ../../secrets/buildbot-worker-password.age;
age.secrets.buildbot-oauth-secret.file = ../../secrets/buildbot-oauth-secret.age;
age.secrets.buildbot-workers.file = ../../secrets/buildbot-workers.age;
age.secrets.buildbot-service-key.file = ../../secrets/buildbot-service-key.age;
age.secrets.buildbot-signing-key = {
file = ../../secrets/buildbot-signing-key.age;
owner = "buildbot-worker";
group = "buildbot-worker";
};
age.secrets.buildbot-remote-builder-key = {
file = ../../secrets/buildbot-remote-builder-key.age;
owner = "buildbot-worker";
group = "buildbot-worker";
};
services.nginx.virtualHosts.${cfg.domain} = {
forceSSL = true;
enableACME = true;
extraConfig = ''
add_header Access-Control-Allow-Credentials 'true' always;
add_header Access-Control-Allow-Origin 'https://cl.forkos.org' always;
'';
};
services.buildbot-nix.worker = {
enable = true;
workerPasswordFile = config.age.secrets.buildbot-worker-password.path;
# All credits to eldritch horrors for this beauty.
workerArchitectures =
{
# nix-eval-jobs runs under a lock, error reports do not (but are cheap)
other = 8;
} // (
lib.filterAttrs
(n: v: lib.elem n config.services.buildbot-nix.coordinator.buildSystems)
(lib.zipAttrsWith
(_: lib.foldl' lib.add 0)
(lib.concatMap
(m: map (s: { ${s} = m.maxJobs; }) m.systems)
config.services.buildbot-nix.coordinator.buildMachines))
);
};
services.buildbot-nix.coordinator = {
enable = true;
inherit (cfg) domain;
debugging.enable = true;
oauth2 = {
name = "Lix";
clientId = "forkos-buildbot";
clientSecretFile = config.age.secrets.buildbot-oauth-secret.path;
resourceEndpoint = "https://identity.lix.systems";
authUri = "https://identity.lix.systems/realms/lix-project/protocol/openid-connect/auth";
tokenUri = "https://identity.lix.systems/realms/lix-project/protocol/openid-connect/token";
};
# TODO(raito): this is not really necessary, we never have remote buildbot workers.
# we can replace all of this with automatic localworker generation on buildbot-nix side.
workersFile = config.age.secrets.buildbot-workers.path;
allowedOrigins = [
"*.forkos.org"
];
# TODO(raito): is that really necessary when we can just collect buildMachines' systems?
buildSystems = [
"x86_64-linux"
];
buildMachines = map (n: {
hostName = nodes.${n}.config.networking.fqdn;
protocol = "ssh-ng";
# Follows Hydra.
maxJobs = 8;
sshKey = config.age.secrets.buildbot-remote-builder-key.path;
sshUser = "buildbot";
systems = [ "x86_64-linux" ];
supportedFeatures = nodes.${n}.config.nix.settings.system-features;
# Contrary to how Nix works, here we can specify non-base64 public host keys.
publicHostKey = ssh-keys.machines.${n};
}
) cfg.builders;
gerrit = {
domain = cfgGerrit.canonicalDomain;
# Manually managed account…
# TODO: https://git.lix.systems/the-distro/infra/issues/69
username = "buildbot";
port = cfgGerrit.port;
privateKeyFile = config.age.secrets.buildbot-service-key.path;
projects = [
"buildbot-test"
"nixpkgs"
"infra"
];
};
evalWorkerCount = 6;
evalMaxMemorySize = "4096";
signingKeyFile = config.age.secrets.buildbot-signing-key.path;
};
nix.settings.keep-derivations = true;
nix.gc = {
automatic = true;
dates = "hourly";
};
};
}
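
The workerArchitectures fold above is dense; as a sketch (hypothetical machines, not the live config), it sums maxJobs per system across buildMachines, keeps only systems listed in buildSystems, and adds a fixed "other" slot count:

  # buildMachines = [ { maxJobs = 8; systems = [ "x86_64-linux" ]; }
  #                   { maxJobs = 4; systems = [ "x86_64-linux" "i686-linux" ]; } ];
  # buildSystems  = [ "x86_64-linux" ];
  # => { other = 8; x86_64-linux = 12; }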

View file

@ -1,192 +0,0 @@
{ lib, config, pkgs, ... }:
# FIXME(raito): I'm really really really not happy with this design of NixOS module, clean up all of this someday.
let
inherit (lib) mkEnableOption mkOption types mkIf mapAttrsToList mkPackageOption concatStringsSep mkMerge;
cfg = config.bagel.nixpkgs.channel-scripts;
toml = pkgs.formats.toml { };
configFile = toml.generate "forkos.toml" cfg.settings;
orderLib = import ./service-order.nix { inherit lib; };
makeUpdateJob = channelName: mainJob: {
name = "update-${channelName}";
value = {
description = "Update channel ${channelName}";
path = with pkgs; [ git ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = false;
User = "channel-scripts";
DynamicUser = true;
StateDirectory = "channel-scripts";
MemoryHigh = "80%";
EnvironmentFile = [
cfg.releaseBucketCredentialsFile
];
Environment = cfg.extraEnvironment;
# TODO: we should have our own secret for this.
LoadCredential = [ "password:${config.age.secrets.alloy-push-password.path}" ];
};
unitConfig.After = [ "networking.target" ];
script =
''
# A stateful copy of nixpkgs
dir=/var/lib/channel-scripts/nixpkgs
if ! [[ -e $dir ]]; then
git clone --bare ${cfg.nixpkgsUrl} $dir
fi
GIT_DIR=$dir git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*'
CREDENTIAL=$(echo -en "promtail:$(cat $CREDENTIALS_DIRECTORY/password)" | base64)
export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Basic $CREDENTIAL"
# TODO: use escapeShellArgs
exec ${cfg.package}/bin/mirror-forkos -c ${configFile} ${concatStringsSep " " cfg.extraArgs} apply ${channelName} ${mainJob}
'';
};
};
updateJobs = orderLib.mkOrderedChain (mapAttrsToList (n: { job, ... }: makeUpdateJob n job) cfg.channels);
channelOpts = { ... }: {
options = {
job = mkOption {
type = types.str;
example = "nixos/trunk-combined/tested";
};
variant = mkOption {
type = types.enum [ "primary" "small" "darwin" "aarch64" ];
example = "primary";
};
status = mkOption {
type = types.enum [ "beta" "stable" "deprecated" "unmaintained" "rolling" ];
example = "rolling";
};
};
};
in
{
options.bagel.nixpkgs.channel-scripts = {
enable = mkEnableOption ''the channel scripts.
These fast-forward the channel branches, which are read-only except for this
privileged bot, based on our Hydra acceptance tests.
'';
otlp.enable = mkEnableOption "the OTLP export process";
s3 = {
release = mkOption {
type = types.str;
};
channel = mkOption {
type = types.str;
};
};
package = mkPackageOption pkgs "mirror-forkos" { };
settings = mkOption {
type = types.attrsOf types.anything;
};
nixpkgsUrl = mkOption {
type = types.str;
default = "https://cl.forkos.org/nixpkgs.git";
description = "URL to the nixpkgs repository to clone and to push to";
};
binaryCacheUrl = mkOption {
type = types.str;
default = "https://cache.forkos.org";
description = "URL to the binary cache";
};
baseUriForGitRevisions = mkOption {
type = types.str;
description = "Base URI to generate link to a certain revision";
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
description = "Extra arguments passed to the mirroring program";
};
releaseBucketCredentialsFile = mkOption {
type = types.path;
description = ''Path to the release bucket credentials file exporting S3-style environment variables.
For example, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` for the S3 operations to work.
'';
};
deployKeyFile = mkOption {
type = types.path;
description = ''Path to the private SSH key which is allowed to deploy things to the protected channel references on the Git repository.
'';
};
hydraUrl = mkOption {
type = types.str;
default = "https://hydra.forkos.org";
description = "URL to the Hydra instance";
};
channels = mkOption {
type = types.attrsOf (types.submodule channelOpts);
description = "List of channels to mirror";
};
extraEnvironment = mkOption {
type = types.listOf types.str;
};
};
config = mkIf cfg.enable {
bagel.nixpkgs.channel-scripts.extraEnvironment = mkMerge [
([
"RUST_LOG=info"
])
(mkIf cfg.otlp.enable [
''OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="https://tempo.forkos.org/v1/traces"''
])
];
bagel.nixpkgs.channel-scripts.settings = {
hydra_uri = cfg.hydraUrl;
binary_cache_uri = cfg.binaryCacheUrl;
base_git_uri_for_revision = cfg.baseUriForGitRevisions;
nixpkgs_dir = "/var/lib/channel-scripts/nixpkgs";
s3_release_bucket_name = cfg.s3.release;
s3_channel_bucket_name = cfg.s3.channel;
};
users.users.channel-scripts = {
description = "Channel scripts user";
isSystemUser = true;
group = "channel-scripts";
};
users.groups.channel-scripts = {};
systemd.services = (lib.listToAttrs updateJobs) // {
"update-all-channels" = {
description = "Start all channel updates.";
unitConfig = {
After = map
(service: "${service.name}.service")
updateJobs;
Wants = map
(service: "${service.name}.service")
updateJobs;
};
script = "true";
};
};
systemd.timers."update-all-channels" = {
description = "Start all channel updates.";
wantedBy = [ "timers.target" ];
timerConfig = {
OnUnitInactiveSec = 600;
OnBootSec = 900;
AccuracySec = 300;
};
};
};
}
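
A hypothetical channels entry, to show the shape the submodule expects (the channel name and job path are illustrative only):

  bagel.nixpkgs.channel-scripts.channels."forkos-unstable" = {
    job = "forkos/nixos/main/tested";
    variant = "primary";
    status = "rolling";
  };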

View file

@ -1,63 +0,0 @@
# Vendored from https://raw.githubusercontent.com/NixOS/infra/master/lib/service-order.nix
# TODO: get rid of me?
# Ordering Services
#
# Given a set of services, make them run one at a time in a specific
# order, on a timer.
{ lib }:
{
# Given a list of systemd service, give each one an After
# attribute, so they start in a specific order. The returned
# list can be converted in to a systemd.services attrset with
# `lib.listToAttrs`.
#
# Example:
#
# mkOrderedChain [
# { name = "foo"; value = { script = "true"; }; }
# { name = "bar"; value = { script = "true"; }; }
# ]
#
# => [
# {
# name = "foo";
# value = {
# script = "true";
# unitConfig = { After = []; };
# };
# }
# {
# name = "bar";
# value = {
# script = "true";
# unitConfig = { After = [ "foo.service" ]; };
# };
# }
#
mkOrderedChain = jobs: let
unitConfigFrom = job: job.unitConfig or {};
afterFrom = job: (unitConfigFrom job).After or [];
previousFrom = collector:
if collector ? previous
then [collector.previous]
else [];
ordered = builtins.foldl'
(collector: item: {
services = collector.services
++ [{
inherit (item) name;
value = item.value // {
unitConfig = (unitConfigFrom item.value) //
{
After = (afterFrom item.value) ++
(previousFrom collector);
};
};
}];
previous = "${item.name}.service";
})
{ services = []; }
jobs;
in ordered.services;
}
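
A sketch of how this is consumed (mirroring the channel-scripts module above, which feeds the result through lib.listToAttrs):

  # systemd.services = lib.listToAttrs (orderLib.mkOrderedChain [
  #   { name = "update-a"; value = { script = "true"; }; }
  #   { name = "update-b"; value = { script = "true"; }; }
  # ]);
  # "update-b" ends up with unitConfig.After = [ "update-a.service" ].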

View file

@ -1,17 +1,9 @@
{
imports = [
./gerrit
./channel-scripts
./hydra
./matrix
./monitoring
./netbox
./ofborg
./postgres
./forgejo
./baremetal-builder
./buildbot
./newsletter
./s3-revproxy
./netbox
./gerrit
./monitoring
];
}

View file

@ -1,143 +0,0 @@
{ pkgs, lib, config, ... }:
let
cfg = config.bagel.services.forgejo;
inherit (lib) mkIf mkEnableOption mkOption types;
domain = "git.forkos.org";
in
{
options.bagel.services.forgejo = {
enable = mkEnableOption "Forgejo";
sshBindAddr = mkOption {
type = types.str;
};
};
config = mkIf cfg.enable {
services.forgejo = {
enable = true;
package = pkgs.callPackage ../../pkgs/forgejo { };
database = {
type = "postgres";
createDatabase = true;
};
lfs.enable = true;
settings = {
DEFAULT = {
APP_NAME = "ForkOS";
};
server = {
PROTOCOL = "http+unix";
ROOT_URL = "https://${domain}/";
DOMAIN = "${domain}";
BUILTIN_SSH_SERVER_USER = "git";
SSH_PORT = 22;
SSH_LISTEN_HOST = cfg.sshBindAddr;
START_SSH_SERVER = true;
};
session = {
PROVIDER = "redis";
PROVIDER_CONFIG = "network=unix,addr=${config.services.redis.servers.forgejo.unixSocket},db=0";
COOKIE_NAME = "session";
};
service = {
DISABLE_REGISTRATION = true;
DEFAULT_KEEP_EMAIL_PRIVATE = true;
};
"service.explore" = {
DISABLE_USERS_PAGE = true;
};
oauth2_client = {
REGISTER_EMAIL_CONFIRM = false;
ENABLE_AUTO_REGISTRATION = true;
};
# TODO: transactional mails
cache = {
ADAPTER = "redis";
HOST = "network=unix,addr=${config.services.redis.servers.forgejo.unixSocket},db=1";
ITEM_TTL = "72h"; # increased from default 16h
};
ui = {
SHOW_USER_EMAIL = false;
};
repository = {
# Forks in forgejo are surprisingly expensive because they are full git clones.
# If we do want to enable forks, we can write a small patch that disables
# only for repositories that are as large as nixpkgs.
DISABLE_FORKS = true;
};
packages = {
# Forgejo's various package registries can easily take up a lot of space.
# We could either store the blobs on some slower disks but larger, or even
# better, use an s3 bucket for it. But until we actually have a use-case for
# this feature, we will simply keep it disabled for now.
ENABLED = false;
};
indexer = {
REPO_INDEXER_REPO_TYPES = "sources,mirrors,templates"; # skip forks
REPO_INDEXER_ENABLED = true;
ISSUE_INDEXER_TYPE = "bleve";
};
"git.timeout" = {
MIGRATE = 3600; # increase from default 600 (seconds) for something as large as nixpkgs on a slow uplink
};
log = {
LEVEL = "Warn";
};
};
};
systemd.services.forgejo = {
serviceConfig = lib.optionalAttrs (config.services.forgejo.settings.server.SSH_PORT < 1024) {
AmbientCapabilities = lib.mkForce "CAP_NET_BIND_SERVICE";
CapabilityBoundingSet = lib.mkForce "CAP_NET_BIND_SERVICE";
PrivateUsers = lib.mkForce false;
};
# start Forgejo *after* sshd.service, so in case Forgejo tries to wildcard bind :22 due to
# a bug or whatever, we don't lose OpenSSH in a race.
wants = [ "sshd.service" "redis-forgejo.service" ];
requires = [ "sshd.service" "redis-forgejo.service" ];
};
services.redis.servers.forgejo = {
enable = true;
user = "forgejo";
};
services.nginx = {
enable = true;
virtualHosts.${domain} = {
enableACME = true;
forceSSL = true;
locations."/".proxyPass = "http://unix:${config.services.forgejo.settings.server.HTTP_ADDR}";
};
};
networking.firewall.allowedTCPPorts = [
80
443
config.services.forgejo.settings.server.SSH_PORT
];
};
}

View file

@ -1,113 +0,0 @@
/* Inspired from the Lix setup.
* Original-Author: puckipedia
*/
Gerrit.install((plugin) => {
// TODO: can we just use `plugin.serverInfo().plugin` and control the settings over there.
const configuration = {
baseUri: @BASE_URI@,
supportedProjects: @SUPPORTED_PROJECTS@,
};
function makeBuildbotUri(suffix) {
return `${configuration.baseUri}/${suffix}`;
}
let builders = [];
let fetchBuilders = async () => {
if (builders.length > 0) return;
let data = await (await fetch(makeBuildbotUri(`api/v2/builders`), { credentials: 'include' })).json();
builders = data.builders;
};
let checksProvider;
checksProvider = {
async fetch({ repo, patchsetSha, changeNumber, patchsetNumber }, runBefore = false) {
if (!configuration.supportedProjects.includes(repo)) {
return { responseCode: 'OK' };
}
let num = changeNumber.toString(10);
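// Gerrit stores patchsets under refs/changes/<last two digits>/<change>/<patchset>,
// e.g. change 1234, patchset 2 lives at refs/changes/34/1234/2.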
let branch = `refs/changes/${num.substr(-2)}/${num}/${patchsetNumber}`;
let changeFetch = await fetch(makeBuildbotUri(`api/v2/changes?limit=1&order=-changeid&revision=${patchsetSha}&branch=${branch}`), { credentials: 'include' });
if (changeFetch.status == 400) {
if ((await changeFetch.json()).error === 'invalid origin' && !runBefore) {
return await checksProvider.fetch({ repo, patchsetSha, changeNumber, patchsetNumber }, true);
}
return { responseCode: 'OK' };
} else if (changeFetch.status === 403) {
return { responseCode: 'NOT_LOGGED_IN', loginCallback() {
window.open(configuration.baseUri);
} };
}
let changes = await changeFetch.json();
if (changes.meta.total === 0) {
return { responseCode: 'OK' };
}
let { changeid } = changes.changes[0];
let { builds } = await (await fetch(makeBuildbotUri(`api/v2/changes/${changeid}/builds?property=owners&property=workername`), { credentials: 'include' })).json();
await fetchBuilders();
let links = [];
let runs = [];
for (let build of builds) {
let name = `unknown builder ${build.builderid}`;
for (let builder of builders) {
if (builder.builderid === build.builderid) {
name = builder.name;
break;
}
}
if (name === `${repo}/nix-eval`) {
links.push({
url: makeBuildbotUri(`#/builders/${build.builderid}/builds/${build.number}`),
primary: true,
icon: 'external',
});
}
let checkrun = {
attempt: build.buildrequestid,
// FIXME: generalize this accordingly once auto-discovery is available.
checkName: name.replace(/^hydraJobs\./, ''),
externalId: build.buildrequestid.toString(),
status: build.complete ? 'COMPLETED' : (typeof build.started_at !== 'number' ? 'SCHEDULED' : 'RUNNING'),
checkLink: makeBuildbotUri(`#/builders/${build.builderid}/builds/${build.number}`),
labelName: 'Verified',
results: [],
links: [{
url: makeBuildbotUri(`#/builders/${build.builderid}/builds/${build.number}`),
primary: true,
icon: 'external',
}],
};
if (build.started_at !== null) {
checkrun.startedTimestamp = new Date(build.started_at * 1000);
}
if (build.complete_at !== null) {
checkrun.finishedTimestamp = new Date(build.complete_at * 1000);
}
if (build.results !== null) {
checkrun.results = [{
category: build.results < 2 ? 'SUCCESS' : 'ERROR',
summary: build.state_string,
}];
}
runs.push(checkrun);
}
return { responseCode: 'OK', runs, links };
}
};
plugin.checks().register(checksProvider);
});

View file

@ -3,55 +3,32 @@
{ pkgs, config, lib, ... }:
let
inherit (lib) mkEnableOption mkIf mkOption types head;
inherit (lib) mkEnableOption mkIf mkOption types;
cfgGerrit = config.services.gerrit;
cfg = config.bagel.services.gerrit;
jdk = pkgs.openjdk21_headless;
in
{
options.bagel.services.gerrit = {
enable = mkEnableOption "Gerrit";
pyroscope.enable = mkEnableOption ''Pyroscope client,
this will send profiling of all Java processes on the current host
to our Pyroscope instance.
'';
domains = mkOption {
type = types.listOf types.str;
description = "List of domains that Gerrit will answer to";
};
canonicalDomain = mkOption {
type = types.str;
description = "Canonical domain for this Gerrit instance";
default = head cfg.domains;
};
data = mkOption {
type = types.path;
default = "/var/lib/gerrit";
description = "Root of data directory for the Gerrit";
};
port = mkOption {
type = types.port;
default = 29418;
readOnly = true;
description = "Port for the Gerrit SSH server";
};
};
imports = [
./www.nix
./one-way-sync.nix
];
config = mkIf cfg.enable {
networking.firewall.allowedTCPPorts = [ cfg.port ];
age.secrets.alloy-push-password.file = ../../secrets/metrics-push-password.age;
networking.firewall.allowedTCPPorts = [ 29418 ];
environment.systemPackages = [ jdk
pkgs.git
# https://gerrit.googlesource.com/gerrit/+/refs/heads/master/contrib/git-gc-preserve
pkgs.git-gc-preserve
];
environment.systemPackages = [ pkgs.openjdk17_headless ];
fileSystems."/var/lib/gerrit" = mkIf (cfg.data != "/var/lib/gerrit") {
device = cfg.data;
@ -64,64 +41,6 @@ in
};
users.groups.git = {};
services.alloy = {
enable = cfg.pyroscope.enable;
extraFlags = [
# Debugging interface.
"--server.http.listen-addr=127.0.0.1:15555"
];
};
systemd.services.alloy.serviceConfig = {
User = lib.mkForce "root";
Group = lib.mkForce "root";
DynamicUser = lib.mkForce false;
};
systemd.services.alloy.serviceConfig.LoadCredential = [ "password:${config.age.secrets.alloy-push-password.path}" ];
environment.etc."alloy/config.alloy".text = ''
pyroscope.write "production" {
endpoint {
url = "https://pyroscope.forkos.org"
basic_auth {
username = "promtail"
password_file = "/run/credentials/alloy.service/password"
}
}
}
discovery.process "all" {
refresh_interval = "60s"
discover_config {
cwd = true
exe = true
commandline = true
username = true
uid = true
container_id = true
}
}
discovery.relabel "java" {
targets = discovery.process.all.targets
rule {
action = "keep"
regex = ".*/java$"
source_labels = ["__meta_process_exe"]
}
}
pyroscope.java "java" {
targets = discovery.relabel.java.output
forward_to = [pyroscope.write.production.receiver]
profiling_config {
interval = "60s"
alloc = "512k"
cpu = true
sample_rate = 100
lock = "1ms"
}
}
'';
services.gerrit = {
enable = true;
listenAddress = "[::]:4778"; # 4778 - grrt
@ -137,31 +56,20 @@ in
"webhooks"
];
plugins = with pkgs.gerritPlugins; [
plugins = with pkgs.gerritPlugins; [
oauth
metrics-reporter-prometheus
# Buildbot checks plugin (writeText because services.gerrit.plugins expects packages)
(pkgs.runCommand "checks.js" {
BASE_URI = builtins.toJSON "https://buildbot.forkos.org";
SUPPORTED_PROJECTS = builtins.toJSON [
"infra"
"nixpkgs"
"buildbot-test"
];
}
''
echo "configuring buildbot checks plugin for $BASE_URI with $SUPPORTED_PROJECTS project list"
substitute ${./checks.js} $out \
--replace-fail "@BASE_URI@" "$BASE_URI" \
--replace-fail "@SUPPORTED_PROJECTS@" "$SUPPORTED_PROJECTS"
'')
];
package = pkgs.gerrit;
jvmHeapLimit = "32g";
jvmPackage = jdk;
# In some NixOS channel bump, the default version of OpenJDK has
# changed to one that is incompatible with our current version of
# Gerrit.
#
# TODO(tazjin): Update Gerrit and remove this when possible.
jvmPackage = pkgs.openjdk17_headless;
settings = {
# Performance settings
@ -178,16 +86,14 @@ in
receive.timeout = "4min";
# Default is 0, infinite.
# When system is under pressure and there's too many packfiles
# the search for reuse can take a stupid amount of time.
transfer.timeout = "60min";
transfer.timeout = "30min";
# We may overshoot but it's OK.
core.packedGitWindowSize = "256k";
# Sum of all current packfiles is ~1.2G
# Largest packfile is 906MB.
# Average packfile is ~5-10MB.
core.packedGitLimit = "2g";
core.packedGitLimit = "1g";
# We have plenty of memory, let's avoid file system cache → Gerrit needless copies.
core.packedGitUseStrongRefs = true;
core.packedGitOpenFiles = 4096;
@ -198,7 +104,7 @@ in
core.packedGitMmap = true;
## Takes more CPU but the transfer is smaller.
pack.deltacompression = true;
pack.deltacompression = false;
pack.threads = 8;
# FIXME(raito):
@ -211,7 +117,7 @@ in
# Other settings
log.jsonLogging = true;
log.textLogging = false;
sshd.advertisedAddress = "${cfg.canonicalDomain}:${toString cfg.port}";
sshd.advertisedAddress = "cl.forkos.org:29418";
cache.web_sessions.maxAge = "3 months";
plugins.allowRemoteAdmin = false;
change.enableAttentionSet = true;
@ -226,7 +132,7 @@ in
# Configures gerrit for being reverse-proxied by nginx as per
# https://gerrit-review.googlesource.com/Documentation/config-reverseproxy.html
gerrit = {
canonicalWebUrl = "https://${cfg.canonicalDomain}";
canonicalWebUrl = "https://cl.forkos.org";
docUrl = "/Documentation";
defaultBranch = "refs/heads/main";
};
@ -243,7 +149,7 @@ in
# Auto-link other CLs
commentlink.gerrit = {
match = "cl/(\\d+)";
link = "https://${cfg.canonicalDomain}/$1";
link = "https://cl.forkos.org/$1";
};
# Configures integration with Keycloak, which then integrates with a
@ -315,14 +221,6 @@ in
User = "git";
Group = "git";
};
environment.REVWALK_USE_PRIORITY_QUEUE = "true";
};
age.secrets.gerrit-prometheus-bearer-token.file = ../../secrets/gerrit-prometheus-bearer-token.age;
bagel.monitoring.grafana-agent.exporters.gerrit = {
port = 4778; # grrt
bearerTokenFile = config.age.secrets.gerrit-prometheus-bearer-token.path;
scrapeConfig.metrics_path = "/plugins/metrics-reporter-prometheus/metrics";
};
};
}

View file

@ -1,132 +0,0 @@
{ lib, config, pkgs, ... }:
let
cfg = config.bagel.nixpkgs.one-way-sync;
inherit (lib) mkIf mkOption mkEnableOption types mapAttrs';
mkSyncTimer = name: { timer, ... }: {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = timer;
Persistent = true;
Unit = "ows-${name}.service";
};
};
mkSyncService = name: { fromUri, fromRefspec, localRefspec, ... }: {
path = [ pkgs.gitFull pkgs.openssh pkgs.lix ];
script = ''
set -xe
RUNTIME_DIRECTORY="/run/onewaysync-${name}"
trap "git worktree remove -f "$RUNTIME_DIRECTORY"/${name}" EXIT
if [ ! -d "/var/lib/onewaysync/nixpkgs" ]; then
echo "First run, synchronizing nixpkgs..."
git clone https://cl.forkos.org/nixpkgs /var/lib/onewaysync/nixpkgs
fi
cd /var/lib/onewaysync/nixpkgs
echo "Syncing ${fromUri}:${fromRefspec} to ${cfg.pushUrl}:refs/heads/${localRefspec}"
git worktree add -f "$RUNTIME_DIRECTORY"/${name} refs/remotes/origin/${localRefspec}
cd "$RUNTIME_DIRECTORY"/${name}
git pull origin ${localRefspec} --no-rebase
EXPECTED_REF=$(git rev-list refs/remotes/origin/${localRefspec} | head -1)
echo "Current ref: $EXPECTED_REF"
git config user.name Fork-o-Tron
git config user.email noreply@forkos.org
git fetch ${fromUri} ${fromRefspec}
'' + lib.optionalString (!(lib.hasInfix "staging" localRefspec)) ''
OLD_STDENV=$(nix eval -f . stdenv.outPath --store "$RUNTIME_DIRECTORY")
'' + ''
git merge FETCH_HEAD
'' + lib.optionalString (!(lib.hasInfix "staging" localRefspec)) ''
NEW_STDENV=$(nix eval -f . stdenv.outPath --store "$RUNTIME_DIRECTORY")
# Do not allow auto-merging a staging iteration
test "$OLD_STDENV" = "$NEW_STDENV"
'' + ''
GIT_SSH_COMMAND='ssh -i ${cfg.deployKeyPath}' git push ${cfg.pushUrl} HEAD:refs/heads/${localRefspec}
'';
serviceConfig = {
User = "git";
Group = "git";
Type = "oneshot";
RuntimeDirectory = "onewaysync-${name}";
WorkingDirectory = "/run/onewaysync-${name}";
StateDirectory = "onewaysync";
};
};
in
{
options.bagel.nixpkgs.one-way-sync = {
enable = mkEnableOption "the one-way sync from GitHub repositories";
referenceDir = mkOption {
type = types.str;
default = "/var/lib/gerrit/git/nixpkgs.git";
description = "Local repository reference";
};
workingDir = mkOption {
type = types.str;
default = "/run/onewaysync/";
description = "Working directory for the service";
};
pushUrl = mkOption {
type = types.str;
example = "ssh://...";
description = "Push URL for the target repository";
};
deployKeyPath = mkOption {
type = types.path;
example = "/run/agenix.d/ows-priv-key";
description = "Deployment private SSH key to push to the repository";
};
branches = mkOption {
type = types.attrsOf (types.submodule ({ ... }:
{
options = {
name = mkOption {
type = types.str;
description = "User-friendly name";
};
fromUri = mkOption {
type = types.str;
description = "Git URI from which we need to sync";
};
fromRefspec = mkOption {
type = types.str;
description = "refspec for the fetch";
};
localRefspec = mkOption {
type = types.str;
description = "Local refspec in the local repository to get the expected reference and avoid stale info";
};
timer = mkOption {
type = types.str;
description = "systemd calendar expression for when to run the sync";
};
};
}));
description = "Set of branches mapping from cl.forkos.org to other Git repositories";
};
};
config = mkIf cfg.enable {
systemd.timers = mapAttrs' (name: value: {
name = "ows-${name}";
value = mkSyncTimer name value;
}) cfg.branches;
systemd.services = mapAttrs' (name: value: {
name = "ows-${name}";
value = mkSyncService name value;
}) cfg.branches;
};
}
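
A hypothetical branch mapping, to show the expected shape (URIs, refspecs and the timer are illustrative only):

  bagel.nixpkgs.one-way-sync.branches."main-sync" = {
    name = "main-sync";
    fromUri = "https://github.com/NixOS/nixpkgs";
    fromRefspec = "master";
    localRefspec = "main";
    timer = "hourly";
  };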

View file

@ -25,7 +25,7 @@ in
# The :443 suffix is a workaround for https://b.tvl.fyi/issues/88.
proxy_set_header Host $host:443;
# Gerrit can throw a lot of data.
proxy_buffering off;
proxy_buffering on;
# NGINX should not give up super fast. Things can take time.
proxy_read_timeout 3600;
}

View file

@ -1,8 +1,7 @@
{ nodes, config, lib, pkgs, ... }:
{ config, lib, pkgs, ... }:
let
cfg = config.bagel.services.hydra;
ssh-keys = import ../../common/ssh-keys.nix;
narCacheDir = "/var/cache/hydra/nar-cache";
port = 3000;
@ -10,71 +9,19 @@ let
mkCacheSettings = settings: builtins.concatStringsSep "&" (
lib.mapAttrsToList (k: v: "${k}=${v}") settings
);
mkPgConnString = options: builtins.concatStringsSep ";" (
lib.mapAttrsToList (k: v: "${k}=${v}") options
);
# XXX: to support Nix's dumb public host key syntax (base64'd), this outputs
# a string with shell-style command interpolations: $(...).
mkBaremetalBuilder = {
parallelBuilds,
publicHostKey,
host,
speedFactor ? 1,
user ? "builder",
supportedSystems ? [ "i686-linux" "x86_64-linux" ],
supportedFeatures ? [ "big-parallel" "kvm" "nixos-test" ],
requiredFeatures ? [ ]
}:
let
supportedFeatures_ = if (supportedFeatures != []) then lib.concatStringsSep "," supportedFeatures else "-";
requiredFeatures_ = if (requiredFeatures != []) then lib.concatStringsSep "," requiredFeatures else "-";
in
"ssh://${user}@${host}?remote-store=/mnt ${lib.concatStringsSep "," supportedSystems} ${config.age.secrets.hydra-ssh-key-priv.path} ${toString parallelBuilds} ${toString speedFactor} ${supportedFeatures_} ${requiredFeatures_} $(echo -n '${publicHostKey}' | base64 -w0)";
# TODO:
# - generalize to new architectures
# - generalize to new features
baremetalBuilders = lib.concatStringsSep "\n"
(map (n: let
assignments = (import ../baremetal-builder/assignments.nix).${n} or {
inherit (nodes.${n}.config.nix.settings) max-jobs;
supported-features = [ "big-parallel" "kvm" "nixos-test" ];
required-features = [];
};
in mkBaremetalBuilder {
parallelBuilds = assignments.max-jobs;
supportedFeatures = assignments.supported-features;
requiredFeatures = assignments.required-features;
publicHostKey = ssh-keys.machines.${n};
host = nodes.${n}.config.networking.fqdn;
}) cfg.builders);
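# For reference, one generated machines-file line looks roughly like this
# (host, key path and host key are illustrative):
#   ssh://builder@builder-0.wob01.infra.forkos.org?remote-store=/mnt i686-linux,x86_64-linux /run/agenix/hydra-ssh-key-priv 8 1 big-parallel,kvm,nixos-test - $(echo -n 'ssh-ed25519 AAAA...' | base64 -w0)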
in {
options.bagel.services.hydra = with lib; {
enable = mkEnableOption "Hydra coordinator";
builders = mkOption {
type = types.listOf types.str;
description = "List of builders to configure for Hydra";
example = [ "builder-0" "builder-1" ];
dbi = mkOption {
type = types.str;
description = "DBI connection string for the Hydra postgres database";
};
};
config = lib.mkIf cfg.enable {
# TODO: we should assert or warn that the builders
# does indeed have our public SSH key and are *builders*
# as a simple evaluation preflight check.
age.secrets.hydra-s3-credentials.file = ../../secrets/hydra-s3-credentials.age;
age.secrets.hydra-postgres-key.group = "hydra";
age.secrets.hydra-postgres-key.mode = "0440";
age.secrets.hydra-postgres-key.file = ../../secrets/hydra-postgres-key.age;
age.secrets.hydra-signing-priv.owner = "hydra-queue-runner";
age.secrets.hydra-signing-priv.file = ../../secrets/hydra-signing-priv.age;
age.secrets.hydra-ssh-key-priv.owner = "hydra-queue-runner";
age.secrets.hydra-ssh-key-priv.file = ../../secrets/hydra-ssh-key-priv.age;
@ -86,47 +33,27 @@ in {
# XXX: Otherwise services.hydra-dev overwrites it to only hydra-queue-runner...
#
# Can be removed once this is added to some common config template.
nix.settings.trusted-users = [ "root" "hydra" "hydra-www" "@wheel" ];
# Because Hydra can't fetch flake inputs otherwise... also yes, this
# prefix-based matching is absurdly bad.
nix.settings.allowed-uris = [
"github:"
"https://github.com/"
"https://git.lix.systems/"
"https://git@git.lix.systems/"
];
nix.settings.trusted-users = [ "root" "@wheel" ];
services.hydra-dev = {
enable = true;
listenHost = "127.0.0.1";
listenHost = "localhost";
port = port;
dbi = cfg.dbi;
dbi = "dbi:Pg:${mkPgConnString {
host = "postgres.forkos.org";
dbname = "hydra";
user = "hydra";
sslmode = "verify-full";
sslcert = "${./postgres.crt}";
sslkey = config.age.secrets.hydra-postgres-key.path;
sslrootcert = "${../postgres/ca.crt}";
}}";
hydraURL = "https://hydra.forkos.org";
hydraURL = "https://hydra.bagel.delroth.net";
useSubstitutes = false;
notificationSender = "hydra@forkos.org";
notificationSender = "bagel@delroth.net";
# XXX: hydra overlay sets pkgs.hydra, but hydra's nixos module uses
# pkgs.hydra_unstable...
package = pkgs.hydra;
buildMachinesFiles = [
(pkgs.runCommandNoCC "hydra-builders.conf" {} ''
cat >$out <<EOF
${baremetalBuilders}
EOF
(pkgs.writeText "hydra-builders.conf" ''
ssh://bagel-builder@epyc.infra.newtype.fr i686-linux,x86_64-linux ${config.age.secrets.hydra-ssh-key-priv.path} 8 1 big-parallel,kvm,nixos-test - c3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSU9YVDlJbml0MU1oS3Q0cmpCQU5McTB0MGJQd3cvV1FaOTZ1QjRBRURybWwgcm9vdEBuaXhvcwo=
'')
];
@ -135,7 +62,7 @@ in {
endpoint = "s3.delroth.net";
region = "garage";
secret-key = config.age.secrets.hydra-signing-priv.path;
#secret-key = "TODO";
compression = "zstd";
log-compression = "br";
@ -146,11 +73,11 @@ in {
server_store_uri = https://bagel-cache.s3-web.delroth.net?local-nar-cache=${narCacheDir}
binary_cache_public_url = https://bagel-cache.s3-web.delroth.net
log_prefix = https://bagel-cache.s3-web.delroth.net/
log_prefix = https://bagel-cache.s3-web.delroth.net
upload_logs_to_binary_cache = true
evaluator_workers = 16
evaluator_workers = 4
evaluator_max_memory_size = 4096
max_concurrent_evals = 1
@ -158,18 +85,11 @@ in {
max_output_size = ${builtins.toString (3 * 1024 * 1024 * 1024)}
max_db_connections = 100
<git-input>
timeout = 1800
</git-input>
'';
};
systemd.services.hydra-queue-runner = {
# FIXME: should probably be set in the upstream Hydra module?
wants = [ "network-online.target" ];
serviceConfig.EnvironmentFile = config.age.secrets.hydra-s3-credentials.path;
};
systemd.services.hydra-queue-runner.serviceConfig.EnvironmentFile =
config.age.secrets.hydra-s3-credentials.path;
services.nginx = {
enable = true;
@ -188,7 +108,7 @@ in {
worker_processes auto;
'';
virtualHosts."hydra.forkos.org" = {
virtualHosts."hydra.bagel.delroth.net" = {
forceSSL = true;
enableACME = true;

View file

@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBtDCCAVugAwIBAgIQTU55o4gtG8EFZArwsuj4NjAKBggqhkjOPQQDAjAiMSAw
HgYDVQQDExdGb3JrT1MgUG9zdGdyZXMgUm9vdCBDQTAeFw0yNDA4MTYwNTU0MTda
Fw0zNDA4MTYxNzU0MTdaMBAxDjAMBgNVBAMTBWh5ZHJhMFkwEwYHKoZIzj0CAQYI
KoZIzj0DAQcDQgAEnTgiFZOXBrcPlWDxJPXUFgxIi7/T7LmwLtpGPK/G6R8KA9cS
4UXF5Ifz2dCgozTlhqLROKb81yhNsSy1tOcFyKOBhDCBgTAOBgNVHQ8BAf8EBAMC
B4AwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQGh6HM
jl41qw/F0vpBdmOWQ2IfGzAfBgNVHSMEGDAWgBQy4WX/hUExQ/i1h7MvF6Ow2irN
izAQBgNVHREECTAHggVoeWRyYTAKBggqhkjOPQQDAgNHADBEAiAEypqfyMOGbEJv
dKI1tyj890uq5Osr5+9wxGBvJDMJNwIgefyOdFcvJTzbfHgLmORpBOVtnpbkwj5y
rMnjT8gYjEA=
-----END CERTIFICATE-----

View file

@ -1,68 +0,0 @@
{
config,
lib,
inputs,
...
}:
let
cfg = config.bagel.services.grapevine;
inherit (lib) mkEnableOption mkIf;
in
{
imports = [
inputs.grapevine.nixosModules.default
./hookshot.nix
];
options.bagel.services.grapevine.enable = mkEnableOption "Grapevine";
config = mkIf cfg.enable {
services = {
grapevine = {
enable = true;
settings = {
listen = [
{
type = "tcp";
address = "127.0.0.1";
port = 6167;
}
];
server_name = "forkos.org";
database.backend = "rocksdb";
};
};
nginx = {
upstreams.grapevine.servers."127.0.0.1:6167" = { };
virtualHosts = {
"matrix.forkos.org" = {
forceSSL = true;
enableACME = true;
locations."/".proxyPass = "http://grapevine";
};
"forkos.org" = {
forceSSL = true;
enableACME = true;
locations = {
"= /.well-known/matrix/server".extraConfig = ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '{"m.server": "matrix.forkos.org:443"}';
'';
"= /.well-known/matrix/client".extraConfig = ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '{"m.homeserver": {"base_url": "https://matrix.forkos.org/"}, "m.identity_server": {"base_url": "https://matrix.org/"}, "org.matrix.msc3575.proxy": {"url": "https://matrix.forkos.org"}}';
'';
};
};
};
};
};
};
}

View file

@ -1,77 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.bagel.services.hookshot;
inherit (lib) mkEnableOption mkIf mkOption types;
keyPath = "/var/lib/matrix-hookshot/key.pem";
in
{
options.bagel.services.hookshot = {
enable = mkEnableOption "matrix-hookshot";
settings = mkOption {
description = "Settings";
type = (pkgs.formats.yaml { }).type;
};
admins = mkOption {
description = "List of admin MXIDs";
type = types.listOf types.str;
};
};
config = mkIf cfg.enable {
systemd.services.matrix-hookshot = {
wantedBy = ["multi-user.target"];
wants = ["network-online.target"];
after = ["network-online.target"];
serviceConfig = {
ExecStart = "${lib.getExe pkgs.matrix-hookshot} ${pkgs.writers.writeYAML "config.yaml" cfg.settings}";
ExecStartPre = pkgs.writeShellScript "hookshot-generate-key" ''
if [ ! -f ${keyPath} ]; then
mkdir -p $(dirname ${keyPath})
${lib.getExe pkgs.openssl} genpkey -out ${keyPath} -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:4096
fi
'';
DynamicUser = true;
StateDirectory = "matrix-hookshot";
WorkingDirectory = "/var/lib/matrix-hookshot";
};
};
bagel.services.hookshot.settings = {
bridge = {
domain = "forkos.org";
url = "https://matrix.forkos.org";
mediaUrl = "https://forkos.org";
port = 9993;
bindAddress = "127.0.0.1";
};
passFile = keyPath;
listeners = [{
port = 9994;
bindAddress = "127.0.0.1";
resources = [ "webhooks" ];
}];
generic = {
enabled = true;
urlPrefix = "https://alerts.forkos.org/webhook";
};
permissions = map (mxid: {
actor = mxid;
services = [{
service = "*";
level = "admin";
}];
}) cfg.admins;
};
services.nginx.virtualHosts."alerts.forkos.org" = {
forceSSL = true;
enableACME = true;
locations."/".proxyPass = "http://127.0.0.1:9994";
};
};
}

Some files were not shown because too many files have changed in this diff.