forked from lix-project/lix

Merge changes from topic "releng" into main

* changes:
  releng: add prod environment, ready for release
  releng: automatically figure out if we should tag latest for docker
  releng: support multiarch docker images
  manual: rewrite the docker guide now that we have images
  Rewrite docker to be sensible and smaller
  Implement docker upload in the releng tools

commit 8a3d063a49
@@ -1,64 +1,62 @@
 # Using Lix within Docker
 
-Currently the Lix project doesn't ship docker images. However, we have the infrastructure to do it, it's just not yet been done. See https://git.lix.systems/lix-project/lix/issues/252
+Lix is available on the following two container registries:
 
-<!--
+- [ghcr.io/lix-project/lix](https://ghcr.io/lix-project/lix)
+- [git.lix.systems/lix-project/lix](https://git.lix.systems/lix-project/-/packages/container/lix)
 
 To run the latest stable release of Lix with Docker run the following command:
 
 ```console
-$ docker run -ti nixos/nix
-Unable to find image 'nixos/nix:latest' locally
-latest: Pulling from nixos/nix
-5843afab3874: Pull complete
-b52bf13f109c: Pull complete
-1e2415612aa3: Pull complete
-Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
-Status: Downloaded newer image for nixos/nix:latest
-35ca4ada6e96:/# nix --version
-nix (Nix) 2.3.12
-35ca4ada6e96:/# exit
+~ » sudo podman run -it ghcr.io/lix-project/lix:latest
+Trying to pull ghcr.io/lix-project/lix:latest...
+
+bash-5.2# nix --version
+nix (Lix, like Nix) 2.90.0
 ```
 
 # What is included in Lix's Docker image?
 
-The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
+The official Docker image is created using [nix2container]
 (and not with `Dockerfile` as it is usual with Docker images). You can still
 base your custom Docker image on it as you would do with any other Docker
 image.
 
-The Docker image is also not based on any other image and includes minimal set
-of runtime dependencies that are required to use Lix:
+[nix2container]: https://github.com/nlewo/nix2container
 
-- pkgs.nix
-- pkgs.bashInteractive
-- pkgs.coreutils-full
-- pkgs.gnutar
-- pkgs.gzip
-- pkgs.gnugrep
-- pkgs.which
-- pkgs.curl
-- pkgs.less
-- pkgs.wget
-- pkgs.man
-- pkgs.cacert.out
-- pkgs.findutils
+The Docker image is also not based on any other image and includes the nixpkgs
+that Lix was built with along with a minimal set of tools in the system profile:
+
+- bashInteractive
+- cacert.out
+- coreutils-full
+- curl
+- findutils
+- gitMinimal
+- gnugrep
+- gnutar
+- gzip
+- iana-etc
+- less
+- libxml2
+- lix
+- man
+- openssh
+- sqlite
+- wget
+- which
 
 # Docker image with the latest development version of Lix
 
-To get the latest image that was built by [Hydra](https://hydra.nixos.org) run
-the following command:
+FIXME: There are not currently images of development versions of Lix. Tracking issue: https://git.lix.systems/lix-project/lix/issues/381
+
+You can build a Docker image from source yourself and copy it to either:
+
+Podman: `nix run '.#dockerImage.copyTo' containers-storage:lix`
+
+Docker: `nix run '.#dockerImage.copyToDockerDaemon'`
+
+Then:
 
 ```console
-$ curl -L https://hydra.nixos.org/job/nix/master/dockerImage.x86_64-linux/latest/download/1 | docker load
-$ docker run -ti nix:2.5pre20211105
+$ docker run -ti lix
 ```
-
-You can also build a Docker image from source yourself:
-
-```console
-$ nix build ./\#hydraJobs.dockerImage.x86_64-linux
-$ docker load -i ./result/image.tar.gz
-$ docker run -ti nix:2.5pre20211105
-```
--->
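To sanity-check a published multiarch image, you can ask the registry for its OCI index directly. A minimal Python sketch, assuming anonymous pull access to ghcr.io (the token endpoint and media type follow the OCI distribution spec used by `releng/docker_assemble.py` below):

```python
import requests

INDEX_MIME = 'application/vnd.oci.image.index.v1+json'

# anonymous pull token for the public image (assumed to be permitted by ghcr.io)
tok = requests.get('https://ghcr.io/token',
                   params={'scope': 'repository:lix-project/lix:pull'}).json()['token']
resp = requests.get('https://ghcr.io/v2/lix-project/lix/manifests/latest',
                    headers={'Accept': INDEX_MIME, 'Authorization': 'Bearer ' + tok})
resp.raise_for_status()
print([m['platform']['architecture'] for m in resp.json()['manifests']])
# e.g. ['amd64', 'arm64']
```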

docker.nix (214 lines changed)

@@ -1,5 +1,8 @@
 {
   pkgs ? import <nixpkgs> { },
+  # Git commit ID, if available
+  lixRevision ? null,
+  nix2container,
   lib ? pkgs.lib,
   name ? "lix",
   tag ? "latest",

@@ -12,26 +15,51 @@
   flake-registry ? null,
 }:
 let
+  layerContents = with pkgs; [
+    # pulls in glibc and openssl, about 60MB
+    { contents = [ coreutils-full ]; }
+    # some stuff that is low in the closure graph and small ish, mostly to make
+    # incremental lix updates cheaper
+    {
+      contents = [
+        curl
+        libxml2
+        sqlite
+      ];
+    }
+    # 50MB of git
+    { contents = [ gitMinimal ]; }
+    # 144MB of nixpkgs
+    {
+      contents = [ channel ];
+      inProfile = false;
+    }
+  ];
+
+  # These packages are left to be auto layered by nix2container, since it is
+  # less critical that they get layered sensibly and they tend to not be deps
+  # of anything in particular
+  autoLayered = with pkgs; [
+    bashInteractive
+    gnutar
+    gzip
+    gnugrep
+    which
+    less
+    wget
+    man
+    cacert.out
+    findutils
+    iana-etc
+    openssh
+    nix
+  ];
+
   defaultPkgs =
     with pkgs;
-    [
-      nix
-      bashInteractive
-      coreutils-full
-      gnutar
-      gzip
-      gnugrep
-      which
-      curl
-      less
-      wget
-      man
-      cacert.out
-      findutils
-      iana-etc
-      git
-      openssh
-    ]
+    lib.lists.flatten (
+      map (x: if !(x ? inProfile) || x.inProfile then x.contents else [ ]) layerContents
+    )
+    ++ autoLayered
     ++ extraPkgs;

   users =

@@ -139,16 +167,17 @@ let
     ))
     + "\n";
 
+  nixpkgs = pkgs.path;
+  channel = pkgs.runCommand "channel-nixpkgs" { } ''
+    mkdir $out
+    ${lib.optionalString bundleNixpkgs ''
+      ln -s ${nixpkgs} $out/nixpkgs
+      echo "[]" > $out/manifest.nix
+    ''}
+  '';
+
   baseSystem =
     let
-      nixpkgs = pkgs.path;
-      channel = pkgs.runCommand "channel-nixos" { inherit bundleNixpkgs; } ''
-        mkdir $out
-        if [ "$bundleNixpkgs" ]; then
-          ln -s ${nixpkgs} $out/nixpkgs
-          echo "[]" > $out/manifest.nix
-        fi
-      '';
       rootEnv = pkgs.buildPackages.buildEnv {
         name = "root-profile-env";
         paths = defaultPkgs;

@@ -187,7 +216,7 @@ let
       profile = pkgs.buildPackages.runCommand "user-environment" { } ''
         mkdir $out
         cp -a ${rootEnv}/* $out/
-        ln -s ${manifest} $out/manifest.nix
+        ln -sf ${manifest} $out/manifest.nix
       '';
       flake-registry-path =
         if (flake-registry == null) then

@@ -236,6 +265,7 @@ let
         ln -s /nix/var/nix/profiles/share $out/usr/
 
         mkdir -p $out/nix/var/nix/gcroots
+        ln -s /nix/var/nix/profiles $out/nix/var/nix/gcroots/profiles
 
         mkdir $out/tmp

@@ -248,14 +278,14 @@ let
         mkdir -p $out/nix/var/nix/profiles/per-user/root
 
         ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
-        ln -s $out/nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
+        ln -s /nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
         ln -s /nix/var/nix/profiles/default $out/root/.nix-profile
 
         ln -s ${channel} $out/nix/var/nix/profiles/per-user/root/channels-1-link
-        ln -s $out/nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
+        ln -s /nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
 
         mkdir -p $out/root/.nix-defexpr
-        ln -s $out/nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
+        ln -s /nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
         echo "${channelURL} ${channelName}" > $out/root/.nix-channels
 
         mkdir -p $out/bin $out/usr/bin

@@ -273,43 +303,99 @@ let
           ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName
         '')
       );
-in
-pkgs.dockerTools.buildLayeredImageWithNixDb {
-  inherit name tag maxLayers;
-
-  contents = [ baseSystem ];
-
-  extraCommands = ''
-    rm -rf nix-support
-    ln -s /nix/var/nix/profiles nix/var/nix/gcroots/profiles
-  '';
-  fakeRootCommands = ''
-    chmod 1777 tmp
-    chmod 1777 var/tmp
-  '';
-
-  config = {
-    Cmd = [ "/root/.nix-profile/bin/bash" ];
-    Env = [
-      "USER=root"
-      "PATH=${
-        lib.concatStringsSep ":" [
-          "/root/.nix-profile/bin"
-          "/nix/var/nix/profiles/default/bin"
-          "/nix/var/nix/profiles/default/sbin"
-        ]
-      }"
-      "MANPATH=${
-        lib.concatStringsSep ":" [
-          "/root/.nix-profile/share/man"
-          "/nix/var/nix/profiles/default/share/man"
-        ]
-      }"
-      "SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
-      "GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
-      "NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
-      "NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
-    ];
-  };
-}
+
+  layers = builtins.foldl' (
+    layersList: el:
+    let
+      layer = nix2container.buildLayer {
+        deps = el.contents;
+        layers = layersList;
+      };
+    in
+    layersList ++ [ layer ]
+  ) [ ] layerContents;
+
+  image = nix2container.buildImage {
+    inherit name tag maxLayers;
+    inherit layers;
+
+    copyToRoot = [ baseSystem ];
+
+    initializeNixDatabase = true;
+
+    perms = [
+      {
+        path = baseSystem;
+        regex = "(/var)?/tmp";
+        mode = "1777";
+      }
+    ];
+
+    config = {
+      Cmd = [ "/root/.nix-profile/bin/bash" ];
+      Env = [
+        "USER=root"
+        "PATH=${
+          lib.concatStringsSep ":" [
+            "/root/.nix-profile/bin"
+            "/nix/var/nix/profiles/default/bin"
+            "/nix/var/nix/profiles/default/sbin"
+          ]
+        }"
+        "MANPATH=${
+          lib.concatStringsSep ":" [
+            "/root/.nix-profile/share/man"
+            "/nix/var/nix/profiles/default/share/man"
+          ]
+        }"
+        "SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
+        "GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
+        "NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
+        "NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
+      ];
+
+      Labels = {
+        "org.opencontainers.image.title" = "Lix";
+        "org.opencontainers.image.source" = "https://git.lix.systems/lix-project/lix";
+        "org.opencontainers.image.vendor" = "Lix project";
+        "org.opencontainers.image.version" = pkgs.nix.version;
+        "org.opencontainers.image.description" = "Minimal Lix container image, with some batteries included.";
+      } // lib.optionalAttrs (lixRevision != null) { "org.opencontainers.image.revision" = lixRevision; };
+    };
+
+    meta = {
+      description = "Docker image for Lix. This is built with nix2container; see that project's README for details";
+      longDescription = ''
+        Docker image for Lix, built with nix2container.
+        To copy it to your docker daemon, nix run .#dockerImage.copyToDockerDaemon
+        To copy it to podman, nix run .#dockerImage.copyTo containers-storage:lix
+      '';
+    };
+  };
+in
+image
+// {
+  # We don't ship the tarball as the default output because it is a strange thing to want imo
+  tarball =
+    pkgs.buildPackages.runCommand "docker-image-tarball-${pkgs.nix.version}"
+      {
+        nativeBuildInputs = [ pkgs.buildPackages.bubblewrap ];
+        meta.description = "Docker image tarball with Lix for ${pkgs.system}";
+      }
+      ''
+        mkdir -p $out/nix-support
+        image=$out/image.tar
+        # bwrap for foolish temp dir selection code that forces /var/tmp:
+        # https://github.com/containers/skopeo.git/blob/60ee543f7f7c242f46cc3a7541d9ac8ab1c89168/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go#L15-L18
+        mkdir -p $TMPDIR/fake-var/tmp
+        args=(--unshare-user --bind "$TMPDIR/fake-var" /var)
+        for dir in /*; do
+          args+=(--dev-bind "/$dir" "/$dir")
+        done
+        bwrap ''${args[@]} -- ${lib.getExe image.copyTo} docker-archive:$image
+        gzip $image
+        echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
+      '';
+}
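The `layers` fold above is order-dependent: each `nix2container.buildLayer` call receives the layers built so far, so store paths already captured by an earlier layer are not duplicated into later ones. A minimal Python sketch of the same fold (names are illustrative, not nix2container's API):

```python
from functools import reduce

def build_layer(deps, prior_layers):
    """Stand-in for nix2container.buildLayer { deps; layers; }: record which
    paths earlier layers already carry so this layer can skip them."""
    return {'deps': deps,
            'excludes': [p for layer in prior_layers for p in layer['deps']]}

layer_contents = [['coreutils-full'], ['curl', 'libxml2', 'sqlite'],
                  ['gitMinimal'], ['channel']]
layers = reduce(lambda acc, deps: acc + [build_layer(deps, acc)],
                layer_contents, [])
assert layers[2]['excludes'] == ['coreutils-full', 'curl', 'libxml2', 'sqlite']
```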

flake.lock (17 lines changed)

@@ -16,6 +16,22 @@
       "type": "github"
     }
   },
+  "nix2container": {
+    "flake": false,
+    "locked": {
+      "lastModified": 1712990762,
+      "narHash": "sha256-hO9W3w7NcnYeX8u8cleHiSpK2YJo7ecarFTUlbybl7k=",
+      "owner": "nlewo",
+      "repo": "nix2container",
+      "rev": "20aad300c925639d5d6cbe30013c8357ce9f2a2e",
+      "type": "github"
+    },
+    "original": {
+      "owner": "nlewo",
+      "repo": "nix2container",
+      "type": "github"
+    }
+  },
   "nixpkgs": {
     "locked": {
       "lastModified": 1715123187,

@@ -67,6 +83,7 @@
   "root": {
     "inputs": {
       "flake-compat": "flake-compat",
+      "nix2container": "nix2container",
       "nixpkgs": "nixpkgs",
       "nixpkgs-regression": "nixpkgs-regression",
       "pre-commit-hooks": "pre-commit-hooks"

flake.nix (23 lines changed)

@@ -8,6 +8,10 @@
     url = "github:cachix/git-hooks.nix";
     flake = false;
   };
+  nix2container = {
+    url = "github:nlewo/nix2container";
+    flake = false;
+  };
   flake-compat = {
     url = "github:edolstra/flake-compat";
     flake = false;

@@ -20,6 +24,7 @@
       nixpkgs,
       nixpkgs-regression,
       pre-commit-hooks,
+      nix2container,
       flake-compat,
     }:

@@ -330,19 +335,13 @@
           dockerImage =
             let
               pkgs = nixpkgsFor.${system}.native;
-              image = import ./docker.nix {
-                inherit pkgs;
-                tag = pkgs.nix.version;
-              };
+              nix2container' = import nix2container { inherit pkgs system; };
             in
-            pkgs.runCommand "docker-image-tarball-${pkgs.nix.version}"
-              { meta.description = "Docker image with Lix for ${system}"; }
-              ''
-                mkdir -p $out/nix-support
-                image=$out/image.tar.gz
-                ln -s ${image} $image
-                echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
-              '';
+            import ./docker.nix {
+              inherit pkgs;
+              nix2container = nix2container'.nix2container;
+              tag = pkgs.nix.version;
+            };
         }
         // builtins.listToAttrs (
           map (crossSystem: {
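With the flake now exposing the nix2container image object directly, the copy helpers named in docker.nix's `meta` can also be scripted. A sketch (assumes a checkout of this repo, `nix` on PATH, and a running docker or podman):

```python
import subprocess

# load the image straight into the local docker daemon
subprocess.run(['nix', 'run', '.#dockerImage.copyToDockerDaemon'], check=True)

# or copy into podman's container storage under the name "lix"
subprocess.run(['nix', 'run', '.#dockerImage.copyTo', '--',
                'containers-storage:lix'], check=True)
```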

@@ -39,6 +39,7 @@
     pkg-config,
     python3,
     rapidcheck,
+    skopeo,
     sqlite,
     toml11,
     util-linuxMinimal ? utillinuxMinimal,

@@ -416,6 +417,8 @@ stdenv.mkDerivation (finalAttrs: {
         p: [
           p.yapf
           p.python-frontmatter
+          p.requests
+          p.xdg-base-dirs
           (p.toPythonModule xonsh-unwrapped)
         ]
       );

@@ -452,6 +455,8 @@ stdenv.mkDerivation (finalAttrs: {
     lib.optional (stdenv.cc.isClang && hostPlatform == buildPlatform) clang-tools_llvm
     ++ [
       pythonEnv
+      # docker image tool
+      skopeo
       just
       nixfmt
       # Load-bearing order. Must come before clang-unwrapped below, but after clang_tools above.

@@ -2,11 +2,30 @@ from xonsh.main import setup
 setup()
 del setup
 
-from releng import environment
-from releng import create_release
-from releng import keys
-from releng import version
-from releng import cli
+import logging
+
+from . import environment
+from . import create_release
+from . import keys
+from . import version
+from . import cli
+from . import docker
+from . import docker_assemble
+from . import gitutils
+
+rootLogger = logging.getLogger()
+rootLogger.setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+fmt = logging.Formatter('{asctime} {levelname} {name}: {message}',
+                        datefmt='%b %d %H:%M:%S',
+                        style='{')
+
+if not any(isinstance(h, logging.StreamHandler) for h in rootLogger.handlers):
+    hand = logging.StreamHandler()
+    hand.setFormatter(fmt)
+    rootLogger.addHandler(hand)
 
 def reload():
     import importlib

@@ -15,3 +34,6 @@ def reload():
     importlib.reload(keys)
     importlib.reload(version)
     importlib.reload(cli)
+    importlib.reload(docker)
+    importlib.reload(docker_assemble)
+    importlib.reload(gitutils)

@@ -1,4 +1,8 @@
 from . import create_release
+from . import docker
+from .environment import RelengEnvironment
+from . import environment
 import functools
 import argparse
 import sys

@@ -18,13 +22,16 @@ def do_tag(args):
                 no_check_git=args.no_check_git)
 
 
-def do_upload(args):
-    create_release.setup_creds()
+def do_upload(env: RelengEnvironment, args):
+    create_release.setup_creds(env)
     if args.target == 'all':
-        create_release.upload_artifacts(force_push_tag=args.force_push_tag,
-                                        noconfirm=args.noconfirm)
+        docker.check_all_logins(env)
+        create_release.upload_artifacts(env,
+                                        force_push_tag=args.force_push_tag,
+                                        noconfirm=args.noconfirm,
+                                        no_check_git=args.no_check_git)
     elif args.target == 'manual':
-        create_release.upload_manual()
+        create_release.upload_manual(env)
     else:
         raise ValueError('invalid target, unreachable')

@@ -77,6 +84,10 @@ def main():
 
     upload = sps.add_parser(
         'upload', help='Upload artifacts to cache and releases bucket')
+    upload.add_argument(
+        '--no-check-git',
+        action='store_true',
+        help="Don't check git state before uploading. For testing.")
     upload.add_argument('--force-push-tag',
                         action='store_true',
                         help='Force push the tag. For testing.')

@@ -90,7 +101,12 @@ def main():
         '--noconfirm',
         action='store_true',
         help="Don't ask for confirmation. For testing/automation.")
-    upload.set_defaults(cmd=do_upload)
+    upload.add_argument('--environment',
+                        choices=list(environment.ENVIRONMENTS.keys()),
+                        default='staging',
+                        help='Environment to release to')
+    upload.set_defaults(cmd=lambda args: do_upload(
+        environment.ENVIRONMENTS[args.environment], args))
 
     args = ap.parse_args()
     args.cmd(args)
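The `set_defaults(cmd=lambda ...)` change above resolves the chosen environment once, at dispatch time. A self-contained sketch of the same wiring (hypothetical stand-in values, not the real environments):

```python
import argparse

ENVIRONMENTS = {'staging': 'STAGING-ENV', 'production': 'PROD-ENV'}  # stand-ins

def do_upload(env, args):
    print('uploading to', env, '| noconfirm =', args.noconfirm)

ap = argparse.ArgumentParser()
ap.add_argument('--environment', choices=ENVIRONMENTS, default='staging')
ap.add_argument('--noconfirm', action='store_true')
# bind the resolved environment into the handler before dispatch
ap.set_defaults(cmd=lambda args: do_upload(ENVIRONMENTS[args.environment], args))

args = ap.parse_args(['--environment', 'production'])
args.cmd(args)  # -> uploading to PROD-ENV | noconfirm = False
```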

@@ -7,19 +7,15 @@ import tempfile
 import hashlib
 import datetime
 
 from . import environment
+from .environment import RelengEnvironment
 from . import keys
+from . import docker
 from .version import VERSION, RELEASE_NAME, MAJOR
+from .gitutils import verify_are_on_tag, git_preconditions
 
 $RAISE_SUBPROC_ERROR = True
 $XONSH_SHOW_TRACEBACK = True
 
-RELENG_ENV = environment.STAGING
-
-RELEASES_BUCKET = RELENG_ENV.releases_bucket
-DOCS_BUCKET = RELENG_ENV.docs_bucket
-CACHE_STORE = RELENG_ENV.cache_store_uri()
-REPO = RELENG_ENV.git_repo
-
 GCROOTS_DIR = Path('./release/gcroots')
 BUILT_GCROOTS_DIR = Path('./release/gcroots-build')
 DRVS_TXT = Path('./release/drvs.txt')

@@ -35,23 +31,14 @@ MAX_JOBS = 2
 RELEASE_SYSTEMS = ["x86_64-linux"]
 
 
-def setup_creds():
-    key = keys.get_ephemeral_key(RELENG_ENV)
+def setup_creds(env: RelengEnvironment):
+    key = keys.get_ephemeral_key(env)
     $AWS_SECRET_ACCESS_KEY = key.secret_key
     $AWS_ACCESS_KEY_ID = key.id
     $AWS_DEFAULT_REGION = 'garage'
     $AWS_ENDPOINT_URL = environment.S3_ENDPOINT
 
 
-def git_preconditions():
-    # verify there is nothing in index ready to stage
-    proc = !(git diff-index --quiet --cached HEAD --)
-    assert proc.rtn == 0
-    # verify there is nothing *stageable* and tracked
-    proc = !(git diff-files --quiet)
-    assert proc.rtn == 0
-
-
 def official_release_commit_tag(force_tag=False):
     print('[+] Setting officialRelease in flake.nix and tagging')
     prev_branch = $(git symbolic-ref --short HEAD).strip()

@@ -102,13 +89,13 @@ def eval_jobs():
     ]
 
 
-def upload_drv_paths_and_outputs(paths: list[str]):
+def upload_drv_paths_and_outputs(env: RelengEnvironment, paths: list[str]):
     proc = subprocess.Popen([
         'nix',
         'copy',
         '-v',
         '--to',
-        CACHE_STORE,
+        env.cache_store_uri(),
         '--stdin',
     ],
         stdin=subprocess.PIPE,

@@ -245,33 +232,38 @@ def prepare_release_notes():
     git commit -m @(commit_msg)
 
 
-def verify_are_on_tag():
-    current_tag = $(git describe --tag).strip()
-    assert current_tag == VERSION
-
-
-def upload_artifacts(noconfirm=False, force_push_tag=False):
-    verify_are_on_tag()
-    git_preconditions()
+def upload_artifacts(env: RelengEnvironment, noconfirm=False, no_check_git=False, force_push_tag=False):
+    if not no_check_git:
+        verify_are_on_tag()
+        git_preconditions()
     assert 'AWS_SECRET_ACCESS_KEY' in __xonsh__.env
 
     tree @(ARTIFACTS)
 
+    env_part = f'environment {env.name}'
     not noconfirm and confirm(
-        f'Would you like to release {ARTIFACTS} as {VERSION}? Type "I want to release this" to confirm\n',
-        'I want to release this'
+        f'Would you like to release {ARTIFACTS} as {VERSION} in {env.colour(env_part)}? Type "I want to release this to {env.name}" to confirm\n',
+        f'I want to release this to {env.name}'
     )
 
+    docker_images = list((ARTIFACTS / f'lix/lix-{VERSION}').glob(f'lix-{VERSION}-docker-image-*.tar.gz'))
+    assert docker_images
+
     print('[+] Upload to cache')
     with open(DRVS_TXT) as fh:
-        upload_drv_paths_and_outputs([x.strip() for x in fh.readlines() if x])
+        upload_drv_paths_and_outputs(env, [x.strip() for x in fh.readlines() if x])
 
+    print('[+] Upload docker images')
+    for target in env.docker_targets:
+        docker.upload_docker_images(target, docker_images)
+
     print('[+] Upload to release bucket')
-    aws s3 cp --recursive @(ARTIFACTS)/ @(RELEASES_BUCKET)/
+    aws s3 cp --recursive @(ARTIFACTS)/ @(env.releases_bucket)/
     print('[+] Upload manual')
-    upload_manual()
+    upload_manual(env)
 
     print('[+] git push tag')
-    git push @(['-f'] if force_push_tag else []) @(REPO) f'{VERSION}:refs/tags/{VERSION}'
+    git push @(['-f'] if force_push_tag else []) @(env.git_repo) f'{VERSION}:refs/tags/{VERSION}'
 
 
 def do_tag_merge(force_tag=False, no_check_git=False):

@@ -290,7 +282,7 @@ def build_manual(eval_result):
     cp --no-preserve=mode -vr @(manual)/share/doc/nix @(MANUAL)
 
 
-def upload_manual():
+def upload_manual(env: RelengEnvironment):
     stable = json.loads($(nix eval --json '.#nix.officialRelease'))
     if stable:
         version = MAJOR

@@ -298,9 +290,9 @@ def upload_manual():
         version = 'nightly'
 
     print('[+] aws s3 sync manual')
-    aws s3 sync @(MANUAL)/ @(DOCS_BUCKET)/manual/lix/@(version)/
+    aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/@(version)/
     if stable:
-        aws s3 sync @(MANUAL)/ @(DOCS_BUCKET)/manual/lix/stable/
+        aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/stable/
 
 
 def build_artifacts(no_check_git=False):

@@ -318,7 +310,8 @@ def build_artifacts(no_check_git=False):
     build_manual(eval_result)
 
     with open(DRVS_TXT, 'w') as fh:
-        fh.write('\n'.join(drv_paths))
+        # don't bother putting the release tarballs themselves because they are duplicate and huge
+        fh.write('\n'.join(x['drvPath'] for x in eval_result if x['attr'] != 'lix-release-tarballs'))
 
     make_artifacts_dir(eval_result, ARTIFACTS)
     print(f'[+] Done! See {ARTIFACTS}')
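The new drvs.txt filter above keeps every evaluated derivation except the release tarballs. A tiny sketch of its effect (hypothetical eval rows):

```python
# rows as produced by eval_jobs(): one dict per hydra job
eval_result = [
    {'attr': 'build.x86_64-linux', 'drvPath': '/nix/store/aaa-lix.drv'},
    {'attr': 'lix-release-tarballs', 'drvPath': '/nix/store/bbb-tarballs.drv'},
]
kept = [x['drvPath'] for x in eval_result if x['attr'] != 'lix-release-tarballs']
assert kept == ['/nix/store/aaa-lix.drv']  # tarballs stay out of the cache upload
```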

releng/docker.xsh (new file, 74 lines)

@@ -0,0 +1,74 @@
import json
import logging
from pathlib import Path
import tempfile

import requests

from .environment import DockerTarget, RelengEnvironment
from .version import VERSION, MAJOR
from . import gitutils
from .docker_assemble import Registry, OCIIndex, OCIIndexItem
from . import docker_assemble

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

def check_all_logins(env: RelengEnvironment):
    for target in env.docker_targets:
        check_login(target)

def check_login(target: DockerTarget):
    skopeo login @(target.registry_name())

def upload_docker_images(target: DockerTarget, paths: list[Path]):
    if not paths: return

    sess = requests.Session()
    sess.headers['User-Agent'] = 'lix-releng'

    tag_names = [DockerTarget.resolve(tag, version=VERSION, major=MAJOR) for tag in target.tags]

    # latest only gets tagged for the current release branch of Lix
    if not gitutils.is_maintenance_branch('HEAD'):
        tag_names.append('latest')

    meta = {}

    reg = docker_assemble.Registry(sess)
    manifests = []

    with tempfile.TemporaryDirectory() as tmp:
        tmp = Path(tmp)

        for path in paths:
            digest_file = tmp / (path.name + '.digest')
            inspection = json.loads($(skopeo inspect docker-archive:@(path)))

            docker_arch = inspection['Architecture']
            docker_os = inspection['Os']
            meta = inspection['Labels']

            log.info('Pushing image %s for %s to %s', path, docker_arch, target.registry_path)

            # insecure-policy: we don't have any signature policy, we are just uploading an image
            # We upload to a junk tag, because otherwise it will upload to `latest`, which is undesirable
            skopeo --insecure-policy copy --format oci --digestfile @(digest_file) docker-archive:@(path) docker://@(target.registry_path):temp

            digest = digest_file.read_text().strip()

            # skopeo doesn't give us the manifest size directly, so we just ask the registry
            metadata = reg.image_info(target.registry_path, digest)

            manifests.append(OCIIndexItem(metadata=metadata, architecture=docker_arch, os=docker_os))
            # delete the temp tag, which we only have to create because of skopeo
            # limitations anyhow (it seems to not have a way to say "don't tag it, find
            # your checksum and put it there")
            # FIXME: this is not possible because GitHub only has a proprietary API for it. amazing. 11/10.
            # reg.delete_tag(target.registry_path, 'temp')

    log.info('Pushed images to %r, building a bigger and more menacing manifest from %r with metadata %r', target, manifests, meta)
    # send the multiarch manifest to each tag
    index = OCIIndex(manifests=manifests, annotations=meta)
    for tag in tag_names:
        reg.upload_index(target.registry_path, tag, index)
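For reference, the tag templates consumed by `DockerTarget.resolve` (defined in the environment module below) are plain `str.format` placeholders. A sketch with assumed version numbers:

```python
VERSION, MAJOR = '2.90.0', '2.90'   # assumed example values
tags = ['{version}', '{major}']     # as configured on each DockerTarget
tag_names = [t.format(version=VERSION, major=MAJOR) for t in tags]
assert tag_names == ['2.90.0', '2.90']
# 'latest' is appended only when HEAD is not a maintenance branch
```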

releng/docker_assemble.py (new file, 399 lines)

@@ -0,0 +1,399 @@
from typing import Any, Literal, Optional
import re
from pathlib import Path
import json
import dataclasses
import time
from urllib.parse import unquote
import urllib.request
import logging

import requests.auth
import requests
import xdg_base_dirs

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

DEBUG_REQUESTS = False
if DEBUG_REQUESTS:
    urllib3_logger = logging.getLogger('requests.packages.urllib3')
    urllib3_logger.setLevel(logging.DEBUG)
    urllib3_logger.propagate = True

# So, there is a bunch of confusing stuff happening in this file. The gist of why it's Like This is:
#
# nix2container does not concern itself with tags (reasonably enough):
# https://github.com/nlewo/nix2container/issues/59
#
# This is fine. But then we noticed: docker images don't play nice if you have
# multiple architectures you want to abstract over if you don't do special
# things. Those special things are images with manifests containing multiple
# images.
#
# Docker has a data model vaguely analogous to git: you have higher level
# objects referring to a bunch of content-addressed blobs.
#
# A multiarch image is more or less just a manifest that refers to more
# manifests; in OCI it is an Index.
#
# See the API spec here: https://github.com/opencontainers/distribution-spec/blob/v1.0.1/spec.md#definitions
# And the Index spec here: https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md
#
# skopeo doesn't *know* how to make multiarch *manifests*:
# https://github.com/containers/skopeo/issues/1136
#
# There is a tool called manifest-tool that is supposed to do this
# (https://github.com/estesp/manifest-tool) but it doesn't support putting in
# annotations on the outer image, and I *really* didn't want to write golang to
# fix that. Thus, a little bit of homebrew containers code.
#
# Essentially what we are doing in here is splatting a bunch of images into the
# registry without tagging them (except as "temp", due to podman issues), then
# simply sending a new composite manifest ourselves.

DockerArchitecture = Literal['amd64'] | Literal['arm64']
MANIFEST_MIME = 'application/vnd.oci.image.manifest.v1+json'
INDEX_MIME = 'application/vnd.oci.image.index.v1+json'


@dataclasses.dataclass(frozen=True, order=True)
class ImageMetadata:
    size: int
    digest: str
    """sha256:SOMEHEX"""


@dataclasses.dataclass(frozen=True, order=True)
class OCIIndexItem:
    """Information about an untagged uploaded image."""

    metadata: ImageMetadata

    architecture: DockerArchitecture

    os: str = 'linux'

    def serialize(self):
        return {
            'mediaType': MANIFEST_MIME,
            'size': self.metadata.size,
            'digest': self.metadata.digest,
            'platform': {
                'architecture': self.architecture,
                'os': self.os,
            }
        }


@dataclasses.dataclass(frozen=True)
class OCIIndex:
    manifests: list[OCIIndexItem]

    annotations: dict[str, str]

    def serialize(self):
        return {
            'schemaVersion': 2,
            'manifests': [item.serialize() for item in sorted(self.manifests)],
            'annotations': self.annotations
        }


def docker_architecture_from_nix_system(system: str) -> DockerArchitecture:
    MAP = {
        'x86_64-linux': 'amd64',
        'aarch64-linux': 'arm64',
    }
    return MAP[system]  # type: ignore


@dataclasses.dataclass
class TaggingOperation:
    manifest: OCIIndex
    tags: list[str]
    """Tags this image is uploaded under"""


runtime_dir = xdg_base_dirs.xdg_runtime_dir()
config_dir = xdg_base_dirs.xdg_config_home()

AUTH_FILES = ([runtime_dir / 'containers/auth.json'] if runtime_dir else []) + \
    [config_dir / 'containers/auth.json', Path.home() / '.docker/config.json']


# Copied from Werkzeug https://github.com/pallets/werkzeug/blob/62e3ea45846d06576199a2f8470be7fe44c867c1/src/werkzeug/http.py#L300-L325
def parse_list_header(value: str) -> list[str]:
    """Parse a header value that consists of a list of comma separated items according
    to `RFC 9110 <https://httpwg.org/specs/rfc9110.html#abnf.extension>`__.

    This extends :func:`urllib.request.parse_http_list` to remove surrounding quotes
    from values.

    .. code-block:: python

        parse_list_header('token, "quoted value"')
        ['token', 'quoted value']

    This is the reverse of :func:`dump_header`.

    :param value: The header value to parse.
    """
    result = []

    for item in urllib.request.parse_http_list(value):
        if len(item) >= 2 and item[0] == item[-1] == '"':
            item = item[1:-1]

        result.append(item)

    return result


# https://www.rfc-editor.org/rfc/rfc2231#section-4
_charset_value_re = re.compile(
    r"""
    ([\w!#$%&*+\-.^`|~]*)'  # charset part, could be empty
    [\w!#$%&*+\-.^`|~]*'  # don't care about language part, usually empty
    ([\w!#$%&'*+\-.^`|~]+)  # one or more token chars with percent encoding
    """,
    re.ASCII | re.VERBOSE,
)


# Copied from: https://github.com/pallets/werkzeug/blob/62e3ea45846d06576199a2f8470be7fe44c867c1/src/werkzeug/http.py#L327-L394
def parse_dict_header(value: str) -> dict[str, str | None]:
    """Parse a list header using :func:`parse_list_header`, then parse each item as a
    ``key=value`` pair.

    .. code-block:: python

        parse_dict_header('a=b, c="d, e", f')
        {"a": "b", "c": "d, e", "f": None}

    This is the reverse of :func:`dump_header`.

    If a key does not have a value, it is ``None``.

    This handles charsets for values as described in
    `RFC 2231 <https://www.rfc-editor.org/rfc/rfc2231#section-3>`__. Only ASCII, UTF-8,
    and ISO-8859-1 charsets are accepted, otherwise the value remains quoted.

    :param value: The header value to parse.

    .. versionchanged:: 3.0
        Passing bytes is not supported.

    .. versionchanged:: 3.0
        The ``cls`` argument is removed.

    .. versionchanged:: 2.3
        Added support for ``key*=charset''value`` encoded items.

    .. versionchanged:: 0.9
        The ``cls`` argument was added.
    """
    result: dict[str, str | None] = {}

    for item in parse_list_header(value):
        key, has_value, value = item.partition("=")
        key = key.strip()

        if not has_value:
            result[key] = None
            continue

        value = value.strip()
        encoding: str | None = None

        if key[-1] == "*":
            # key*=charset''value becomes key=value, where value is percent encoded
            # adapted from parse_options_header, without the continuation handling
            key = key[:-1]
            match = _charset_value_re.match(value)

            if match:
                # If there is a charset marker in the value, split it off.
                encoding, value = match.groups()
                assert encoding
                encoding = encoding.lower()

            # A safe list of encodings. Modern clients should only send ASCII or UTF-8.
            # This list will not be extended further. An invalid encoding will leave the
            # value quoted.
            if encoding in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}:
                # invalid bytes are replaced during unquoting
                value = unquote(value, encoding=encoding)

        if len(value) >= 2 and value[0] == value[-1] == '"':
            value = value[1:-1]

        result[key] = value

    return result


def parse_www_authenticate(www_authenticate):
    scheme, _, rest = www_authenticate.partition(' ')
    scheme = scheme.lower()
    rest = rest.strip()

    parsed = parse_dict_header(rest.rstrip('='))
    return parsed
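For instance, a registry's 401 challenge header (a hypothetical but typical value) parses like this:

```python
hdr = 'bearer realm="https://ghcr.io/token",service="ghcr.io",scope="repository:lix-project/lix:pull"'
print(parse_www_authenticate(hdr))
# {'realm': 'https://ghcr.io/token', 'service': 'ghcr.io',
#  'scope': 'repository:lix-project/lix:pull'}
```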
class AuthState:

    def __init__(self, auth_files: list[Path] = AUTH_FILES):
        self.auth_map: dict[str, str] = {}
        for f in auth_files:
            self.auth_map.update(AuthState.load_auth_file(f))
        self.token_cache: dict[str, str] = {}

    @staticmethod
    def load_auth_file(path: Path) -> dict[str, str]:
        if path.exists():
            with path.open() as fh:
                try:
                    json_obj = json.load(fh)
                    return {k: v['auth'] for k, v in json_obj['auths'].items()}
                except (json.JSONDecodeError, KeyError) as e:
                    log.exception('JSON decode error in %s', path, exc_info=e)
        return {}

    def get_token(self, hostname: str) -> Optional[str]:
        return self.token_cache.get(hostname)

    def obtain_token(self, session: requests.Session, token_endpoint: str,
                     scope: str, service: str, image_path: str) -> str:
        authority, _, _ = image_path.partition('/')
        if tok := self.get_token(authority):
            return tok

        creds = self.find_credential_for(image_path)
        if not creds:
            raise ValueError('No credentials available for ' + image_path)

        resp = session.get(token_endpoint,
                           params={
                               'client_id': 'lix-releng',
                               'scope': scope,
                               'service': service,
                           },
                           headers={
                               'Authorization': 'Basic ' + creds
                           }).json()
        token = resp['token']
        self.token_cache[service] = token
        return token

    def find_credential_for(self, image_path: str):
        trails = image_path.split('/')
        for i in range(len(trails)):
            prefix = '/'.join(trails[:len(trails) - i])
            if prefix in self.auth_map:
                return self.auth_map[prefix]

        return None


class RegistryAuthenticator(requests.auth.AuthBase):
    """Authenticates to an OCI compliant registry"""

    def __init__(self, auth_state: AuthState, session: requests.Session,
                 image: str):
        self.auth_map: dict[str, str] = {}
        self.image = image
        self.session = session
        self.auth_state = auth_state

    def response_hook(self, r: requests.Response,
                      **kwargs: Any) -> requests.Response:
        if r.status_code == 401:
            www_authenticate = r.headers.get('www-authenticate', '').lower()
            parsed = parse_www_authenticate(www_authenticate)
            assert parsed

            tok = self.auth_state.obtain_token(
                self.session,
                parsed['realm'],  # type: ignore
                parsed['scope'],  # type: ignore
                parsed['service'],  # type: ignore
                self.image)

            new_req = r.request.copy()
            new_req.headers['Authorization'] = 'Bearer ' + tok

            return self.session.send(new_req)
        else:
            return r

    def __call__(self,
                 r: requests.PreparedRequest) -> requests.PreparedRequest:
        authority, _, _ = self.image.partition('/')
        auth_may = self.auth_state.get_token(authority)

        if auth_may:
            r.headers['Authorization'] = 'Bearer ' + auth_may

        r.register_hook('response', self.response_hook)
        return r


class Registry:

    def __init__(self, session: requests.Session):
        self.auth_state = AuthState()
        self.session = session

    def image_info(self, image_path: str, manifest_id: str) -> ImageMetadata:
        authority, _, path = image_path.partition('/')
        resp = self.session.head(
            f'https://{authority}/v2/{path}/manifests/{manifest_id}',
            headers={'Accept': MANIFEST_MIME},
            auth=RegistryAuthenticator(self.auth_state, self.session,
                                       image_path))
        resp.raise_for_status()
        return ImageMetadata(int(resp.headers['content-length']),
                             resp.headers['docker-content-digest'])

    def delete_tag(self, image_path: str, tag: str):
        authority, _, path = image_path.partition('/')
        resp = self.session.delete(
            f'https://{authority}/v2/{path}/manifests/{tag}',
            headers={'Content-Type': INDEX_MIME},
            auth=RegistryAuthenticator(self.auth_state, self.session,
                                       image_path))
        resp.raise_for_status()

    def _upload_index(self, image_path: str, tag: str, index: OCIIndex):
        authority, _, path = image_path.partition('/')
        body = json.dumps(index.serialize(),
                          separators=(',', ':'),
                          sort_keys=True)

        resp = self.session.put(
            f'https://{authority}/v2/{path}/manifests/{tag}',
            data=body,
            headers={'Content-Type': INDEX_MIME},
            auth=RegistryAuthenticator(self.auth_state, self.session,
                                       image_path))
        resp.raise_for_status()

        return resp.headers['Location']

    def upload_index(self,
                     image_path: str,
                     tag: str,
                     index: OCIIndex,
                     retries=20,
                     retry_delay=1):
        # eventual consistency lmao
        for _ in range(retries):
            try:
                return self._upload_index(image_path, tag, index)
            except requests.HTTPError as e:
                if e.response.status_code != 404:
                    raise

            time.sleep(retry_delay)
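Putting the pieces together, a sketch of the index body this module uploads (digests and sizes are made up for illustration; assumes the classes above are in scope):

```python
import json

items = [
    OCIIndexItem(metadata=ImageMetadata(size=1234, digest='sha256:' + 'aa' * 32),
                 architecture='amd64'),
    OCIIndexItem(metadata=ImageMetadata(size=1240, digest='sha256:' + 'bb' * 32),
                 architecture='arm64'),
]
index = OCIIndex(manifests=items,
                 annotations={'org.opencontainers.image.title': 'Lix'})
body = json.dumps(index.serialize(), separators=(',', ':'), sort_keys=True)
# -> schemaVersion 2, one MANIFEST_MIME entry per architecture, plus the
#    annotations copied from the single-arch image labels
```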
@@ -1,5 +1,9 @@
+import dataclasses
+from typing import Callable
 import urllib.parse
+import re
+import functools
+import subprocess
-import dataclasses
 
 S3_HOST = 's3.lix.systems'
 S3_ENDPOINT = 'https://s3.lix.systems'

@@ -16,9 +20,32 @@ DEFAULT_STORE_URI_BITS = {
 }
 
 
+@dataclasses.dataclass
+class DockerTarget:
+    registry_path: str
+    """Registry path without the tag, e.g. ghcr.io/lix-project/lix"""
+
+    tags: list[str]
+    """List of tags this image should take. There must be at least one."""
+
+    @staticmethod
+    def resolve(item: str, version: str, major: str) -> str:
+        """
+        Applies templates:
+        - version: the Lix version e.g. 2.90.0
+        - major: the major Lix version e.g. 2.90
+        """
+        return item.format(version=version, major=major)
+
+    def registry_name(self) -> str:
+        [a, _, _] = self.registry_path.partition('/')
+        return a
+
+
 @dataclasses.dataclass
 class RelengEnvironment:
     name: str
+    colour: Callable[[str], str]
 
     cache_store_overlay: dict[str, str]
     cache_bucket: str

@@ -26,22 +53,79 @@ class RelengEnvironment:
     docs_bucket: str
     git_repo: str
 
+    docker_targets: list[DockerTarget]
+
     def cache_store_uri(self):
         qs = DEFAULT_STORE_URI_BITS.copy()
         qs.update(self.cache_store_overlay)
         return self.cache_bucket + "?" + urllib.parse.urlencode(qs)
 
 
+SGR = '\x1b['
+RED = '31;1m'
+GREEN = '32;1m'
+RESET = '0m'
+
+
+def sgr(colour: str, text: str) -> str:
+    return f'{SGR}{colour}{text}{SGR}{RESET}'
+
+
 STAGING = RelengEnvironment(
     name='staging',
+    colour=functools.partial(sgr, GREEN),
     docs_bucket='s3://staging-docs',
     cache_bucket='s3://staging-cache',
-    cache_store_overlay={
-        'secret-key': 'staging.key'
-    },
+    cache_store_overlay={'secret-key': 'staging.key'},
     releases_bucket='s3://staging-releases',
     git_repo='ssh://git@git.lix.systems/lix-project/lix-releng-staging',
+    docker_targets=[
+        # latest will be auto tagged if appropriate
+        DockerTarget('git.lix.systems/lix-project/lix-releng-staging',
+                     tags=['{version}', '{major}']),
+        DockerTarget('ghcr.io/lix-project/lix-releng-staging',
+                     tags=['{version}', '{major}']),
+    ],
 )
 
+GERRIT_REMOTE_RE = re.compile(r'^ssh://(\w+@)?gerrit.lix.systems:2022/lix$')
+
+
+def guess_gerrit_remote():
+    """
+    Deals with people having unknown gerrit username.
+    """
+    out = [
+        x.split()[1] for x in subprocess.check_output(
+            ['git', 'remote', '-v']).decode().splitlines()
+    ]
+    return next(x for x in out if GERRIT_REMOTE_RE.match(x))
+
+
+PROD = RelengEnvironment(
+    name='production',
+    colour=functools.partial(sgr, RED),
+    docs_bucket='s3://docs',
+    cache_bucket='s3://cache',
+    # FIXME: we should decrypt this with age into a tempdir in the future, but
+    # the issue is how to deal with the recipients file. For now, we should
+    # just delete it after doing a release.
+    cache_store_overlay={'secret-key': 'prod.key'},
+    releases_bucket='s3://releases',
+    git_repo=guess_gerrit_remote(),
+    docker_targets=[
+        # latest will be auto tagged if appropriate
+        DockerTarget('git.lix.systems/lix-project/lix',
+                     tags=['{version}', '{major}']),
+        DockerTarget('ghcr.io/lix-project/lix', tags=['{version}', '{major}']),
+    ],
+)
+
+ENVIRONMENTS = {
+    'staging': STAGING,
+    'production': PROD,
+}
+
 
 @dataclasses.dataclass
 class S3Credentials:
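A sketch of what `cache_store_uri()` produces; the keys in `DEFAULT_STORE_URI_BITS` are elided above, so these query parameters are assumed for illustration only:

```python
import urllib.parse

qs = {'region': 'garage', 'endpoint': 's3.lix.systems'}  # illustrative defaults
qs.update({'secret-key': 'staging.key'})                 # cache_store_overlay
print('s3://staging-cache' + '?' + urllib.parse.urlencode(qs))
# s3://staging-cache?region=garage&endpoint=s3.lix.systems&secret-key=staging.key
```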

releng/gitutils.xsh (new file, 37 lines)

@@ -0,0 +1,37 @@
import subprocess
import json

from .version import VERSION


def version_compare(v1: str, v2: str):
    return json.loads($(nix-instantiate --eval --json --argstr v1 @(v1) --argstr v2 @(v2) --expr '{v1, v2}: builtins.compareVersions v1 v2'))


def latest_tag_on_branch(branch: str) -> str:
    return $(git describe --abbrev=0 @(branch) e>/dev/null).strip()


def is_maintenance_branch(branch: str) -> bool:
    try:
        main_tag = latest_tag_on_branch('main')
        current_tag = latest_tag_on_branch(branch)

        return version_compare(current_tag, main_tag) < 0
    except subprocess.CalledProcessError:
        # This is the case before Lix releases 2.90, since main *has* no
        # release tag on it.
        # FIXME: delete this case after 2.91
        return False


def verify_are_on_tag():
    current_tag = $(git describe --tag).strip()
    assert current_tag == VERSION


def git_preconditions():
    # verify there is nothing in index ready to stage
    proc = !(git diff-index --quiet --cached HEAD --)
    assert proc.rtn == 0
    # verify there is nothing *stageable* and tracked
    proc = !(git diff-files --quiet)
    assert proc.rtn == 0
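The decision rule in `is_maintenance_branch` is what drives the automatic `latest` tagging: a branch is a maintenance branch when its newest tag is older than main's, so `latest` is only pushed from the current release line. A plain-Python sketch with hypothetical tags (a simplified stand-in for `builtins.compareVersions`, which handles more than dotted integers):

```python
def is_maintenance(current_tag: str, main_tag: str) -> bool:
    as_tuple = lambda v: tuple(int(p) for p in v.split('.'))
    return as_tuple(current_tag) < as_tuple(main_tag)

assert is_maintenance('2.89.1', '2.90.0') is True    # release branch: skip 'latest'
assert is_maintenance('2.90.0', '2.90.0') is False   # main: also tag 'latest'
```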

@@ -33,7 +33,7 @@ let
       targetName = "*.tar.xz";
     }) systems
     ++ builtins.map (system: {
-      target = hydraJobs.dockerImage.${system};
+      target = hydraJobs.dockerImage.${system}.tarball;
       targetName = "image.tar.gz";
       rename = "lix-${lix.version}-docker-image-${system}.tar.gz";
     }) dockerSystems;