Merge changes from topic "releng" into main

* changes:
  releng: add sha256 for the manual tarball
  releng: fix upload of multiarch images to forgejo
  releng: fix git checking
  releng: fix logging inside interactive xonsh
  releng: support multiple systems
  version: update to 2.90.0-rc1
This commit is contained in:
jade 2024-06-15 02:38:09 +00:00 committed by Gerrit Code Review
commit e1059bfa34
8 changed files with 134 additions and 57 deletions

View file

@@ -1,8 +1,12 @@
from xonsh.main import setup from xonsh.main import setup
setup() setup()
del setup del setup
import logging import logging
import sys
import xonsh.base_shell
from . import environment from . import environment
from . import create_release from . import create_release
@@ -13,19 +17,55 @@ from . import docker
from . import docker_assemble from . import docker_assemble
from . import gitutils from . import gitutils
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
fmt = logging.Formatter('{asctime} {levelname} {name}: {message}', def setup_logging():
datefmt='%b %d %H:%M:%S', """
style='{') Sets up logging to work properly. The following are intended to work:
- ipython/xonsh configuration files adding log handlers out of band
- Reloading the module in xonsh/ipython not causing Bonus Loggers (which is
why we check if there is already a handler. This also helps the previous
case)
- Importing the releng module from xonsh and poking at it interactively
"""
LEVELS = {
# Root logger must be DEBUG so that anything else can be DEBUG
None: logging.DEBUG,
# Everything in releng
__name__: logging.DEBUG,
# Log spam caused by prompt_toolkit
'asyncio': logging.INFO,
}
for name, level in LEVELS.items():
logger = logging.getLogger(name)
logger.setLevel(level)
root_logger = logging.getLogger()
fmt = logging.Formatter('{asctime} {levelname} {name}: {message}',
datefmt='%b %d %H:%M:%S',
style='{')
if not any(
isinstance(h, logging.StreamHandler) for h in root_logger.handlers):
stderr = sys.stderr
# XXX: Horrible hack required by the virtual stderr xonsh uses for each entered
# command getting closed after the command is run: we need to pull out
# the real stderr because this survives across multiple command runs.
#
# This only applies when running xonsh in interactive mode and importing releng.
if isinstance(sys.stderr, xonsh.base_shell._TeeStd):
stderr = stderr.std # type: ignore
hand = logging.StreamHandler(stream=stderr)
hand.set_name('releng root handler')
hand.setFormatter(fmt)
root_logger.addHandler(hand)
setup_logging()
if not any(isinstance(h, logging.StreamHandler) for h in rootLogger.handlers):
hand = logging.StreamHandler()
hand.setFormatter(fmt)
rootLogger.addHandler(hand)
def reload(): def reload():
import importlib import importlib

View file

@@ -2,16 +2,16 @@ from . import create_release
from . import docker from . import docker
from .environment import RelengEnvironment from .environment import RelengEnvironment
from . import environment from . import environment
import functools
import argparse import argparse
import sys import sys
def do_build(args): def do_build(args):
if args.target == 'all': if args.target == 'all':
create_release.build_artifacts(no_check_git=args.no_check_git) create_release.build_artifacts(args.profile, no_check_git=args.no_check_git)
elif args.target == 'manual': elif args.target == 'manual':
eval_result = create_release.eval_jobs() # n.b. args.profile does nothing here, you will just get the x86_64-linux manual no matter what.
eval_result = create_release.eval_jobs(args.profile)
create_release.build_manual(eval_result) create_release.build_manual(eval_result)
else: else:
raise ValueError('invalid target, unreachable') raise ValueError('invalid target, unreachable')
@@ -80,6 +80,10 @@ def main():
build.add_argument('--target', build.add_argument('--target',
choices=['manual', 'all'], choices=['manual', 'all'],
help='Whether to build everything or just the manual') help='Whether to build everything or just the manual')
build.add_argument('--profile',
default='all',
choices=('all', 'x86_64-linux-only'),
help='Which systems to build targets for.')
build.set_defaults(cmd=do_build) build.set_defaults(cmd=do_build)
upload = sps.add_parser( upload = sps.add_parser(

View file

@@ -27,9 +27,6 @@ RELENG_MSG = "Release created with releng/create_release.xsh"
BUILD_CORES = 16 BUILD_CORES = 16
MAX_JOBS = 2 MAX_JOBS = 2
# TODO
RELEASE_SYSTEMS = ["x86_64-linux"]
def setup_creds(env: RelengEnvironment): def setup_creds(env: RelengEnvironment):
key = keys.get_ephemeral_key(env) key = keys.get_ephemeral_key(env)
@@ -82,11 +79,9 @@ def realise(paths: list[str]):
nix-store @(args) @(paths) nix-store @(args) @(paths)
def eval_jobs(): def eval_jobs(build_profile):
nej_output = $(nix-eval-jobs --workers 4 --gc-roots-dir @(GCROOTS_DIR) --force-recurse --flake '.#release-jobs') nej_output = $(nix-eval-jobs --workers 4 --gc-roots-dir @(GCROOTS_DIR) --force-recurse --flake f'.#release-jobs.{build_profile}')
return [x for x in (json.loads(s) for s in nej_output.strip().split('\n')) return [json.loads(s) for s in nej_output.strip().split('\n')]
if x['system'] in RELEASE_SYSTEMS
]
def upload_drv_paths_and_outputs(env: RelengEnvironment, paths: list[str]): def upload_drv_paths_and_outputs(env: RelengEnvironment, paths: list[str]):
@@ -295,14 +290,14 @@ def upload_manual(env: RelengEnvironment):
aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/stable/ aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/stable/
def build_artifacts(no_check_git=False): def build_artifacts(build_profile, no_check_git=False):
rm -rf release/ rm -rf release/
if not no_check_git: if not no_check_git:
verify_are_on_tag() verify_are_on_tag()
git_preconditions() git_preconditions()
print('[+] Evaluating') print('[+] Evaluating')
eval_result = eval_jobs() eval_result = eval_jobs(build_profile)
drv_paths = [x['drvPath'] for x in eval_result] drv_paths = [x['drvPath'] for x in eval_result]
print('[+] Building') print('[+] Building')

View file

@@ -19,6 +19,7 @@ def check_all_logins(env: RelengEnvironment):
check_login(target) check_login(target)
def check_login(target: DockerTarget): def check_login(target: DockerTarget):
log.info('Checking login for %s', target.registry_name)
skopeo login @(target.registry_name()) skopeo login @(target.registry_name())
def upload_docker_images(target: DockerTarget, paths: list[Path]): def upload_docker_images(target: DockerTarget, paths: list[Path]):
@@ -43,7 +44,23 @@ def upload_docker_images(target: DockerTarget, paths: list[Path]):
for path in paths: for path in paths:
digest_file = tmp / (path.name + '.digest') digest_file = tmp / (path.name + '.digest')
inspection = json.loads($(skopeo inspect docker-archive:@(path))) tmp_image = tmp / 'tmp-image.tar.gz'
# insecure-policy: we don't have any signature policy, we are just uploading an image
#
# Absurd: we copy it into an OCI image first so we can get the hash
# we need to upload it untagged, because skopeo has no "don't tag
# this" option.
# The reason for this is that forgejo's container registry throws
# away old versions of tags immediately, so we cannot use a temp
# tag, and it *does* reduce confusion to not upload tags that
# should not be used.
#
# Workaround for: https://github.com/containers/skopeo/issues/2354
log.info('skopeo copy to temp oci-archive %s', tmp_image)
skopeo --insecure-policy copy --format oci --all --digestfile @(digest_file) docker-archive:@(path) oci-archive:@(tmp_image)
inspection = json.loads($(skopeo inspect oci-archive:@(tmp_image)))
docker_arch = inspection['Architecture'] docker_arch = inspection['Architecture']
docker_os = inspection['Os'] docker_os = inspection['Os']
@@ -51,21 +68,13 @@ def upload_docker_images(target: DockerTarget, paths: list[Path]):
log.info('Pushing image %s for %s to %s', path, docker_arch, target.registry_path) log.info('Pushing image %s for %s to %s', path, docker_arch, target.registry_path)
# insecure-policy: we don't have any signature policy, we are just uploading an image
# We upload to a junk tag, because otherwise it will upload to `latest`, which is undesirable
skopeo --insecure-policy copy --format oci --digestfile @(digest_file) docker-archive:@(path) docker://@(target.registry_path):temp
digest = digest_file.read_text().strip() digest = digest_file.read_text().strip()
skopeo --insecure-policy copy --preserve-digests --all oci-archive:@(tmp_image) f'docker://{target.registry_path}@{digest}'
# skopeo doesn't give us the manifest size directly, so we just ask the registry # skopeo doesn't give us the manifest size directly, so we just ask the registry
metadata = reg.image_info(target.registry_path, digest) metadata = reg.image_info(target.registry_path, digest)
manifests.append(OCIIndexItem(metadata=metadata, architecture=docker_arch, os=docker_os)) manifests.append(OCIIndexItem(metadata=metadata, architecture=docker_arch, os=docker_os))
# delete the temp tag, which we only have to create because of skopeo
# limitations anyhow (it seems to not have a way to say "don't tag it, find
# your checksum and put it there")
# FIXME: this is not possible because GitHub only has a proprietary API for it. amazing. 11/10.
# reg.delete_tag(target.registry_path, 'temp')
log.info('Pushed images to %r, building a bigger and more menacing manifest from %r with metadata %r', target, manifests, meta) log.info('Pushed images to %r, building a bigger and more menacing manifest from %r with metadata %r', target, manifests, meta)
# send the multiarch manifest to each tag # send the multiarch manifest to each tag

View file

@@ -49,8 +49,8 @@ if DEBUG_REQUESTS:
# fix that. Thus, a little bit of homebrew containers code. # fix that. Thus, a little bit of homebrew containers code.
# #
# Essentially what we are doing in here is splatting a bunch of images into the # Essentially what we are doing in here is splatting a bunch of images into the
# registry without tagging them (except as "temp", due to podman issues), then # registry without tagging them (with a silly workaround to skopeo issues),
# simply sending a new composite manifest ourselves. # then simply sending a new composite manifest ourselves.
DockerArchitecture = Literal['amd64'] | Literal['arm64'] DockerArchitecture = Literal['amd64'] | Literal['arm64']
MANIFEST_MIME = 'application/vnd.oci.image.manifest.v1+json' MANIFEST_MIME = 'application/vnd.oci.image.manifest.v1+json'
@@ -100,14 +100,6 @@ class OCIIndex:
} }
def docker_architecture_from_nix_system(system: str) -> DockerArchitecture:
MAP = {
'x86_64-linux': 'amd64',
'aarch64-linux': 'arm64',
}
return MAP[system] # type: ignore
@dataclasses.dataclass @dataclasses.dataclass
class TaggingOperation: class TaggingOperation:
manifest: OCIIndex manifest: OCIIndex
@@ -284,7 +276,7 @@ class AuthState:
'Authorization': 'Basic ' + creds 'Authorization': 'Basic ' + creds
}).json() }).json()
token = resp['token'] token = resp['token']
self.token_cache[service] = token self.token_cache[authority] = token
return token return token
def find_credential_for(self, image_path: str): def find_credential_for(self, image_path: str):

View file

@@ -1,6 +1,8 @@
import subprocess import subprocess
import json import json
from .version import VERSION
def version_compare(v1: str, v2: str): def version_compare(v1: str, v2: str):
return json.loads($(nix-instantiate --eval --json --argstr v1 @(v1) --argstr v2 @(v2) --expr '{v1, v2}: builtins.compareVersions v1 v2')) return json.loads($(nix-instantiate --eval --json --argstr v1 @(v1) --argstr v2 @(v2) --expr '{v1, v2}: builtins.compareVersions v1 v2'))

View file

@@ -3,8 +3,27 @@ let
inherit (pkgs) lib; inherit (pkgs) lib;
lix = hydraJobs.build.x86_64-linux; lix = hydraJobs.build.x86_64-linux;
systems = [ "x86_64-linux" ]; # This is all so clumsy because we can't use arguments to functions in
dockerSystems = [ "x86_64-linux" ]; # flakes, and certainly not with n-e-j.
profiles = {
# Used for testing
x86_64-linux-only = {
systems = [ "x86_64-linux" ];
dockerSystems = [ "x86_64-linux" ];
};
all = {
systems = [
"x86_64-linux"
"aarch64-linux"
"aarch64-darwin"
"x86_64-darwin"
];
dockerSystems = [
"x86_64-linux"
"aarch64-linux"
];
};
};
doTarball = doTarball =
{ {
@@ -27,7 +46,8 @@ let
sha256sum --binary $filename | cut -f1 -d' ' > $out/$basename.sha256 sha256sum --binary $filename | cut -f1 -d' ' > $out/$basename.sha256
''; '';
targets = targetsFor =
{ systems, dockerSystems }:
builtins.map (system: { builtins.map (system: {
target = hydraJobs.binaryTarball.${system}; target = hydraJobs.binaryTarball.${system};
targetName = "*.tar.xz"; targetName = "*.tar.xz";
@@ -44,14 +64,29 @@ let
tar -cvzf "$out/lix-${lix.version}-manual.tar.gz" lix-${lix.version}-manual tar -cvzf "$out/lix-${lix.version}-manual.tar.gz" lix-${lix.version}-manual
''; '';
tarballs = pkgs.runCommand "lix-release-tarballs" { } '' tarballsFor =
mkdir -p $out { systems, dockerSystems }:
${lib.concatMapStringsSep "\n" doTarball targets} pkgs.runCommand "lix-release-tarballs" { } ''
cp ${manualTar}/*.tar.gz $out mkdir -p $out
cp -r ${lix.doc}/share/doc/nix/manual $out ${lib.concatMapStringsSep "\n" doTarball (targetsFor {
''; inherit systems dockerSystems;
})}
${doTarball {
target = manualTar;
targetName = "lix-*.tar.gz";
}}
cp -r ${lix.doc}/share/doc/nix/manual $out
'';
in in
{ (builtins.mapAttrs (
_:
{ systems, dockerSystems }:
{
build = lib.filterAttrs (x: _: builtins.elem x systems) hydraJobs.build;
tarballs = tarballsFor { inherit systems dockerSystems; };
}
) profiles)
// {
inherit (hydraJobs) build; inherit (hydraJobs) build;
inherit tarballs; inherit tarballsFor;
} }

View file

@@ -1,4 +1,4 @@
{ {
"version": "2.90.0", "version": "2.90.0-rc1",
"release_name": "Vanilla Ice Cream" "release_name": "Vanilla Ice Cream"
} }