From 1fbfed816256e7506d94d2fdc13adace63ae4555 Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Tue, 16 Jul 2024 03:59:38 +0200 Subject: [PATCH 1/7] flake: rename 'nix' input to 'lix' For consistency with other Lix forks of Nix ecosystems projects, e.g. nix-eval-jobs. --- flake.lock | 16 ++++++++-------- flake.nix | 9 +++++---- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/flake.lock b/flake.lock index a92371a9..fe2c6b6b 100644 --- a/flake.lock +++ b/flake.lock @@ -16,7 +16,7 @@ "type": "github" } }, - "nix": { + "lix": { "inputs": { "flake-compat": "flake-compat", "nix2container": "nix2container", @@ -27,17 +27,17 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1720733512, - "narHash": "sha256-vq9CLDvqSSvH4L7YhDa0ihTOrAry4jntKiuoNb5n98M=", + "lastModified": 1721091462, + "narHash": "sha256-0cmEeoOiB91BviTJHzIyxkY+Gxv3O8ZnnExVAoXEFGI=", "ref": "refs/heads/main", - "rev": "4b109ec1a8fc4550150f56f0f46f2f41d844bda8", - "revCount": 15950, + "rev": "6b4d46e9e0e1dd80e0977684ab20d14bcd1a6bc3", + "revCount": 15967, "type": "git", - "url": "https://git@git.lix.systems/lix-project/lix" + "url": "https://git.lix.systems/lix-project/lix" }, "original": { "type": "git", - "url": "https://git@git.lix.systems/lix-project/lix" + "url": "https://git.lix.systems/lix-project/lix" } }, "nix2container": { @@ -106,7 +106,7 @@ }, "root": { "inputs": { - "nix": "nix", + "lix": "lix", "nixpkgs": "nixpkgs" } } diff --git a/flake.nix b/flake.nix index 7797e8f4..4478254f 100644 --- a/flake.nix +++ b/flake.nix @@ -2,15 +2,16 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05"; - inputs.nix.url = "git+https://git@git.lix.systems/lix-project/lix"; - inputs.nix.inputs.nixpkgs.follows = "nixpkgs"; - outputs = { self, nixpkgs, nix }: + inputs.lix.url = "git+https://git.lix.systems/lix-project/lix"; + inputs.lix.inputs.nixpkgs.follows = "nixpkgs"; + + outputs = { self, nixpkgs, lix }: let systems = [ "x86_64-linux" "aarch64-linux" ]; forEachSystem = nixpkgs.lib.genAttrs systems; - overlayList = [ self.overlays.default nix.overlays.default ]; + overlayList = [ self.overlays.default lix.overlays.default ]; pkgsBySystem = forEachSystem (system: import nixpkgs { inherit system; -- 2.44.1 From 6195cec6a3079dbdb3ed433a118bbe83fa72af1f Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Tue, 16 Jul 2024 04:35:29 +0200 Subject: [PATCH 2/7] hydra-queue-runner: adjust for Lix generators related changes --- src/hydra-queue-runner/build-remote.cc | 4 ++-- src/hydra-queue-runner/build-result.cc | 2 +- src/hydra-queue-runner/state.hh | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index ff88b343..baa8a49b 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -121,7 +121,7 @@ static void copyClosureTo( the remote host to substitute missing paths. */ // FIXME: substitute output pollutes our build log conn.to << ServeProto::Command::QueryValidPaths << 1 << useSubstitutes; - ServeProto::write(destStore, conn, closure); + conn.to << ServeProto::write(destStore, conn, closure); conn.to.flush(); /* Get back the set of paths that are already valid on the remote @@ -359,7 +359,7 @@ static std::map queryPathInfos( /* Get info about each output path. 
*/ std::map infos; conn.to << ServeProto::Command::QueryPathInfos; - ServeProto::write(localStore, conn, outputs); + conn.to << ServeProto::write(localStore, conn, outputs); conn.to.flush(); while (true) { auto storePathS = readString(conn.from); diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc index 08c099f3..93851369 100644 --- a/src/hydra-queue-runner/build-result.cc +++ b/src/hydra-queue-runner/build-result.cc @@ -36,7 +36,7 @@ BuildOutput getBuildOutput( printInfo("fetching NAR contents of '%s'...", outputS); auto source = sinkToSource([&](Sink & sink) { - store->narFromPath(output, sink); + sink << store->narFromPath(output); }); extractNarData(*source, outputS, narMembers); } diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 49968558..29349c9b 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -328,7 +328,6 @@ struct Machine : nix::Machine operator nix::ServeProto::WriteConn () { return { - .to = to, .version = remoteVersion, }; } -- 2.44.1 From 684cc50d86608cccf7500ce00af89ea34c488473 Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Tue, 16 Jul 2024 04:01:14 +0200 Subject: [PATCH 3/7] flake: add nix-eval-jobs as input --- flake.lock | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ flake.nix | 4 +++ 2 files changed, 94 insertions(+) diff --git a/flake.lock b/flake.lock index fe2c6b6b..cf2b2d72 100644 --- a/flake.lock +++ b/flake.lock @@ -16,6 +16,27 @@ "type": "github" } }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": [ + "nix-eval-jobs", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1719994518, + "narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, "lix": { "inputs": { "flake-compat": "flake-compat", @@ -40,6 +61,53 @@ "url": "https://git.lix.systems/lix-project/lix" } }, + "nix-eval-jobs": { + "inputs": { + "flake-parts": "flake-parts", + "lix": [ + "lix" + ], + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "nixpkgs" + ], + "treefmt-nix": "treefmt-nix" + }, + "locked": { + "lastModified": 1721195872, + "narHash": "sha256-TlvRq634MSl22BWLmpTy2vdtKntbZlsUwdMq8Mp9AWs=", + "ref": "refs/heads/main", + "rev": "c057494450f2d1420726ddb0bab145a5ff4ddfdd", + "revCount": 608, + "type": "git", + "url": "https://git.lix.systems/lix-project/nix-eval-jobs" + }, + "original": { + "type": "git", + "url": "https://git.lix.systems/lix-project/nix-eval-jobs" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "nix-eval-jobs", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1720066371, + "narHash": "sha256-uPlLYH2S0ACj0IcgaK9Lsf4spmJoGejR9DotXiXSBZQ=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "622f829f5fe69310a866c8a6cd07e747c44ef820", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, "nix2container": { "flake": false, "locked": { @@ -107,8 +175,30 @@ "root": { "inputs": { "lix": "lix", + "nix-eval-jobs": "nix-eval-jobs", "nixpkgs": "nixpkgs" } + }, + "treefmt-nix": { + "inputs": { + "nixpkgs": [ + "nix-eval-jobs", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1721059077, + "narHash": "sha256-gCICMMX7VMSKKt99giDDtRLkHJ0cwSgBtDijJAqTlto=", + "owner": "numtide", + "repo": "treefmt-nix", + 
"rev": "0fb28f237f83295b4dd05e342f333b447c097398", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "treefmt-nix", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 4478254f..d0b3e9d3 100644 --- a/flake.nix +++ b/flake.nix @@ -6,6 +6,10 @@ inputs.lix.url = "git+https://git.lix.systems/lix-project/lix"; inputs.lix.inputs.nixpkgs.follows = "nixpkgs"; + inputs.nix-eval-jobs.url = "git+https://git.lix.systems/lix-project/nix-eval-jobs"; + inputs.nix-eval-jobs.inputs.nixpkgs.follows = "nixpkgs"; + inputs.nix-eval-jobs.inputs.lix.follows = "lix"; + outputs = { self, nixpkgs, lix }: let systems = [ "x86_64-linux" "aarch64-linux" ]; -- 2.44.1 From 6d4ccff43c41adaf6e4b2b9bced7243bc2f6e97b Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Tue, 16 Jul 2024 04:22:41 +0200 Subject: [PATCH 4/7] hydra-eval-jobset: use nix-eval-jobs instead of hydra-eval-jobs --- flake.nix | 3 +- package.nix | 3 ++ src/script/hydra-eval-jobset | 102 +++++++++++++++++++++-------------- t/evaluator/evaluate-meta.t | 22 ++++++++ t/jobs/meta.nix | 17 ++++++ 5 files changed, 107 insertions(+), 40 deletions(-) create mode 100644 t/evaluator/evaluate-meta.t create mode 100644 t/jobs/meta.nix diff --git a/flake.nix b/flake.nix index d0b3e9d3..859b418c 100644 --- a/flake.nix +++ b/flake.nix @@ -10,7 +10,7 @@ inputs.nix-eval-jobs.inputs.nixpkgs.follows = "nixpkgs"; inputs.nix-eval-jobs.inputs.lix.follows = "lix"; - outputs = { self, nixpkgs, lix }: + outputs = { self, nix-eval-jobs, nixpkgs, lix }: let systems = [ "x86_64-linux" "aarch64-linux" ]; forEachSystem = nixpkgs.lib.genAttrs systems; @@ -29,6 +29,7 @@ overlays.default = final: prev: { hydra = final.callPackage ./package.nix { inherit (final.lib) fileset; + nix-eval-jobs = nix-eval-jobs.packages.${final.system}.default; rawSrc = self; }; }; diff --git a/package.nix b/package.nix index 623bebeb..31a55b17 100644 --- a/package.nix +++ b/package.nix @@ -48,6 +48,7 @@ , xz , gnutar , gnused +, nix-eval-jobs , rpm , dpkg @@ -192,6 +193,7 @@ stdenv.mkDerivation (finalAttrs: { openldap postgresql_13 pixz + nix-eval-jobs ]; checkInputs = [ @@ -220,6 +222,7 @@ stdenv.mkDerivation (finalAttrs: { darcs gnused breezy + nix-eval-jobs ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] ); diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 7ed7ebe8..de365a7d 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -357,22 +357,21 @@ sub evalJobs { my @cmd; if (defined $flakeRef) { - @cmd = ("hydra-eval-jobs", - "--flake", $flakeRef, - "--gc-roots-dir", getGCRootsDir, - "--max-jobs", 1); + @cmd = ("nix-eval-jobs", "--flake", $flakeRef . '#hydraJobs'); } else { my $nixExprInput = $inputInfo->{$nixExprInputName}->[0] or die "cannot find the input containing the job expression\n"; - @cmd = ("hydra-eval-jobs", + @cmd = ("nix-eval-jobs", "<" . $nixExprInputName . "/" . $nixExprPath . 
">", - "--gc-roots-dir", getGCRootsDir, - "--max-jobs", 1, inputsToArgs($inputInfo)); } - push @cmd, "--no-allow-import-from-derivation" if $config->{allow_import_from_derivation} // "true" ne "true"; + push @cmd, ("--gc-roots-dir", getGCRootsDir); + push @cmd, ("--max-jobs", 1); + push @cmd, "--meta"; + push @cmd, "--force-recurse"; + push @cmd, ("--option", "allow-import-from-derivation", "false") if $config->{allow_import_from_derivation} // "true" ne "true"; if (defined $ENV{'HYDRA_DEBUG'}) { sub escape { @@ -384,14 +383,23 @@ sub evalJobs { print STDERR "evaluator: @escaped\n"; } - (my $res, my $jobsJSON, my $stderr) = captureStdoutStderr(21600, @cmd); - die "hydra-eval-jobs returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8)) + (my $res, my $jobsJSONLines, my $stderr) = captureStdoutStderr(21600, @cmd); + die "nix-eval-jobs returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8)) . ":\n" . ($stderr ? decode("utf-8", $stderr) : "(no output)\n") if $res; print STDERR "$stderr"; - return decode_json($jobsJSON); + # XXX: take advantage of nix-eval-jobs's streaming instead of parsing everything in one block at + # the end. + my @jobs; + foreach my $line (split(/\n/, $jobsJSONLines)) { + last if $line eq ""; + + push(@jobs, decode_json($line)); + }; + + return @jobs; } @@ -420,7 +428,7 @@ sub checkBuild { my $firstOutputName = $outputNames[0]; my $firstOutputPath = $buildInfo->{outputs}->{$firstOutputName}; - my $jobName = $buildInfo->{jobName} or die; + my $jobName = $buildInfo->{attr} or die; my $drvPath = $buildInfo->{drvPath} or die; my $build; @@ -474,9 +482,30 @@ sub checkBuild { my $time = time(); - sub null { - my ($s) = @_; - return $s eq "" ? undef : $s; + sub getMeta { + my ($s, $def) = @_; + return ($s || "") eq "" ? $def : $s; + } + + sub getMetaStrings { + my ($v, $k, $acc) = @_; + my $t = ref $v; + + if ($t eq 'HASH') { + push @$acc, $v->{$k} if exists $v->{$k}; + } elsif ($t eq 'ARRAY') { + getMetaStrings($_, $k, $acc) foreach @$v; + } elsif (defined $v) { + push @$acc, $v; + } + } + + sub getMetaConcatStrings { + my ($v, $k) = @_; + + my @strings; + getMetaStrings($v, $k, \@strings); + return join(", ", @strings) || undef; } # Add the build to the database. 
@@ -484,19 +513,19 @@ sub checkBuild { { timestamp => $time , jobset_id => $jobset->id , job => $jobName - , description => null($buildInfo->{description}) - , license => null($buildInfo->{license}) - , homepage => null($buildInfo->{homepage}) - , maintainers => null($buildInfo->{maintainers}) - , maxsilent => $buildInfo->{maxSilent} - , timeout => $buildInfo->{timeout} - , nixname => $buildInfo->{nixName} + , description => getMeta($buildInfo->{meta}->{description}, undef) + , license => getMetaConcatStrings($buildInfo->{meta}->{license}, "shortName") + , homepage => getMeta($buildInfo->{meta}->{homepage}, undef) + , maintainers => getMetaConcatStrings($buildInfo->{meta}->{maintainers}, "email") + , maxsilent => getMeta($buildInfo->{meta}->{maxSilent}, 7200) + , timeout => getMeta($buildInfo->{meta}->{timeout}, 36000) + , nixname => $buildInfo->{name} , drvpath => $drvPath , system => $buildInfo->{system} - , priority => $buildInfo->{schedulingPriority} + , priority => getMeta($buildInfo->{meta}->{schedulingPriority}, 100) , finished => 0 , iscurrent => 1 - , ischannel => $buildInfo->{isChannel} + , ischannel => getMeta($buildInfo->{meta}->{isChannel}, 0) }); $build->buildoutputs->create({ name => $_, path => $buildInfo->{outputs}->{$_} }) @@ -665,7 +694,7 @@ sub checkJobsetWrapped { return; } - # Hash the arguments to hydra-eval-jobs and check the + # Hash the arguments to nix-eval-jobs and check the # JobsetInputHashes to see if the previous evaluation had the same # inputs. If so, bail out. my @args = ($jobset->nixexprinput // "", $jobset->nixexprpath // "", inputsToArgs($inputInfo)); @@ -687,19 +716,18 @@ sub checkJobsetWrapped { # Evaluate the job expression. my $evalStart = clock_gettime(CLOCK_MONOTONIC); - my $jobs = evalJobs($project->name . ":" . $jobset->name, $inputInfo, $jobset->nixexprinput, $jobset->nixexprpath, $flakeRef); + my @jobs = evalJobs($project->name . ":" . $jobset->name, $inputInfo, $jobset->nixexprinput, $jobset->nixexprpath, $flakeRef); my $evalStop = clock_gettime(CLOCK_MONOTONIC); if ($jobsetsJobset) { - my @keys = keys %$jobs; die "The .jobsets jobset must only have a single job named 'jobsets'" - unless (scalar @keys) == 1 && $keys[0] eq "jobsets"; + unless (scalar @jobs) == 1 && $jobs[0]->{attr} eq "jobsets"; } Net::Statsd::timing("hydra.evaluator.eval_time", int(($evalStop - $evalStart) * 1000)); if ($dryRun) { - foreach my $name (keys %{$jobs}) { - my $job = $jobs->{$name}; + foreach my $job (@jobs) { + my $name = $job->{attr}; if (defined $job->{drvPath}) { print STDERR "good job $name: $job->{drvPath}\n"; } else { @@ -709,11 +737,6 @@ sub checkJobsetWrapped { return; } - die "Jobset contains a job with an empty name. Make sure the jobset evaluates to an attrset of jobs.\n" - if defined $jobs->{""}; - - $jobs->{$_}->{jobName} = $_ for keys %{$jobs}; - my $jobOutPathMap = {}; my $jobsetChanged = 0; my $dbStart = clock_gettime(CLOCK_MONOTONIC); @@ -722,10 +745,10 @@ sub checkJobsetWrapped { # Store the error messages for jobs that failed to evaluate. my $evaluationErrorTime = time; my $evaluationErrorMsg = ""; - foreach my $job (values %{$jobs}) { + foreach my $job (@jobs) { next unless defined $job->{error}; $evaluationErrorMsg .= - ($job->{jobName} ne "" ? "in job ‘$job->{jobName}’" : "at top-level") . + ($job->{attr} ne "" ? "in job ‘$job->{attr}’" : "at top-level") . ":\n" . $job->{error} . 
"\n\n"; } setJobsetError($jobset, $evaluationErrorMsg, $evaluationErrorTime); @@ -760,7 +783,7 @@ sub checkJobsetWrapped { }); # Schedule each successfully evaluated job. - foreach my $job (permute(values %{$jobs})) { + foreach my $job (permute(@jobs)) { next if defined $job->{error}; #print STDERR "considering job " . $project->name, ":", $jobset->name, ":", $job->{jobName} . "\n"; checkBuild($db, $jobset, $ev, $inputInfo, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins); @@ -801,7 +824,8 @@ sub checkJobsetWrapped { $drvPathToId{$x->{drvPath}} = $x; } - foreach my $job (values %{$jobs}) { + # XXX: dead code with nix-eval-jobs. To be removed. + foreach my $job (values @jobs) { next unless $job->{constituents}; if (defined $job->{error}) { diff --git a/t/evaluator/evaluate-meta.t b/t/evaluator/evaluate-meta.t new file mode 100644 index 00000000..9f546a7f --- /dev/null +++ b/t/evaluator/evaluate-meta.t @@ -0,0 +1,22 @@ +use feature 'unicode_strings'; +use strict; +use warnings; +use Setup; +use Test2::V0; + +my $ctx = test_context(); + +my $builds = $ctx->makeAndEvaluateJobset( + expression => "meta.nix", + build => 1 +); + +my $build = $builds->{"full-of-meta"}; + +is($build->finished, 1, "Build should be finished."); +is($build->description, "This is the description of the job.", "Wrong description extracted from the build."); +is($build->license, "MIT, BSD", "Wrong licenses extracted from the build."); +is($build->homepage, "https://example.com/", "Wrong homepage extracted from the build."); +is($build->maintainers, 'alice@example.com, bob@not.found', "Wrong maintainers extracted from the build."); + +done_testing; diff --git a/t/jobs/meta.nix b/t/jobs/meta.nix new file mode 100644 index 00000000..9204e384 --- /dev/null +++ b/t/jobs/meta.nix @@ -0,0 +1,17 @@ +with import ./config.nix; +{ + full-of-meta = + mkDerivation { + name = "full-of-meta"; + builder = ./empty-dir-builder.sh; + + meta = { + description = "This is the description of the job."; + license = [ { shortName = "MIT"; } "BSD" ]; + homepage = "https://example.com/"; + maintainers = [ "alice@example.com" { email = "bob@not.found"; } ]; + + outPath = "${placeholder "out"}"; + }; + }; +} -- 2.44.1 From ed7c58708cd3affd62a598a22a500ed2adf318bf Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Tue, 16 Jul 2024 04:04:08 +0200 Subject: [PATCH 5/7] hydra-eval-jobs: remove, replaced by nix-eval-jobs --- .gitignore | 1 - configure.ac | 1 - package.nix | 2 +- src/Makefile.am | 2 +- src/hydra-eval-jobs/Makefile.am | 5 - src/hydra-eval-jobs/hydra-eval-jobs.cc | 577 ------------------------- t/Makefile.am | 2 +- t/evaluator/evaluate-oom-job.t | 63 --- 8 files changed, 3 insertions(+), 650 deletions(-) delete mode 100644 src/hydra-eval-jobs/Makefile.am delete mode 100644 src/hydra-eval-jobs/hydra-eval-jobs.cc delete mode 100644 t/evaluator/evaluate-oom-job.t diff --git a/.gitignore b/.gitignore index f8bf5718..95148a62 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,6 @@ Makefile.in /src/sql/hydra-postgresql.sql /src/sql/hydra-sqlite.sql /src/sql/tmp.sqlite -/src/hydra-eval-jobs/hydra-eval-jobs /src/root/static/bootstrap /src/root/static/js/flot /tests diff --git a/configure.ac b/configure.ac index 2f4cf848..671af205 100644 --- a/configure.ac +++ b/configure.ac @@ -61,7 +61,6 @@ AC_CONFIG_FILES([ doc/manual/Makefile src/Makefile src/hydra-evaluator/Makefile - src/hydra-eval-jobs/Makefile src/hydra-queue-runner/Makefile src/sql/Makefile src/ttf/Makefile diff --git a/package.nix b/package.nix index 31a55b17..4e480b3d 100644 --- 
a/package.nix +++ b/package.nix @@ -231,7 +231,7 @@ stdenv.mkDerivation (finalAttrs: { shellHook = '' pushd $(git rev-parse --show-toplevel) >/dev/null - PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH + PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH PERL5LIB=$(pwd)/src/lib:$PERL5LIB export HYDRA_HOME="$(pwd)/src/" mkdir -p .hydra-data diff --git a/src/Makefile.am b/src/Makefile.am index a28780b6..ccd52eb9 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1,3 +1,3 @@ -SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf +SUBDIRS = hydra-evaluator hydra-queue-runner sql script lib root ttf BOOTCLEAN_SUBDIRS = $(SUBDIRS) DIST_SUBDIRS = $(SUBDIRS) diff --git a/src/hydra-eval-jobs/Makefile.am b/src/hydra-eval-jobs/Makefile.am deleted file mode 100644 index d5f99eed..00000000 --- a/src/hydra-eval-jobs/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -bin_PROGRAMS = hydra-eval-jobs - -hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc -hydra_eval_jobs_LDADD = $(NIX_LIBS) -llixcmd -hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc deleted file mode 100644 index 6593edf5..00000000 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ /dev/null @@ -1,577 +0,0 @@ -#include -#include -#include -#include - -#include "shared.hh" -#include "store-api.hh" -#include "eval.hh" -#include "eval-inline.hh" -#include "eval-settings.hh" -#include "signals.hh" -#include "terminal.hh" -#include "get-drvs.hh" -#include "globals.hh" -#include "lix/libcmd/common-eval-args.hh" -#include "flake/flakeref.hh" -#include "flake/flake.hh" -#include "attr-path.hh" -#include "derivations.hh" -#include "local-fs-store.hh" - -#include "hydra-config.hh" - -#include -#include -#include - -#include - -void check_pid_status_nonblocking(pid_t check_pid) -{ - // Only check 'initialized' and known PID's - if (check_pid <= 0) { return; } - - int wstatus = 0; - pid_t pid = waitpid(check_pid, &wstatus, WNOHANG); - // -1 = failure, WNOHANG: 0 = no change - if (pid <= 0) { return; } - - std::cerr << "child process (" << pid << ") "; - - if (WIFEXITED(wstatus)) { - std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl; - } else if (WIFSIGNALED(wstatus)) { - std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl; - } else if (WIFSTOPPED(wstatus)) { - std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl; - } else if (WIFCONTINUED(wstatus)) { - std::cerr << "continued" << std::endl; - } -} - -using namespace nix; - -static Path gcRootsDir; -static size_t maxMemorySize; - -struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs -{ - Path releaseExpr; - bool flake = false; - bool dryRun = false; - - MyArgs() : MixCommonArgs("hydra-eval-jobs") - { - addFlag({ - .longName = "gc-roots-dir", - .description = "garbage collector roots directory", - .labels = {"path"}, - .handler = {&gcRootsDir} - }); - - addFlag({ - .longName = "dry-run", - .description = "don't create store derivations", - .handler = {&dryRun, true} - }); - - addFlag({ - .longName = "flake", - .description = "build a flake", - .handler = {&flake, true} - }); - - expectArg("expr", &releaseExpr); - } -}; - -static MyArgs myArgs; - -static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute) -{ - Strings res; - std::function rec; - - rec = [&](Value & v) { - 
state.forceValue(v, noPos); - if (v.type() == nString) - res.push_back(v.string.s); - else if (v.isList()) - for (unsigned int n = 0; n < v.listSize(); ++n) - rec(*v.listElems()[n]); - else if (v.type() == nAttrs) { - auto a = v.attrs->find(state.symbols.create(subAttribute)); - if (a != v.attrs->end()) - res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes"))); - } - }; - - Value * v = drv.queryMeta(name); - if (v) rec(*v); - - return concatStringsSep(", ", res); -} - -static void worker( - EvalState & state, - Bindings & autoArgs, - AutoCloseFD & to, - AutoCloseFD & from) -{ - Value vTop; - - if (myArgs.flake) { - using namespace flake; - - auto flakeRef = parseFlakeRef(myArgs.releaseExpr); - - auto vFlake = state.allocValue(); - - auto lockedFlake = lockFlake(state, flakeRef, - LockFlags { - .updateLockFile = false, - .useRegistries = false, - .allowUnlocked = false, - }); - - callFlake(state, lockedFlake, *vFlake); - - auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value; - state.forceValue(*vOutputs, noPos); - - auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs")); - if (!aHydraJobs) - aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks")); - if (!aHydraJobs) - throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef); - - vTop = *aHydraJobs->value; - - } else { - state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop); - } - - auto vRoot = state.allocValue(); - state.autoCallFunction(autoArgs, vTop, *vRoot); - - while (true) { - /* Wait for the master to send us a job name. */ - writeLine(to.get(), "next"); - - auto s = readLine(from.get()); - if (s == "exit") break; - if (!s.starts_with("do ")) abort(); - std::string attrPath(s, 3); - - debug("worker process %d at '%s'", getpid(), attrPath); - - /* Evaluate it and send info back to the master. */ - nlohmann::json reply; - - try { - auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first; - - auto v = state.allocValue(); - state.autoCallFunction(autoArgs, *vTmp, *v); - - if (auto drv = getDerivation(state, *v, false)) { - - // CA derivations do not have static output paths, so we - // have to defensively not query output paths in case we - // encounter one. - DrvInfo::Outputs outputs = drv->queryOutputs( - !experimentalFeatureSettings.isEnabled(Xp::CaDerivations)); - - if (drv->querySystem() == "unknown") - state.error("derivation must have a 'system' attribute").debugThrow(); - - auto drvPath = state.store->printStorePath(drv->requireDrvPath()); - - nlohmann::json job; - - job["nixName"] = drv->queryName(); - job["system"] =drv->querySystem(); - job["drvPath"] = drvPath; - job["description"] = drv->queryMetaString("description"); - job["license"] = queryMetaStrings(state, *drv, "license", "shortName"); - job["homepage"] = drv->queryMetaString("homepage"); - job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email"); - job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100); - job["timeout"] = drv->queryMetaInt("timeout", 36000); - job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200); - job["isChannel"] = drv->queryMetaBool("isHydraChannel", false); - - /* If this is an aggregate, then get its constituents. 
*/ - auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) { - auto a = v->attrs->get(state.symbols.create("constituents")); - if (!a) - state.error("derivation must have a ‘constituents’ attribute").debugThrow(); - - NixStringContext context; - state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false); - for (auto & c : context) - std::visit(overloaded { - [&](const NixStringContextElem::Built & b) { - job["constituents"].push_back(b.drvPath->to_string(*state.store)); - }, - [&](const NixStringContextElem::Opaque & o) { - }, - [&](const NixStringContextElem::DrvDeep & d) { - }, - }, c.raw); - - state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute"); - for (unsigned int n = 0; n < a->value->listSize(); ++n) { - auto v = a->value->listElems()[n]; - state.forceValue(*v, noPos); - if (v->type() == nString) - job["namedConstituents"].push_back(v->str()); - } - } - - /* Register the derivation as a GC root. !!! This - registers roots for jobs that we may have already - done. */ - auto localStore = state.store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root); - } - - nlohmann::json out; - for (auto & [outputName, optOutputPath] : outputs) { - if (optOutputPath) { - out[outputName] = state.store->printStorePath(*optOutputPath); - } else { - // See the `queryOutputs` call above; we should - // not encounter missing output paths otherwise. - assert(experimentalFeatureSettings.isEnabled(Xp::CaDerivations)); - out[outputName] = nullptr; - } - } - job["outputs"] = std::move(out); - reply["job"] = std::move(job); - } - - else if (v->type() == nAttrs) { - auto attrs = nlohmann::json::array(); - StringSet ss; - for (auto & i : v->attrs->lexicographicOrder(state.symbols)) { - std::string name(state.symbols[i->name]); - if (name.find(' ') != std::string::npos) { - printError("skipping job with illegal name '%s'", name); - continue; - } - attrs.push_back(name); - } - reply["attrs"] = std::move(attrs); - } - - else if (v->type() == nNull) - ; - - else state.error("attribute '%s' is %s, which is not supported", attrPath, showType(*v)).debugThrow(); - - } catch (EvalError & e) { - auto msg = e.msg(); - // Transmits the error we got from the previous evaluation - // in the JSON output. - reply["error"] = filterANSIEscapes(msg, true); - // Don't forget to print it into the STDERR log, this is - // what's shown in the Hydra UI. - printError(msg); - } - - writeLine(to.get(), reply.dump()); - - /* If our RSS exceeds the maximum, exit. The master will - start a new process. */ - struct rusage r; - getrusage(RUSAGE_SELF, &r); - if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break; - } - - writeLine(to.get(), "restart"); -} - -int main(int argc, char * * argv) -{ - /* Prevent undeclared dependencies in the evaluation via - $NIX_PATH. 
*/ - unsetenv("NIX_PATH"); - - return handleExceptions(argv[0], [&]() { - - auto config = std::make_unique(); - - auto nrWorkers = config->getIntOption("evaluator_workers", 1); - maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096); - - initNix(); - initGC(); - - myArgs.parseCmdline(argvToStrings(argc, argv)); - - auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake); - - /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */ - settings.builders = ""; - - /* Prevent access to paths outside of the Nix search path and - to the environment. */ - evalSettings.restrictEval = true; - - /* When building a flake, use pure evaluation (no access to - 'getEnv', 'currentSystem' etc. */ - evalSettings.pureEval = pureEval; - - if (myArgs.dryRun) settings.readOnlyMode = true; - - if (myArgs.releaseExpr == "") throw UsageError("no expression specified"); - - if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified"); - - struct State - { - std::set todo{""}; - std::set active; - nlohmann::json jobs; - std::exception_ptr exc; - }; - - std::condition_variable wakeup; - - Sync state_; - - /* Start a handler thread per worker process. */ - auto handler = [&]() - { - Pid pid; - try { - AutoCloseFD from, to; - - while (true) { - - /* Start a new worker process if necessary. */ - if (!pid) { - Pipe toPipe, fromPipe; - toPipe.create(); - fromPipe.create(); - pid = startProcess( - [&, - to{std::make_shared(std::move(fromPipe.writeSide))}, - from{std::make_shared(std::move(toPipe.readSide))} - ]() - { - try { - EvalState state(myArgs.searchPath, openStore()); - Bindings & autoArgs = *myArgs.getAutoArgs(state); - worker(state, autoArgs, *to, *from); - } catch (Error & e) { - nlohmann::json err; - auto msg = e.msg(); - err["error"] = filterANSIEscapes(msg, true); - printError(msg); - writeLine(to->get(), err.dump()); - // Don't forget to print it into the STDERR log, this is - // what's shown in the Hydra UI. - writeLine(to->get(), "restart"); - } - }); - from = std::move(fromPipe.readSide); - to = std::move(toPipe.writeSide); - debug("created worker process %d", pid.get()); - } - - /* Check whether the existing worker process is still there. */ - auto s = readLine(from.get()); - if (s == "restart") { - pid.wait(); - continue; - } else if (s != "next") { - auto json = nlohmann::json::parse(s); - throw Error("worker error: %s", (std::string) json["error"]); - } - - /* Wait for a job name to become available. */ - std::string attrPath; - - while (true) { - checkInterrupt(); - auto state(state_.lock()); - if ((state->todo.empty() && state->active.empty()) || state->exc) { - writeLine(to.get(), "exit"); - return; - } - if (!state->todo.empty()) { - attrPath = *state->todo.begin(); - state->todo.erase(state->todo.begin()); - state->active.insert(attrPath); - break; - } else - state.wait(wakeup); - } - - /* Tell the worker to evaluate it. */ - writeLine(to.get(), "do " + attrPath); - - /* Wait for the response. */ - auto response = nlohmann::json::parse(readLine(from.get())); - - /* Handle the response. */ - StringSet newAttrs; - - if (response.find("job") != response.end()) { - auto state(state_.lock()); - state->jobs[attrPath] = response["job"]; - } - - if (response.find("attrs") != response.end()) { - for (auto & i : response["attrs"]) { - std::string path = i; - if (path.find(".") != std::string::npos){ - path = "\"" + path + "\""; - } - auto s = (attrPath.empty() ? 
"" : attrPath + ".") + (std::string) path; - newAttrs.insert(s); - } - } - - if (response.find("error") != response.end()) { - auto state(state_.lock()); - state->jobs[attrPath]["error"] = response["error"]; - } - - /* Add newly discovered job names to the queue. */ - { - auto state(state_.lock()); - state->active.erase(attrPath); - for (auto & s : newAttrs) - state->todo.insert(s); - wakeup.notify_all(); - } - } - } catch (...) { - check_pid_status_nonblocking(pid.release()); - auto state(state_.lock()); - state->exc = std::current_exception(); - wakeup.notify_all(); - } - }; - - std::vector threads; - for (size_t i = 0; i < nrWorkers; i++) - threads.emplace_back(std::thread(handler)); - - for (auto & thread : threads) - thread.join(); - - auto state(state_.lock()); - - if (state->exc) - std::rethrow_exception(state->exc); - - /* For aggregate jobs that have named consistuents - (i.e. constituents that are a job name rather than a - derivation), look up the referenced job and add it to the - dependencies of the aggregate derivation. */ - auto store = openStore(); - - for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) { - auto jobName = i.key(); - auto & job = i.value(); - - auto named = job.find("namedConstituents"); - if (named == job.end()) continue; - - std::unordered_map brokenJobs; - auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state]( - const std::string & childJobName) -> std::optional { - auto childJob = state->jobs.find(childJobName); - if (childJob == state->jobs.end()) { - printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName); - brokenJobs[childJobName] = "does not exist"; - return std::nullopt; - } - if (childJob->find("error") != childJob->end()) { - std::string error = (*childJob)["error"]; - printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error); - brokenJobs[childJobName] = error; - return std::nullopt; - } - return *childJob; - }; - - if (myArgs.dryRun) { - for (std::string jobName2 : *named) { - auto job2 = getNonBrokenJobOrRecordError(jobName2); - if (!job2) { - continue; - } - std::string drvPath2 = (*job2)["drvPath"]; - job["constituents"].push_back(drvPath2); - } - } else { - auto drvPath = store->parseStorePath((std::string) job["drvPath"]); - auto drv = store->readDerivation(drvPath); - - for (std::string jobName2 : *named) { - auto job2 = getNonBrokenJobOrRecordError(jobName2); - if (!job2) { - continue; - } - auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]); - auto drv2 = store->readDerivation(drvPath2); - job["constituents"].push_back(store->printStorePath(drvPath2)); - drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first}; - } - - if (brokenJobs.empty()) { - std::string drvName(drvPath.name()); - assert(drvName.ends_with(drvExtension)); - drvName.resize(drvName.size() - drvExtension.size()); - - auto hashModulo = hashDerivationModulo(*store, drv, true); - if (hashModulo.kind != DrvHash::Kind::Regular) continue; - auto h = hashModulo.hashes.find("out"); - if (h == hashModulo.hashes.end()) continue; - auto outPath = store->makeOutputPath("out", h->second, drvName); - drv.env["out"] = store->printStorePath(outPath); - drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath }); - auto newDrvPath = store->printStorePath(writeDerivation(*store, drv)); - - debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath); - - job["drvPath"] = newDrvPath; - job["outputs"]["out"] 
= store->printStorePath(outPath); - } - } - - job.erase("namedConstituents"); - - /* Register the derivation as a GC root. !!! This - registers roots for jobs that we may have already - done. */ - auto localStore = store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - auto drvPath = job["drvPath"].get(); - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root); - } - - if (!brokenJobs.empty()) { - std::stringstream ss; - for (const auto& [jobName, error] : brokenJobs) { - ss << jobName << ": " << error << "\n"; - } - job["error"] = ss.str(); - } - } - - std::cout << state->jobs.dump(2) << "\n"; - }); -} diff --git a/t/Makefile.am b/t/Makefile.am index 9c14c1e3..c1a54e62 100644 --- a/t/Makefile.am +++ b/t/Makefile.am @@ -13,7 +13,7 @@ TESTS_ENVIRONMENT = \ PGHOST=/tmp \ PERL5LIB="$(srcdir):$(abs_top_srcdir)/src/lib:$$PERL5LIB" \ PYTHONPATH= \ - PATH=$(abs_top_srcdir)/src/hydra-evaluator:$(abs_top_srcdir)/src/script:$(abs_top_srcdir)/src/hydra-eval-jobs:$(abs_top_srcdir)/src/hydra-queue-runner:$$PATH \ + PATH=$(abs_top_srcdir)/src/hydra-evaluator:$(abs_top_srcdir)/src/script:$(abs_top_srcdir)/src/hydra-queue-runner:$$PATH \ perl -w EXTRA_DIST = \ diff --git a/t/evaluator/evaluate-oom-job.t b/t/evaluator/evaluate-oom-job.t deleted file mode 100644 index 6c17d4e4..00000000 --- a/t/evaluator/evaluate-oom-job.t +++ /dev/null @@ -1,63 +0,0 @@ -use strict; -use warnings; -use Setup; -use Test2::V0; -use Hydra::Helper::Exec; - -# Ensure that `systemd-run` is -# - Available in the PATH/envionment -# - Accessable to the user executing it -# - Capable of using the command switches we use in our test -my $sd_res; -eval { - ($sd_res) = captureStdoutStderr(3, ( - "systemd-run", - "--user", - "--collect", - "--scope", - "--property", - "MemoryMax=25M", - "--", - "true" - )); -} or do { - # The command failed to execute, likely because `systemd-run` is not present - # in `PATH` - skip_all("`systemd-run` failed when invoked in this environment"); -}; -if ($sd_res != 0) { - # `systemd-run` executed but `sytemd-run` failed to call `true` and return - # successfully - skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)"); -} - -my $ctx = test_context(); - -# Contain the memory usage to 25 MegaBytes using `systemd-run` -# Run `hydra-eval-jobs` on test job that will purposefully consume all memory -# available -my ($res, $stdout, $stderr) = captureStdoutStderr(60, ( - "systemd-run", - "--user", - "--collect", - "--scope", - "--property", - "MemoryMax=25M", - "--", - "hydra-eval-jobs", - "-I", "/dev/zero", - "-I", $ctx->jobsdir, - ($ctx->jobsdir . "/oom.nix") -)); - -isnt($res, 0, "`hydra-eval-jobs` exits non-zero"); -ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); -like( - $stderr, - # Assert error log contains messages added in PR - # https://github.com/NixOS/hydra/pull/1203 - qr/^child process \(\d+?\) killed by signal=9$/m, - "The stderr record includes a relevant error message" -); - -done_testing; -- 2.44.1 From 370a4bf138a830c4de8a05e248ace043adbc8f4f Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Tue, 16 Jul 2024 09:37:32 +0200 Subject: [PATCH 6/7] treewide: start removing tests related to constituents The feature cannot easily be ported to nix-eval-jobs since it requires deep integration into the evaluator, and h.n.o doesn't use it. Later more of this will be ripped out. 
--- src/script/hydra-eval-jobset | 35 ------------------- t/Hydra/Controller/Build/api.t | 24 ------------- t/evaluator/evaluate-constituents-broken.t | 32 ----------------- t/evaluator/evaluate-constituents-gc.t | 20 ----------- t/queue-runner/constituents.t | 32 ----------------- t/queue-runner/direct-indirect-constituents.t | 35 ------------------- 6 files changed, 178 deletions(-) delete mode 100644 t/evaluator/evaluate-constituents-broken.t delete mode 100644 t/evaluator/evaluate-constituents-gc.t delete mode 100644 t/queue-runner/constituents.t delete mode 100644 t/queue-runner/direct-indirect-constituents.t diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index de365a7d..459f1b4c 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -809,41 +809,6 @@ sub checkJobsetWrapped { $ev->jobsetevalmembers->create({ build => $id, isnew => $x->{new} }); } - # Create AggregateConstituents mappings. Since there can - # be jobs that alias each other, if there are multiple - # builds for the same derivation, pick the one with the - # shortest name. - my %drvPathToId; - foreach my $id (keys %buildMap) { - my $x = $buildMap{$id}; - my $y = $drvPathToId{$x->{drvPath}}; - if (defined $y) { - next if length $x->{jobName} > length $y->{jobName}; - next if length $x->{jobName} == length $y->{jobName} && $x->{jobName} ge $y->{jobName}; - } - $drvPathToId{$x->{drvPath}} = $x; - } - - # XXX: dead code with nix-eval-jobs. To be removed. - foreach my $job (values @jobs) { - next unless $job->{constituents}; - - if (defined $job->{error}) { - die "aggregate job ‘$job->{jobName}’ failed with the error: $job->{error}\n"; - } - - my $x = $drvPathToId{$job->{drvPath}} or - die "aggregate job ‘$job->{jobName}’ has no corresponding build record.\n"; - foreach my $drvPath (@{$job->{constituents}}) { - my $constituent = $drvPathToId{$drvPath}; - if (defined $constituent) { - $db->resultset('AggregateConstituents')->update_or_create({aggregate => $x->{id}, constituent => $constituent->{id}}); - } else { - warn "aggregate job ‘$job->{jobName}’ has a constituent ‘$drvPath’ that doesn't correspond to a Hydra build\n"; - } - } - } - foreach my $name (keys %{$inputInfo}) { for (my $n = 0; $n < scalar(@{$inputInfo->{$name}}); $n++) { my $input = $inputInfo->{$name}->[$n]; diff --git a/t/Hydra/Controller/Build/api.t b/t/Hydra/Controller/Build/api.t index 91a553df..bd2d8292 100644 --- a/t/Hydra/Controller/Build/api.t +++ b/t/Hydra/Controller/Build/api.t @@ -78,28 +78,4 @@ subtest "validating the JSON representation of a build" => sub { }, "The build's JSON matches our API."); }; -subtest "accessing the constituents API" => sub { - my $url = $build_url . 
"/constituents"; - - my $constituents = request(GET $url, - Accept => 'application/json', - ); - - ok($constituents->is_success, "Getting the constituent builds"); - - my $data; - my $valid_json = lives { $data = decode_json($constituents->content); }; - ok($valid_json, "We get back valid JSON."); - if (!$valid_json) { - use Data::Dumper; - print STDERR Dumper $constituents->content; - } - - my ($buildA) = grep { $_->{nixname} eq "empty-dir-a" } @$data; - my ($buildB) = grep { $_->{nixname} eq "empty-dir-b" } @$data; - - is($buildA->{job}, "a"); - is($buildB->{job}, "b"); -}; - done_testing; diff --git a/t/evaluator/evaluate-constituents-broken.t b/t/evaluator/evaluate-constituents-broken.t deleted file mode 100644 index 4014f09f..00000000 --- a/t/evaluator/evaluate-constituents-broken.t +++ /dev/null @@ -1,32 +0,0 @@ -use strict; -use warnings; -use Setup; -use Test2::V0; -use Hydra::Helper::Exec; - -my $ctx = test_context(); - -my $jobsetCtx = $ctx->makeJobset( - expression => 'constituents-broken.nix', -); -my $jobset = $jobsetCtx->{"jobset"}; - -my ($res, $stdout, $stderr) = captureStdoutStderr(60, - ("hydra-eval-jobset", $jobsetCtx->{"project"}->name, $jobset->name) -); -isnt($res, 0, "hydra-eval-jobset exits non-zero"); -ok(utf8::decode($stderr), "Stderr output is UTF8-clean"); -like( - $stderr, - qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/, - "The stderr record includes a relevant error message" -); - -$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB -like( - $jobset->errormsg, - qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/, - "The jobset records a relevant error message" -); - -done_testing; diff --git a/t/evaluator/evaluate-constituents-gc.t b/t/evaluator/evaluate-constituents-gc.t deleted file mode 100644 index a9b23e6c..00000000 --- a/t/evaluator/evaluate-constituents-gc.t +++ /dev/null @@ -1,20 +0,0 @@ -use strict; -use warnings; -use Setup; -use Test2::V0; - -my $ctx = test_context(); - -my $builds = $ctx->makeAndEvaluateJobset( - expression => 'constituents.nix', -); - -my $constituentA = $builds->{"constituentA"}; -my $directAggregate = $builds->{"direct_aggregate"}; -my $indirectAggregate = $builds->{"indirect_aggregate"}; - -is(system('nix-store', '--delete', $constituentA->drvpath), 256, "Deleting a constituent derivation fails"); -is(system('nix-store', '--delete', $directAggregate->drvpath), 256, "Deleting the direct aggregate derivation fails"); -is(system('nix-store', '--delete', $indirectAggregate->drvpath), 256, "Deleting the indirect aggregate derivation fails"); - -done_testing; diff --git a/t/queue-runner/constituents.t b/t/queue-runner/constituents.t deleted file mode 100644 index c6333642..00000000 --- a/t/queue-runner/constituents.t +++ /dev/null @@ -1,32 +0,0 @@ -use feature 'unicode_strings'; -use strict; -use warnings; -use Setup; - -my %ctx = test_init(); - -require Hydra::Schema; -require Hydra::Model::DB; - -use Test2::V0; - -my $db = Hydra::Model::DB->new; -hydra_setup($db); - -my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"}); - -my $jobset = createBaseJobset("broken-constituent", "broken-constituent.nix", $ctx{jobsdir}); - -ok(evalSucceeds($jobset), "Evaluating jobs/broken-constituent.nix should exit with return code 0"); -is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/broken-constituent.nix should not queue any builds"); - -like( - $jobset->errormsg, - 
qr/^does-not-exist: does not exist$/m, - "Evaluating jobs/broken-constituent.nix should log an error for does-not-exist"); -like( - $jobset->errormsg, - qr/^does-not-evaluate: error: assertion 'false' failed$/m, - "Evaluating jobs/broken-constituent.nix should log an error for does-not-evaluate"); - -done_testing; diff --git a/t/queue-runner/direct-indirect-constituents.t b/t/queue-runner/direct-indirect-constituents.t deleted file mode 100644 index a017c76f..00000000 --- a/t/queue-runner/direct-indirect-constituents.t +++ /dev/null @@ -1,35 +0,0 @@ -use strict; -use warnings; -use Setup; -use Test2::V0; - -my $ctx = test_context(); - -my $builds = $ctx->makeAndEvaluateJobset( - expression => 'constituents.nix', -); - -my $constituentBuildA = $builds->{"constituentA"}; -my $constituentBuildB = $builds->{"constituentB"}; - -my $eval = $constituentBuildA->jobsetevals->first(); -is($eval->evaluationerror->has_error, 0); - -subtest "Verifying the direct aggregate" => sub { - my $aggBuild = $builds->{"direct_aggregate"}; - is($aggBuild->constituents->first()->id, $constituentBuildA->id, "The ID of the constituent is correct"); -}; - -subtest "Verifying the indirect aggregate" => sub { - my $indirectBuild = $builds->{"indirect_aggregate"}; - is($indirectBuild->constituents->first()->id, $constituentBuildA->id, "The ID of the constituent is correct"); -}; - -subtest "Verifying a mix of direct and indirect aggregate references" => sub { - my $mixedBuild = $builds->{"mixed_aggregate"}; - my ($constituentA, $constituentB) = $mixedBuild->constituents()->search({}, {order_by => { -asc => "job"} }); - is($constituentA->id, $constituentBuildA->id, "The ID of the constituent is correct"); - is($constituentB->id, $constituentBuildB->id, "The ID of the constituent is correct"); -}; - -done_testing; -- 2.44.1 From b0e9b4b2f99f9d8f5c4e780e89f955c394b5ced4 Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Wed, 17 Jul 2024 12:05:41 +0200 Subject: [PATCH 7/7] hydra-eval-jobset: incrementally ingest eval results nix-eval-jobs streams output, unlike hydra-eval-jobs. Now that we've migrated, we can use this to: 1. Use less RAM by avoiding buffering a whole eval's worth of metadata into a Perl string and an array of JSON objects. 2. Make evals latency a bit lower by allowing the queue runner to start ingesting builds faster. --- src/script/hydra-eval-jobset | 95 +++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 45 deletions(-) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 459f1b4c..9dd165a5 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -17,6 +17,7 @@ use Hydra::Helper::Nix; use Hydra::Model::DB; use Hydra::Plugin; use Hydra::Schema; +use IPC::Run; use JSON::MaybeXS; use Net::Statsd; use Nix::Store; @@ -383,23 +384,33 @@ sub evalJobs { print STDERR "evaluator: @escaped\n"; } - (my $res, my $jobsJSONLines, my $stderr) = captureStdoutStderr(21600, @cmd); - die "nix-eval-jobs returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8)) - . ":\n" . ($stderr ? decode("utf-8", $stderr) : "(no output)\n") - if $res; + my $h = IPC::Run::start \@cmd, + '>', IPC::Run::new_chunker, \my $out, + '2>', \my $err; - print STDERR "$stderr"; + return sub { + while (1) { + $h->pump; + if (!defined $out && !defined $err) { + $h->finish; + if ($?) { + die "nix-eval-jobs returned " . ($? & 127 ? "signal $?" : "exit code " . ($? >> 8)) . 
"\n"; + } + return; + } - # XXX: take advantage of nix-eval-jobs's streaming instead of parsing everything in one block at - # the end. - my @jobs; - foreach my $line (split(/\n/, $jobsJSONLines)) { - last if $line eq ""; + if (defined $err) { + print STDERR "$err"; + undef $err; + } - push(@jobs, decode_json($line)); + if (defined $out && $out ne '') { + my $job = decode_json($out); + undef $out; + return $job; + } + } }; - - return @jobs; } @@ -716,17 +727,11 @@ sub checkJobsetWrapped { # Evaluate the job expression. my $evalStart = clock_gettime(CLOCK_MONOTONIC); - my @jobs = evalJobs($project->name . ":" . $jobset->name, $inputInfo, $jobset->nixexprinput, $jobset->nixexprpath, $flakeRef); - my $evalStop = clock_gettime(CLOCK_MONOTONIC); - - if ($jobsetsJobset) { - die "The .jobsets jobset must only have a single job named 'jobsets'" - unless (scalar @jobs) == 1 && $jobs[0]->{attr} eq "jobsets"; - } - Net::Statsd::timing("hydra.evaluator.eval_time", int(($evalStop - $evalStart) * 1000)); + my $evalStop; + my $jobsIter = evalJobs($project->name . ":" . $jobset->name, $inputInfo, $jobset->nixexprinput, $jobset->nixexprpath, $flakeRef); if ($dryRun) { - foreach my $job (@jobs) { + while (defined(my $job = $jobsIter->())) { my $name = $job->{attr}; if (defined $job->{drvPath}) { print STDERR "good job $name: $job->{drvPath}\n"; @@ -737,31 +742,20 @@ sub checkJobsetWrapped { return; } - my $jobOutPathMap = {}; - my $jobsetChanged = 0; - my $dbStart = clock_gettime(CLOCK_MONOTONIC); - - # Store the error messages for jobs that failed to evaluate. my $evaluationErrorTime = time; my $evaluationErrorMsg = ""; - foreach my $job (@jobs) { - next unless defined $job->{error}; - $evaluationErrorMsg .= - ($job->{attr} ne "" ? "in job ‘$job->{attr}’" : "at top-level") . - ":\n" . $job->{error} . "\n\n"; - } - setJobsetError($jobset, $evaluationErrorMsg, $evaluationErrorTime); - my $evaluationErrorRecord = $db->resultset('EvaluationErrors')->create( { errormsg => $evaluationErrorMsg , errortime => $evaluationErrorTime } ); + my $jobOutPathMap = {}; + my $jobsetChanged = 0; my %buildMap; - $db->txn_do(sub { + $db->txn_do(sub { my $prevEval = getPrevJobsetEval($db, $jobset, 1); # Clear the "current" flag on all builds. Since we're in a @@ -774,7 +768,7 @@ sub checkJobsetWrapped { , evaluationerror => $evaluationErrorRecord , timestamp => time , checkouttime => abs(int($checkoutStop - $checkoutStart)) - , evaltime => abs(int($evalStop - $evalStart)) + , evaltime => 0 , hasnewbuilds => 0 , nrbuilds => 0 , flake => $flakeRef @@ -782,11 +776,18 @@ sub checkJobsetWrapped { , nixexprpath => $jobset->nixexprpath }); - # Schedule each successfully evaluated job. - foreach my $job (permute(@jobs)) { - next if defined $job->{error}; - #print STDERR "considering job " . $project->name, ":", $jobset->name, ":", $job->{jobName} . "\n"; - checkBuild($db, $jobset, $ev, $inputInfo, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins); + while (defined(my $job = $jobsIter->())) { + if ($jobsetsJobset) { + die "The .jobsets jobset must only have a single job named 'jobsets'" + unless $job->{attr} eq "jobsets"; + } + + $evaluationErrorMsg .= + ($job->{attr} ne "" ? "in job ‘$job->{attr}’" : "at top-level") . + ":\n" . $job->{error} . "\n\n" if defined $job->{error}; + + checkBuild($db, $jobset, $ev, $inputInfo, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins) + unless defined $job->{error}; } # Have any builds been added or removed since last time? 
@@ -846,11 +847,15 @@ sub checkJobsetWrapped { $jobset->update({ enabled => 0 }) if $jobset->enabled == 2; $jobset->update({ lastcheckedtime => time, forceeval => undef }); + + $evaluationErrorRecord->update({ errormsg => $evaluationErrorMsg }); + setJobsetError($jobset, $evaluationErrorMsg, $evaluationErrorTime); + + $evalStop = clock_gettime(CLOCK_MONOTONIC); + $ev->update({ evaltime => abs(int($evalStop - $evalStart)) }); }); - my $dbStop = clock_gettime(CLOCK_MONOTONIC); - - Net::Statsd::timing("hydra.evaluator.db_time", int(($dbStop - $dbStart) * 1000)); + Net::Statsd::timing("hydra.evaluator.eval_time", int(($evalStop - $evalStart) * 1000)); Net::Statsd::increment("hydra.evaluator.evals"); Net::Statsd::increment("hydra.evaluator.cached_evals") unless $jobsetChanged; } -- 2.44.1
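Patch 4 maps the structured meta output of nix-eval-jobs onto Hydra's flat build columns: meta.license and meta.maintainers can each be a string, an attrset, or an arbitrarily nested list, and getMetaConcatStrings flattens whichever shape arrives into one comma-separated string. The standalone sketch below reproduces those two helpers outside the script, fed with sample data mirroring t/jobs/meta.nix; the helper names are lowercased here, but the logic is the patch's own.

    #!/usr/bin/env perl
    use strict;
    use warnings;

    # Collect every string found under key $k in a nested structure of
    # hashes, arrays and plain scalars (same logic as getMetaStrings).
    sub get_meta_strings {
        my ($v, $k, $acc) = @_;
        my $t = ref $v;

        if ($t eq 'HASH') {
            push @$acc, $v->{$k} if exists $v->{$k};
        } elsif ($t eq 'ARRAY') {
            get_meta_strings($_, $k, $acc) foreach @$v;
        } elsif (defined $v) {
            push @$acc, $v;
        }
    }

    # Flatten to a comma-separated string; an empty result becomes undef,
    # which the insert in checkBuild stores as NULL.
    sub get_meta_concat_strings {
        my ($v, $k) = @_;
        my @strings;
        get_meta_strings($v, $k, \@strings);
        return join(", ", @strings) || undef;
    }

    # meta.license / meta.maintainers as nix-eval-jobs would emit them for
    # the derivation in t/jobs/meta.nix.
    my $license     = [ { shortName => "MIT" }, "BSD" ];
    my $maintainers = [ 'alice@example.com', { email => 'bob@not.found' } ];

    print get_meta_concat_strings($license, "shortName"), "\n";  # MIT, BSD
    print get_meta_concat_strings($maintainers, "email"), "\n";  # alice@example.com, bob@not.found

The two prints match the assertions in t/evaluator/evaluate-meta.t, which is how the series verifies this mapping end to end.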
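Patch 7 turns evalJobs into a closure that yields one job at a time from the child's stdout instead of buffering the whole evaluation. The sketch below shows the same consumption pattern in isolation, assuming only a producer that emits one JSON object per line (a tiny inline Perl one-liner stands in for the real nix-eval-jobs invocation); it uses IPC::Run's documented pumpable guard in place of the patch's explicit undef checks, and splits $out defensively in case several lines arrive in one pump.

    #!/usr/bin/env perl
    use strict;
    use warnings;
    use IPC::Run;
    use JSON::MaybeXS qw(decode_json);

    # Stand-in producer: prints one JSON object per line, the framing that
    # nix-eval-jobs uses. Swap in the real @cmd from evalJobs() to run this
    # against an actual jobset.
    my @cmd = ($^X, '-e',
        'print qq({"attr":"a","drvPath":"/nix/store/aaa-a.drv"}\n{"attr":"b","error":"boom"}\n)');

    my ($out, $err) = ('', '');
    my $h = IPC::Run::start \@cmd,
        '>',  IPC::Run::new_chunker, \$out,   # stdout is delivered line by line
        '2>', \$err;

    while ($h->pumpable) {
        $h->pump;
        if ($err ne '') { print STDERR $err; $err = ''; }  # relay evaluator logs as they appear
        next if $out eq '';
        # new_chunker holds back partial reads, so $out contains only whole
        # lines; each one is a complete JSON document and can be ingested
        # (checkBuild etc.) without waiting for the evaluation to finish.
        for my $line (split /\n/, $out) {
            my $job = decode_json($line);
            print defined $job->{error}
                ? "job $job->{attr} failed to evaluate: $job->{error}\n"
                : "job $job->{attr}: $job->{drvPath}\n";
        }
        $out = '';
    }
    $h->finish;

    # Wait-status decoding: the low 7 bits hold the terminating signal (if
    # any), the next byte the exit code.
    die "producer returned " . ($? & 127 ? "signal " . ($? & 127) : "exit code " . ($? >> 8)) . "\n"
        if $?;

This incremental loop is what lets the final patch move eval-time accounting and error recording inside the transaction: jobs are checked into the database as they stream in, so the evaluation wall-clock time is only known once the iterator is exhausted.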