From b6ea85a601ddac9cb0716d8cb4d446439fa0778f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josef=20Kemetm=C3=BCller?= Date: Fri, 27 May 2022 11:40:49 +0200 Subject: [PATCH 01/32] scmdiff: Hardcode `--git-dir` The newest version of git refuses to work on repositories not owned by the current user. This leads to issues with the /api/scmdiff endpoint: May 27 11:16:05 myhydra hydra-server[923698]: fatal: unsafe repository ('/var/lib/hydra/scm/git/57ea036ec7ecd85c8dd085e02ecc6f12dd5c079a6203d16aea49f586cadfb2be' is owned by someone else) May 27 11:16:05 myhydra hydra-server[923698]: To add an exception for this directory, call: May 27 11:16:05 myhydra hydra-server[923698]: git config --global --add safe.directory /var/lib/hydra/scm/git/57ea036ec7ecd85c8dd085e02ecc6f12dd5c079a6203d16aea49f586cadfb2be May 27 11:16:05 myhydra hydra-server[923701]: warning: Not a git repository. Use --no-index to compare two paths outside a working tree May 27 11:16:05 myhydra hydra-server[923701]: usage: git diff --no-index [<options>] <path> <path> I used the same solution that was used in NixOS/nix#6440. Fixes #1214 --- src/lib/Hydra/Controller/API.pm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 6f10ef57..8ebed599 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) { } elsif ($type eq "git") { my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri); die if ! -d $clonePath; - $diff .= `(cd $clonePath; git log $rev1..$rev2)`; - $diff .= `(cd $clonePath; git diff $rev1..$rev2)`; + $diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`; + $diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`; } $c->stash->{'plain'} = { data => (scalar $diff) || " " }; From 70ad3a924affd56d57cdced7b31c5b36a7181701 Mon Sep 17 00:00:00 2001 From: Casey Ransom Date: Wed, 26 Oct 2022 16:13:40 -0400 Subject: [PATCH 02/32] exit with error if database connectivity lost There's currently no automatic recovery for disconnected databases in the evaluator. This means if the database is ever temporarily unavailable, hydra-evaluator will sit and spin with no work accomplished. If this condition is caught, the daemon will exit and systemd will be responsible for resuming the service.
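The shape of the fix is the classic fail-fast pattern: treat a broken connection as unrecoverable inside the process and let the service manager do the restarting, while every other exception keeps the existing back-off-and-retry behaviour. A minimal standalone sketch of that pattern (assumptions: libpqxx is available; `loop` is a hypothetical stand-in for the evaluator's real work loop, not Hydra code; the systemd unit restarts the service on failure, e.g. via `Restart=always`):

```
// Sketch only: fail fast on a broken database connection, back off and
// retry in-process on everything else.
#include <pqxx/pqxx>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for the evaluator's work loop.
void loop()
{
    pqxx::connection conn; // connects using the usual PG* environment variables
    pqxx::work txn(conn);
    txn.exec("select 1");  // placeholder for real queries
    txn.commit();
    sleep(5);
}

int main()
{
    while (true) {
        try {
            loop();
        } catch (pqxx::broken_connection & e) {
            // No point retrying in-process: exit so the service manager
            // restarts the daemon with a fresh connection.
            std::fprintf(stderr, "database connection broken: %s\n", e.what());
            std::_Exit(1);
        } catch (std::exception & e) {
            // Transient errors: sleep and try again without restarting.
            std::fprintf(stderr, "exception in main loop: %s\n", e.what());
            sleep(30);
        }
    }
}
```

Note the ordering of the catch clauses: `pqxx::broken_connection` derives from `std::exception`, so it must be caught first, exactly as in the patch below. `std::_Exit` skips destructors and atexit handlers, which presumably avoids any teardown that could block on the dead connection.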
--- src/hydra-evaluator/hydra-evaluator.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/hydra-evaluator/hydra-evaluator.cc b/src/hydra-evaluator/hydra-evaluator.cc index 2d7e68d9..a1ccf047 100644 --- a/src/hydra-evaluator/hydra-evaluator.cc +++ b/src/hydra-evaluator/hydra-evaluator.cc @@ -366,6 +366,9 @@ struct Evaluator printInfo("received jobset event"); } + } catch (pqxx::broken_connection & e) { + printError("Database connection broken: %s", e.what()); + std::_Exit(1); } catch (std::exception & e) { printError("exception in database monitor thread: %s", e.what()); sleep(30); @@ -473,6 +476,9 @@ struct Evaluator while (true) { try { loop(); + } catch (pqxx::broken_connection & e) { + printError("Database connection broken: %s", e.what()); + std::_Exit(1); } catch (std::exception & e) { printError("exception in main loop: %s", e.what()); sleep(30); From d1d171ee9010be514bb7b95866ac275213ed4286 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20Hamb=C3=BCchen?= Date: Wed, 2 Nov 2022 17:30:32 +0100 Subject: [PATCH 03/32] renderInputDiff: Increase git hash length 6 -> 8 Nixpkgs has so many commits that length 6 is often ambiguous, making the use of the shown values with git difficult. --- src/root/common.tt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/root/common.tt b/src/root/common.tt index 32d6c8cc..4487cbe3 100644 --- a/src/root/common.tt +++ b/src/root/common.tt @@ -374,7 +374,7 @@ BLOCK renderInputDiff; %] [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %] [% IF bi1.type == "git" %] - [% bi1.name %][% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %] + [% bi1.name %][% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %] [% ELSE %] From a8b89420bc2004b21652ec4de7faecc843faeb73 Mon Sep 17 00:00:00 2001 From: Linus Heckemann Date: Wed, 2 Nov 2022 11:19:33 +0100 Subject: [PATCH 04/32] Enable aarch64 support --- flake.nix | 63 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/flake.nix b/flake.nix index 2e413277..cd9f094d 100644 --- a/flake.nix +++ b/flake.nix @@ -6,13 +6,15 @@ outputs = { self, nixpkgs, nix }: let - version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}"; - pkgs = import nixpkgs { - system = "x86_64-linux"; + systems = [ "x86_64-linux" "aarch64-linux" ]; + forEachSystem = nixpkgs.lib.genAttrs systems; + + pkgsBySystem = forEachSystem (system: import nixpkgs { + inherit system; overlays = [ self.overlays.default nix.overlays.default ]; - }; + }); # NixOS configuration used for VM tests. 
hydraServer = @@ -254,9 +256,10 @@ hydraJobs = { - build.x86_64-linux = packages.x86_64-linux.hydra; + build = forEachSystem (system: packages.${system}.hydra); - manual = + manual = forEachSystem (system: + let pkgs = pkgsBySystem.${system}; in pkgs.runCommand "hydra-manual-${version}" { } '' mkdir -p $out/share @@ -264,10 +267,10 @@ mkdir $out/nix-support echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products - ''; + ''); - tests.install.x86_64-linux = - with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; + tests.install = forEachSystem (system: + with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; simpleTest { nodes.machine = hydraServer; testScript = @@ -279,10 +282,11 @@ machine.wait_for_open_port("3000") machine.succeed("curl --fail http://localhost:3000/") ''; - }; + }); - tests.notifications.x86_64-linux = - with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; + tests.notifications = forEachSystem (system: + let pkgs = pkgsBySystem.${system}; in + with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; simpleTest { nodes.machine = { pkgs, ... }: { imports = [ hydraServer ]; @@ -336,10 +340,11 @@ + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success" ) ''; - }; + }); - tests.gitea.x86_64-linux = - with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; }; + tests.gitea = forEachSystem (system: + let pkgs = pkgsBySystem.${system}; in + with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; makeTest { nodes.machine = { pkgs, ... }: { imports = [ hydraServer ]; @@ -352,7 +357,7 @@ distributedBuilds = true; buildMachines = [{ hostName = "localhost"; - systems = [ "x86_64-linux" ]; + systems = [ system ]; }]; binaryCaches = [ ]; }; @@ -467,7 +472,7 @@ smallDrv = pkgs.writeText "jobset.nix" '' { trivial = builtins.derivation { name = "trivial"; - system = "x86_64-linux"; + system = "${system}"; builder = "/bin/sh"; allowSubstitutes = false; preferLocalBuild = true; @@ -531,31 +536,37 @@ machine.shutdown() ''; - }; + }); - tests.validate-openapi = pkgs.runCommand "validate-openapi" + tests.validate-openapi = forEachSystem (system: + let pkgs = pkgsBySystem.${system}; in + pkgs.runCommand "validate-openapi" { buildInputs = [ pkgs.openapi-generator-cli ]; } '' openapi-generator-cli validate -i ${./hydra-api.yaml} touch $out - ''; + ''); container = nixosConfigurations.container.config.system.build.toplevel; }; - checks.x86_64-linux.build = hydraJobs.build.x86_64-linux; - checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux; - checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi; + checks = forEachSystem (system: { + build = hydraJobs.build.${system}; + install = hydraJobs.tests.install.${system}; + validate-openapi = hydraJobs.tests.validate-openapi.${system}; + }); - packages.x86_64-linux.hydra = pkgs.hydra; - packages.x86_64-linux.default = pkgs.hydra; + packages = forEachSystem (system: { + hydra = pkgsBySystem.${system}.hydra; + default = pkgsBySystem.${system}.hydra; + }); nixosModules.hydra = { imports = [ ./hydra-module.nix ]; nixpkgs.overlays = [ self.overlays.default nix.overlays.default ]; }; - nixosModules.hydraTest = { + nixosModules.hydraTest = { pkgs, ... 
}: { imports = [ self.nixosModules.hydra ]; services.hydra-dev.enable = true; From fd765bc97a321d45b56449ccabb1a7eca1553597 Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Tue, 22 Nov 2022 20:50:39 +0100 Subject: [PATCH 05/32] Fix "My Jobs" tab in user dashboard Nowadays `Builds` doesn't reference `Project` directly anymore. This means that simply resolving both `jobset` and `project` with a single JOIN from `Builds` doesn't work anymore. Instead we need to resolve the relation to `jobset` first and then the relation to `project`. For similar fixes see e.g. c7c47596009687d1652522c556333cefce28ec51. --- src/lib/Hydra/Controller/User.pm | 2 +- t/Hydra/Controller/User/dashboard.t | 30 +++++++++++++++++++++++++++++ t/jobs/basic.nix | 2 ++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 t/Hydra/Controller/User/dashboard.t diff --git a/src/lib/Hydra/Controller/User.pm b/src/lib/Hydra/Controller/User.pm index 2a8affae..9e7d96e5 100644 --- a/src/lib/Hydra/Controller/User.pm +++ b/src/lib/Hydra/Controller/User.pm @@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) { , "jobset.enabled" => 1 }, { order_by => ["project", "jobset", "job"] - , join => ["project", "jobset"] + , join => {"jobset" => "project"} })]; } diff --git a/t/Hydra/Controller/User/dashboard.t b/t/Hydra/Controller/User/dashboard.t new file mode 100644 index 00000000..8a24585d --- /dev/null +++ b/t/Hydra/Controller/User/dashboard.t @@ -0,0 +1,30 @@ +use strict; +use warnings; +use Setup; +my $ctx = test_context(); +use HTTP::Request::Common; +use Test2::V0; +use Catalyst::Test (); +Catalyst::Test->import('Hydra'); +require Hydra::Schema; +require Hydra::Model::DB; +my $db = $ctx->db(); +my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@invalid.org', password => '!' 
}); +$user->setPassword('foobar'); +my $builds = $ctx->makeAndEvaluateJobset( + expression => "basic.nix", + build => 1 +); +my $login = request(POST '/login', Referer => 'http://localhost', Content => { + username => 'alice', + password => 'foobar', + }); +is($login->code, 302); +my $cookie = $login->header("set-cookie"); +my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie); +ok($my_jobs->is_success); +my $content = $my_jobs->content(); +ok($content =~ /empty_dir/); +ok(!($content =~ /fails/)); +ok(!($content =~ /succeed_with_failed/)); +done_testing; diff --git a/t/jobs/basic.nix b/t/jobs/basic.nix index 2b76ab5d..9e207f80 100644 --- a/t/jobs/basic.nix +++ b/t/jobs/basic.nix @@ -4,6 +4,8 @@ with import ./config.nix; mkDerivation { name = "empty-dir"; builder = ./empty-dir-builder.sh; + meta.maintainers = [ "alice@invalid.org" ]; + meta.outPath = "${placeholder "out"}"; }; fails = From 213879484d2d671eeb417f9c4fe488b8b83e5ce8 Mon Sep 17 00:00:00 2001 From: Sandro Date: Mon, 5 Dec 2022 00:22:35 +0100 Subject: [PATCH 06/32] Fix example config --- doc/manual/src/configuration.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index ab68df43..e06dd35f 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -179,6 +179,7 @@ Example configuration: deref = always + # Make all users in the hydra_admin group Hydra admins From 7f816e3237ef1446ea43b4e0f4cbe1e0d53a3656 Mon Sep 17 00:00:00 2001 From: Sandro Date: Mon, 5 Dec 2022 00:35:05 +0100 Subject: [PATCH 07/32] Fix link --- doc/manual/src/configuration.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/configuration.md b/doc/manual/src/configuration.md index e06dd35f..02210449 100644 --- a/doc/manual/src/configuration.md +++ b/doc/manual/src/configuration.md @@ -131,8 +131,8 @@ use LDAP to manage roles and users. This is configured by defining the `` block in the configuration file. In this block it's possible to configure the authentication plugin in the `` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`. -The documentation for the available settings can be found [here] -(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). +The documentation for the available settings can be found +[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS). Note that the bind password (if needed) should be supplied as an included file to prevent it from leaking to the Nix store. From ad99d3366f8a0088782904fc66958fdf614c12db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Josef=20Kemetm=C3=BCller?= Date: Thu, 29 Dec 2022 22:26:59 +0100 Subject: [PATCH 08/32] Fix MIME types when serving .js and .css To correctly render HTML reports we make sure to return the following MIME types instead of "text/plain" - *.css: "text/css" - *.js: "application/javascript" Fixes: #1267 --- src/lib/Hydra/Controller/Build.pm | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm index 18a0eba3..2d74f86a 100644 --- a/src/lib/Hydra/Controller/Build.pm +++ b/src/lib/Hydra/Controller/Build.pm @@ -238,9 +238,17 @@ sub serveFile { "store", "cat", "--store", getStoreUri(), "$path"]) }; # Detect MIME type. 
- state $magic = File::LibMagic->new(follow_symlinks => 1); - my $info = $magic->info_from_filename($path); - my $type = $info->{mime_with_encoding}; + my $type = "text/plain"; + if ($path =~ /.*\.(\S{1,})$/xms) { + my $ext = $1; + my $mimeTypes = MIME::Types->new(only_complete => 1); + my $t = $mimeTypes->mimeTypeOf($ext); + $type = ref $t ? $t->type : $t if $t; + } else { + state $magic = File::LibMagic->new(follow_symlinks => 1); + my $info = $magic->info_from_filename($path); + $type = $info->{mime_with_encoding}; + } $c->response->content_type($type); $c->forward('Hydra::View::Plain'); } From 96e36201ebb7748d64f895947d198b370968edd0 Mon Sep 17 00:00:00 2001 From: Linus Heckemann Date: Tue, 29 Nov 2022 18:13:15 +0100 Subject: [PATCH 09/32] hydra-queue-runner: adapt to nlohmann::json --- src/hydra-queue-runner/hydra-queue-runner.cc | 249 +++++++++---------- 1 file changed, 114 insertions(+), 135 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 723bf223..b16fd770 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -8,6 +8,8 @@ #include +#include <nlohmann/json.hpp> + #include "state.hh" #include "hydra-build-result.hh" #include "store-api.hh" @@ -15,20 +17,11 @@ #include "globals.hh" #include "hydra-config.hh" -#include "json.hh" #include "s3-binary-cache-store.hh" #include "shared.hh" using namespace nix; - - -namespace nix { - -template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; } -template<> void toJSON<std::atomic<unsigned long>>(std::ostream & str, const std::atomic<unsigned long> & n) { str << n; } -template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; } - -} +using nlohmann::json; std::string getEnvOrDie(const std::string & key) @@ -542,181 +535,167 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock() void State::dumpStatus(Connection & conn) { - std::ostringstream out; - + auto root = json::object(); { - JSONObject root(out); time_t now = time(0); - root.attr("status", "up"); - root.attr("time", time(0)); - root.attr("uptime", now - startedAt); - root.attr("pid", getpid()); + root["status"] = "up"; + root["time"] = time(0); + root["uptime"] = now - startedAt; + root["pid"] = getpid(); { auto builds_(builds.lock()); - root.attr("nrQueuedBuilds", builds_->size()); + root["nrQueuedBuilds"] = builds_->size(); } { auto steps_(steps.lock()); for (auto i = steps_->begin(); i != steps_->end(); ) if (i->second.lock()) ++i; else i = steps_->erase(i); - root.attr("nrUnfinishedSteps", steps_->size()); + root["nrUnfinishedSteps"] = steps_->size(); } { auto runnable_(runnable.lock()); for (auto i = runnable_->begin(); i != runnable_->end(); ) if (i->lock()) ++i; else i = runnable_->erase(i); - root.attr("nrRunnableSteps", runnable_->size()); + root["nrRunnableSteps"] = runnable_->size(); } - root.attr("nrActiveSteps", activeSteps_.lock()->size()); - root.attr("nrStepsBuilding", nrStepsBuilding); - root.attr("nrStepsCopyingTo", nrStepsCopyingTo); - root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom); - root.attr("nrStepsWaiting", nrStepsWaiting); - root.attr("nrUnsupportedSteps", nrUnsupportedSteps); - root.attr("bytesSent", bytesSent); - root.attr("bytesReceived", bytesReceived); - root.attr("nrBuildsRead", nrBuildsRead); - root.attr("buildReadTimeMs", buildReadTimeMs); - root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ?
0.0 : (float) buildReadTimeMs / nrBuildsRead); - root.attr("nrBuildsDone", nrBuildsDone); - root.attr("nrStepsStarted", nrStepsStarted); - root.attr("nrStepsDone", nrStepsDone); - root.attr("nrRetries", nrRetries); - root.attr("maxNrRetries", maxNrRetries); + root["nrActiveSteps"] = activeSteps_.lock()->size(); + root["nrStepsBuilding"] = nrStepsBuilding.load(); + root["nrStepsCopyingTo"] = nrStepsCopyingTo.load(); + root["nrStepsCopyingFrom"] = nrStepsCopyingFrom.load(); + root["nrStepsWaiting"] = nrStepsWaiting.load(); + root["nrUnsupportedSteps"] = nrUnsupportedSteps.load(); + root["bytesSent"] = bytesSent.load(); + root["bytesReceived"] = bytesReceived.load(); + root["nrBuildsRead"] = nrBuildsRead.load(); + root["buildReadTimeMs"] = buildReadTimeMs.load(); + root["buildReadTimeAvgMs"] = nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead; + root["nrBuildsDone"] = nrBuildsDone.load(); + root["nrStepsStarted"] = nrStepsStarted.load(); + root["nrStepsDone"] = nrStepsDone.load(); + root["nrRetries"] = nrRetries.load(); + root["maxNrRetries"] = maxNrRetries.load(); if (nrStepsDone) { - root.attr("totalStepTime", totalStepTime); - root.attr("totalStepBuildTime", totalStepBuildTime); - root.attr("avgStepTime", (float) totalStepTime / nrStepsDone); - root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone); + root["totalStepTime"] = totalStepTime.load(); + root["totalStepBuildTime"] = totalStepBuildTime.load(); + root["avgStepTime"] = (float) totalStepTime / nrStepsDone; + root["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone; } - root.attr("nrQueueWakeups", nrQueueWakeups); - root.attr("nrDispatcherWakeups", nrDispatcherWakeups); - root.attr("dispatchTimeMs", dispatchTimeMs); - root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups); - root.attr("nrDbConnections", dbPool.count()); - root.attr("nrActiveDbUpdates", nrActiveDbUpdates); + root["nrQueueWakeups"] = nrQueueWakeups.load(); + root["nrDispatcherWakeups"] = nrDispatcherWakeups.load(); + root["dispatchTimeMs"] = dispatchTimeMs.load(); + root["dispatchTimeAvgMs"] = nrDispatcherWakeups == 0 ? 
0.0 : (float) dispatchTimeMs / nrDispatcherWakeups; + root["nrDbConnections"] = dbPool.count(); + root["nrActiveDbUpdates"] = nrActiveDbUpdates.load(); { - auto nested = root.object("machines"); + auto nested = root["machines"]; auto machines_(machines.lock()); for (auto & i : *machines_) { auto & m(i.second); auto & s(m->state); - auto nested2 = nested.object(m->sshName); - nested2.attr("enabled", m->enabled); - - { - auto list = nested2.list("systemTypes"); - for (auto & s : m->systemTypes) - list.elem(s); - } - - { - auto list = nested2.list("supportedFeatures"); - for (auto & s : m->supportedFeatures) - list.elem(s); - } - - { - auto list = nested2.list("mandatoryFeatures"); - for (auto & s : m->mandatoryFeatures) - list.elem(s); - } - - nested2.attr("currentJobs", s->currentJobs); - if (s->currentJobs == 0) - nested2.attr("idleSince", s->idleSince); - nested2.attr("nrStepsDone", s->nrStepsDone); - if (m->state->nrStepsDone) { - nested2.attr("totalStepTime", s->totalStepTime); - nested2.attr("totalStepBuildTime", s->totalStepBuildTime); - nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone); - nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone); - } - auto info(m->state->connectInfo.lock()); - nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)); - nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)); - nested2.attr("consecutiveFailures", info->consecutiveFailures); + auto machine = nested[m->sshName] = { + {"enabled", m->enabled}, + {"systemTypes", m->systemTypes}, + {"supportedFeatures", m->supportedFeatures}, + {"mandatoryFeatures", m->mandatoryFeatures}, + {"nrStepsDone", s->nrStepsDone.load()}, + {"currentJobs", s->currentJobs.load()}, + {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)}, + {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)}, + {"consecutiveFailures", info->consecutiveFailures}, + }; + + if (s->currentJobs == 0) + machine["idleSince"] = s->idleSince.load(); + if (m->state->nrStepsDone) { + machine["totalStepTime"] = s->totalStepTime.load(); + machine["totalStepBuildTime"] = s->totalStepBuildTime.load(); + machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone; + machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone; + } } } { - auto nested = root.object("jobsets"); + auto jobsets_json = root["jobsets"]; auto jobsets_(jobsets.lock()); for (auto & jobset : *jobsets_) { - auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second); - nested2.attr("shareUsed", jobset.second->shareUsed()); - nested2.attr("seconds", jobset.second->getSeconds()); + jobsets_json[jobset.first.first + ":" + jobset.first.second] = { + {"shareUsed", jobset.second->shareUsed()}, + {"seconds", jobset.second->getSeconds()}, + }; } } { - auto nested = root.object("machineTypes"); + auto machineTypesJson = root["machineTypes"]; auto machineTypes_(machineTypes.lock()); for (auto & i : *machineTypes_) { - auto nested2 = nested.object(i.first); - nested2.attr("runnable", i.second.runnable); - nested2.attr("running", i.second.running); + auto machineTypeJson = machineTypesJson[i.first] = { + {"runnable", i.second.runnable}, + {"running", i.second.running}, + }; if (i.second.runnable > 0) - nested2.attr("waitTime", i.second.waitTime.count() + - i.second.runnable * (time(0) - lastDispatcherCheck)); + machineTypeJson["waitTime"] = i.second.waitTime.count() + + i.second.runnable * (time(0) - 
lastDispatcherCheck); if (i.second.running == 0) - nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive)); + machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive); } } auto store = getDestStore(); - auto nested = root.object("store"); - auto & stats = store->getStats(); - nested.attr("narInfoRead", stats.narInfoRead); - nested.attr("narInfoReadAverted", stats.narInfoReadAverted); - nested.attr("narInfoMissing", stats.narInfoMissing); - nested.attr("narInfoWrite", stats.narInfoWrite); - nested.attr("narInfoCacheSize", stats.pathInfoCacheSize); - nested.attr("narRead", stats.narRead); - nested.attr("narReadBytes", stats.narReadBytes); - nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes); - nested.attr("narWrite", stats.narWrite); - nested.attr("narWriteAverted", stats.narWriteAverted); - nested.attr("narWriteBytes", stats.narWriteBytes); - nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes); - nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs); - nested.attr("narCompressionSavings", - stats.narWriteBytes - ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes - : 0.0); - nested.attr("narCompressionSpeed", // MiB/s + root["store"] = { + {"narInfoRead", stats.narInfoRead.load()}, + {"narInfoReadAverted", stats.narInfoReadAverted.load()}, + {"narInfoMissing", stats.narInfoMissing.load()}, + {"narInfoWrite", stats.narInfoWrite.load()}, + {"narInfoCacheSize", stats.pathInfoCacheSize.load()}, + {"narRead", stats.narRead.load()}, + {"narReadBytes", stats.narReadBytes.load()}, + {"narReadCompressedBytes", stats.narReadCompressedBytes.load()}, + {"narWrite", stats.narWrite.load()}, + {"narWriteAverted", stats.narWriteAverted.load()}, + {"narWriteBytes", stats.narWriteBytes.load()}, + {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()}, + {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()}, + {"narCompressionSavings", + stats.narWriteBytes + ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes + : 0.0}, + {"narCompressionSpeed", // MiB/s stats.narWriteCompressionTimeMs ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0) - : 0.0); + : 0.0}, + }; auto s3Store = dynamic_cast(&*store); if (s3Store) { - auto nested2 = nested.object("s3"); auto & s3Stats = s3Store->getS3Stats(); - nested2.attr("put", s3Stats.put); - nested2.attr("putBytes", s3Stats.putBytes); - nested2.attr("putTimeMs", s3Stats.putTimeMs); - nested2.attr("putSpeed", - s3Stats.putTimeMs - ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0) - : 0.0); - nested2.attr("get", s3Stats.get); - nested2.attr("getBytes", s3Stats.getBytes); - nested2.attr("getTimeMs", s3Stats.getTimeMs); - nested2.attr("getSpeed", - s3Stats.getTimeMs - ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0) - : 0.0); - nested2.attr("head", s3Stats.head); - nested2.attr("costDollarApprox", - (s3Stats.get + s3Stats.head) / 10000.0 * 0.004 - + s3Stats.put / 1000.0 * 0.005 + - + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09); + auto jsonS3 = root["s3"] = { + {"put", s3Stats.put.load()}, + {"putBytes", s3Stats.putBytes.load()}, + {"putTimeMs", s3Stats.putTimeMs.load()}, + {"putSpeed", + s3Stats.putTimeMs + ? 
(double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0) + : 0.0}, + {"get", s3Stats.get.load()}, + {"getBytes", s3Stats.getBytes.load()}, + {"getTimeMs", s3Stats.getTimeMs.load()}, + {"getSpeed", + s3Stats.getTimeMs + ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0) + : 0.0}, + {"head", s3Stats.head.load()}, + {"costDollarApprox", + (s3Stats.get + s3Stats.head) / 10000.0 * 0.004 + + s3Stats.put / 1000.0 * 0.005 + + + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09}, + }; } } @@ -725,7 +704,7 @@ void State::dumpStatus(Connection & conn) pqxx::work txn(conn); // FIXME: use PostgreSQL 9.5 upsert. txn.exec("delete from SystemStatus where what = 'queue-runner'"); - txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str()); + txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", root.dump()); txn.exec("notify status_dumped"); txn.commit(); } From 5b35e1389885cf4c5d1058dc60017564b58e7e4c Mon Sep 17 00:00:00 2001 From: Linus Heckemann Date: Sat, 28 Jan 2023 12:58:41 +0100 Subject: [PATCH 10/32] hydra-queue-runner: use initializer lists for constructing JSON And also fix the parts that were broken --- src/hydra-queue-runner/hydra-queue-runner.cc | 89 ++++++++++---------- 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index b16fd770..b84681d5 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -535,67 +535,65 @@ std::shared_ptr State::acquireGlobalLock() void State::dumpStatus(Connection & conn) { - auto root = json::object(); + time_t now = time(0); + json statusJson = { + {"status", "up"}, + {"time", time(0)}, + {"uptime", now - startedAt}, + {"pid", getpid()}, + + {"nrQueuedBuilds", builds.lock()->size()}, + {"nrActiveSteps", activeSteps_.lock()->size()}, + {"nrStepsBuilding", nrStepsBuilding.load()}, + {"nrStepsCopyingTo", nrStepsCopyingTo.load()}, + {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()}, + {"nrStepsWaiting", nrStepsWaiting.load()}, + {"nrUnsupportedSteps", nrUnsupportedSteps.load()}, + {"bytesSent", bytesSent.load()}, + {"bytesReceived", bytesReceived.load()}, + {"nrBuildsRead", nrBuildsRead.load()}, + {"buildReadTimeMs", buildReadTimeMs.load()}, + {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead}, + {"nrBuildsDone", nrBuildsDone.load()}, + {"nrStepsStarted", nrStepsStarted.load()}, + {"nrStepsDone", nrStepsDone.load()}, + {"nrRetries", nrRetries.load()}, + {"maxNrRetries", maxNrRetries.load()}, + {"nrQueueWakeups", nrQueueWakeups.load()}, + {"nrDispatcherWakeups", nrDispatcherWakeups.load()}, + {"dispatchTimeMs", dispatchTimeMs.load()}, + {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 
0.0 : (float) dispatchTimeMs / nrDispatcherWakeups}, + {"nrDbConnections", dbPool.count()}, + {"nrActiveDbUpdates", nrActiveDbUpdates.load()}, + }; { - time_t now = time(0); - root["status"] = "up"; - root["time"] = time(0); - root["uptime"] = now - startedAt; - root["pid"] = getpid(); - { - auto builds_(builds.lock()); - root["nrQueuedBuilds"] = builds_->size(); - } { auto steps_(steps.lock()); for (auto i = steps_->begin(); i != steps_->end(); ) if (i->second.lock()) ++i; else i = steps_->erase(i); - root["nrUnfinishedSteps"] = steps_->size(); + statusJson["nrUnfinishedSteps"] = steps_->size(); } { auto runnable_(runnable.lock()); for (auto i = runnable_->begin(); i != runnable_->end(); ) if (i->lock()) ++i; else i = runnable_->erase(i); - root["nrRunnableSteps"] = runnable_->size(); + statusJson["nrRunnableSteps"] = runnable_->size(); } - root["nrActiveSteps"] = activeSteps_.lock()->size(); - root["nrStepsBuilding"] = nrStepsBuilding.load(); - root["nrStepsCopyingTo"] = nrStepsCopyingTo.load(); - root["nrStepsCopyingFrom"] = nrStepsCopyingFrom.load(); - root["nrStepsWaiting"] = nrStepsWaiting.load(); - root["nrUnsupportedSteps"] = nrUnsupportedSteps.load(); - root["bytesSent"] = bytesSent.load(); - root["bytesReceived"] = bytesReceived.load(); - root["nrBuildsRead"] = nrBuildsRead.load(); - root["buildReadTimeMs"] = buildReadTimeMs.load(); - root["buildReadTimeAvgMs"] = nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead; - root["nrBuildsDone"] = nrBuildsDone.load(); - root["nrStepsStarted"] = nrStepsStarted.load(); - root["nrStepsDone"] = nrStepsDone.load(); - root["nrRetries"] = nrRetries.load(); - root["maxNrRetries"] = maxNrRetries.load(); if (nrStepsDone) { - root["totalStepTime"] = totalStepTime.load(); - root["totalStepBuildTime"] = totalStepBuildTime.load(); - root["avgStepTime"] = (float) totalStepTime / nrStepsDone; - root["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone; + statusJson["totalStepTime"] = totalStepTime.load(); + statusJson["totalStepBuildTime"] = totalStepBuildTime.load(); + statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone; + statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone; } - root["nrQueueWakeups"] = nrQueueWakeups.load(); - root["nrDispatcherWakeups"] = nrDispatcherWakeups.load(); - root["dispatchTimeMs"] = dispatchTimeMs.load(); - root["dispatchTimeAvgMs"] = nrDispatcherWakeups == 0 ? 
0.0 : (float) dispatchTimeMs / nrDispatcherWakeups; - root["nrDbConnections"] = dbPool.count(); - root["nrActiveDbUpdates"] = nrActiveDbUpdates.load(); { - auto nested = root["machines"]; auto machines_(machines.lock()); for (auto & i : *machines_) { auto & m(i.second); auto & s(m->state); auto info(m->state->connectInfo.lock()); - auto machine = nested[m->sshName] = { + json machine = { {"enabled", m->enabled}, {"systemTypes", m->systemTypes}, {"supportedFeatures", m->supportedFeatures}, @@ -615,11 +613,12 @@ void State::dumpStatus(Connection & conn) machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone; machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone; } + statusJson["machines"][m->sshName] = machine; } } { - auto jobsets_json = root["jobsets"]; + auto jobsets_json = statusJson["jobsets"] = json::object(); auto jobsets_(jobsets.lock()); for (auto & jobset : *jobsets_) { jobsets_json[jobset.first.first + ":" + jobset.first.second] = { @@ -630,7 +629,7 @@ void State::dumpStatus(Connection & conn) } { - auto machineTypesJson = root["machineTypes"]; + auto machineTypesJson = statusJson["machineTypes"] = json::object(); auto machineTypes_(machineTypes.lock()); for (auto & i : *machineTypes_) { auto machineTypeJson = machineTypesJson[i.first] = { @@ -648,7 +647,7 @@ void State::dumpStatus(Connection & conn) auto store = getDestStore(); auto & stats = store->getStats(); - root["store"] = { + statusJson["store"] = { {"narInfoRead", stats.narInfoRead.load()}, {"narInfoReadAverted", stats.narInfoReadAverted.load()}, {"narInfoMissing", stats.narInfoMissing.load()}, @@ -675,7 +674,7 @@ void State::dumpStatus(Connection & conn) auto s3Store = dynamic_cast(&*store); if (s3Store) { auto & s3Stats = s3Store->getS3Stats(); - auto jsonS3 = root["s3"] = { + auto jsonS3 = statusJson["s3"] = { {"put", s3Stats.put.load()}, {"putBytes", s3Stats.putBytes.load()}, {"putTimeMs", s3Stats.putTimeMs.load()}, @@ -704,7 +703,7 @@ void State::dumpStatus(Connection & conn) pqxx::work txn(conn); // FIXME: use PostgreSQL 9.5 upsert. 
txn.exec("delete from SystemStatus where what = 'queue-runner'"); - txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", root.dump()); + txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump()); txn.exec("notify status_dumped"); txn.commit(); } From c7716817a92031f6d94259a3f9d411dd1f062b1e Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Sat, 28 Jan 2023 09:27:48 +0100 Subject: [PATCH 11/32] Update Nix to 2.13 --- flake.lock | 20 ++++++++++---------- flake.nix | 2 +- src/hydra-eval-jobs/hydra-eval-jobs.cc | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/flake.lock b/flake.lock index b41b843a..75023b95 100644 --- a/flake.lock +++ b/flake.lock @@ -23,32 +23,32 @@ "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1661606874, - "narHash": "sha256-9+rpYzI+SmxJn+EbYxjGv68Ucp22bdFUSy/4LkHkkDQ=", - "owner": "NixOS", + "lastModified": 1675514340, + "narHash": "sha256-JjnneK+TkhkxFoh6EEVKAzEBdxz0iucZsJ6+PWTTReQ=", + "owner": "nixos", "repo": "nix", - "rev": "11e45768b34fdafdcf019ddbd337afa16127ff0f", + "rev": "9157f94e775936798c1f8783eab929e77904e5ed", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "2.11.0", + "owner": "nixos", + "ref": "2.13-maintenance", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1657693803, - "narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=", + "lastModified": 1670461440, + "narHash": "sha256-jy1LB8HOMKGJEGXgzFRLDU1CBGL0/LlkolgnqIsF0D8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "365e1b3a859281cf11b94f87231adeabbdd878a2", + "rev": "04a75b2eecc0acf6239acf9dd04485ff8d14f425", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.05-small", + "ref": "nixos-22.11-small", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index cd9f094d..208d9017 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = "github:NixOS/nix/2.11.0"; + inputs.nix.url = "github:nixos/nix/2.13-maintenance"; outputs = { self, nixpkgs, nix }: let diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 18d39620..de7ae7ba 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -129,7 +129,7 @@ static void worker( LockFlags { .updateLockFile = false, .useRegistries = false, - .allowMutable = false, + .allowUnlocked = false, }); callFlake(state, lockedFlake, *vFlake); From ddd3ac3a4d57549c88812a0d1bad8a196899309d Mon Sep 17 00:00:00 2001 From: Linus Heckemann Date: Tue, 29 Nov 2022 15:48:42 +0100 Subject: [PATCH 12/32] name tests --- flake.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flake.nix b/flake.nix index 208d9017..ec9a708e 100644 --- a/flake.nix +++ b/flake.nix @@ -272,6 +272,7 @@ tests.install = forEachSystem (system: with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; simpleTest { + name = "hydra-install"; nodes.machine = hydraServer; testScript = '' @@ -288,6 +289,7 @@ let pkgs = pkgsBySystem.${system}; in with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; simpleTest { + name = "hydra-notifications"; nodes.machine = { pkgs, ... 
}: { imports = [ hydraServer ]; services.hydra-dev.extraConfig = '' @@ -346,6 +348,7 @@ let pkgs = pkgsBySystem.${system}; in with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }; makeTest { + name = "hydra-gitea"; nodes.machine = { pkgs, ... }: { imports = [ hydraServer ]; services.hydra-dev.extraConfig = '' From 73dff150397bbc3de94e6c4bf4d7ac59b3a681ab Mon Sep 17 00:00:00 2001 From: Linus Heckemann Date: Thu, 1 Dec 2022 23:32:47 +0100 Subject: [PATCH 13/32] tests: ports are numbers --- flake.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.nix b/flake.nix index ec9a708e..5f0a7d24 100644 --- a/flake.nix +++ b/flake.nix @@ -280,7 +280,7 @@ machine.wait_for_job("hydra-server") machine.wait_for_job("hydra-evaluator") machine.wait_for_job("hydra-queue-runner") - machine.wait_for_open_port("3000") + machine.wait_for_open_port(3000) machine.succeed("curl --fail http://localhost:3000/") ''; }); @@ -317,7 +317,7 @@ # Wait until InfluxDB can receive web requests machine.wait_for_job("influxdb") - machine.wait_for_open_port("8086") + machine.wait_for_open_port(8086) # Create an InfluxDB database where hydra will write to machine.succeed( @@ -327,7 +327,7 @@ # Wait until hydra-server can receive HTTP requests machine.wait_for_job("hydra-server") - machine.wait_for_open_port("3000") + machine.wait_for_open_port(3000) # Setup the project and jobset machine.succeed( From 65c1249227dd33fec8856dc1680dfdd5b3598d0a Mon Sep 17 00:00:00 2001 From: Rick van Schijndel Date: Thu, 16 Feb 2023 19:24:53 +0100 Subject: [PATCH 14/32] systemd: hydra-queue-runner: wait for network-online This prevents eval errors when a machine is just started and the network isn't yet online. I'm running hydra on a laptop and the network takes a bit of time to come online (WLAN), so it's nice if the evaluator starts only when the network actually goes online. 
Otherwise an error like this can happen on the first eval(s): ``` error fetching latest change from git repo at `https://github.com/nixos/nixpkgs.git': fatal: unable to access 'https://github.com/nixos/nixpkgs.git/': Could not resolve host: github.com ``` --- hydra-module.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hydra-module.nix b/hydra-module.nix index 8e02dcbb..70e17284 100644 --- a/hydra-module.nix +++ b/hydra-module.nix @@ -340,7 +340,7 @@ in systemd.services.hydra-queue-runner = { wantedBy = [ "multi-user.target" ]; requires = [ "hydra-init.service" ]; - after = [ "hydra-init.service" "network.target" ]; + after = [ "hydra-init.service" "network-online.target" ]; path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ]; restartTriggers = [ hydraConf ]; environment = env // { From f44d3d6ec9b3ca0546a82d0230a9afee4ac179c8 Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Sat, 4 Mar 2023 12:07:34 +0100 Subject: [PATCH 15/32] Update Nix to 2.13.3 Includes the following required fixes: * perl-bindings are correctly initialized: https://github.com/NixOS/nix/commit/77d8066e83ec6120c954ce34290ee1ffe00da133 * /etc/ must be unwritable in build sandbox: https://github.com/NixOS/nix/commit/4acc684ef7b3117c6d6ac12837398a0008a53d85 --- flake.lock | 8 ++++---- flake.nix | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/flake.lock b/flake.lock index 75023b95..08fd86ad 100644 --- a/flake.lock +++ b/flake.lock @@ -23,16 +23,16 @@ "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1675514340, - "narHash": "sha256-JjnneK+TkhkxFoh6EEVKAzEBdxz0iucZsJ6+PWTTReQ=", + "lastModified": 1677045134, + "narHash": "sha256-jUc2ccTR8f6MGY2pUKgujm+lxSPNGm/ZAP+toX+nMNc=", "owner": "nixos", "repo": "nix", - "rev": "9157f94e775936798c1f8783eab929e77904e5ed", + "rev": "4acc684ef7b3117c6d6ac12837398a0008a53d85", "type": "github" }, "original": { "owner": "nixos", - "ref": "2.13-maintenance", + "ref": "2.13.3", "repo": "nix", "type": "github" } diff --git a/flake.nix b/flake.nix index 5f0a7d24..beeb90c1 100644 --- a/flake.nix +++ b/flake.nix @@ -2,7 +2,7 @@ description = "A Nix-based continuous build system"; inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = "github:nixos/nix/2.13-maintenance"; + inputs.nix.url = "github:nixos/nix/2.13.3"; outputs = { self, nixpkgs, nix }: let From 810d2e6b51af82a948a12eae505a66c2e0f6f09f Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 6 Mar 2023 07:45:03 -0800 Subject: [PATCH 16/32] Drop unused IndexBuildOutputsOnPath index Also it's larger than the actual table it's indexing lol. -[ RECORD 30 ]----------+----------------------------------------- table_name | buildoutputs index_name | indexbuildoutputsonpath index_scans_count | 0 index_size | 31 GB table_reads_index_count | 2128699937 table_reads_seq_count | 0 table_reads_count | 2128699937 table_writes_count | 22442976 table_size | 28 GB --- src/sql/upgrade-83.sql | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 src/sql/upgrade-83.sql diff --git a/src/sql/upgrade-83.sql b/src/sql/upgrade-83.sql new file mode 100644 index 00000000..01603e78 --- /dev/null +++ b/src/sql/upgrade-83.sql @@ -0,0 +1,3 @@ +-- This index was introduced in a migration but was never recorded in +-- hydra.sql (the source of truth), which is why `if exists` is required. 
+drop index if exists IndexBuildOutputsOnPath; From 8d53c3ca11855234e32ca9f1da0f544491e6cf09 Mon Sep 17 00:00:00 2001 From: Cole Helbling Date: Mon, 6 Mar 2023 07:47:35 -0800 Subject: [PATCH 17/32] test: use ubuntu-latest --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0f5f43da..42cb6843 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,7 +4,7 @@ on: push: jobs: tests: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: From a084e204ae8b8ffcf3a4a1912582a00c15914136 Mon Sep 17 00:00:00 2001 From: Rick van Schijndel Date: Tue, 7 Mar 2023 21:56:20 +0100 Subject: [PATCH 18/32] systemd: hydra-queue-runner: wait for network.target too Co-authored-by: Sandro --- hydra-module.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hydra-module.nix b/hydra-module.nix index 70e17284..1f0792d7 100644 --- a/hydra-module.nix +++ b/hydra-module.nix @@ -340,7 +340,7 @@ in systemd.services.hydra-queue-runner = { wantedBy = [ "multi-user.target" ]; requires = [ "hydra-init.service" ]; - after = [ "hydra-init.service" "network-online.target" ]; + after = [ "hydra-init.service" "network.target" "network-online.target" ]; path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ]; restartTriggers = [ hydraConf ]; environment = env // { From f88bef15ed57c36dc33d220c8cdf1d5021b8fdbb Mon Sep 17 00:00:00 2001 From: Rob Vermaas Date: Mon, 13 Mar 2023 16:44:09 +0100 Subject: [PATCH 19/32] Use new Google for Web signin, the old way will be deprecated Mar 31st 2023 --- src/root/auth.tt | 5 +---- src/root/topbar.tt | 6 ++++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/root/auth.tt b/src/root/auth.tt index 360904d9..d1539765 100644 --- a/src/root/auth.tt +++ b/src/root/auth.tt @@ -82,7 +82,7 @@ function onGoogleSignIn(googleUser) { requestJSON({ url: "[% c.uri_for('/google-login') %]", - data: "id_token=" + googleUser.getAuthResponse().id_token, + data: "id_token=" + googleUser.credential, type: 'POST', success: function(data) { window.location.reload(); @@ -91,9 +91,6 @@ return false; }; - $("#google-signin").click(function() { - $(".g-signin2:first-child > div").click(); - }); [% END %] diff --git a/src/root/topbar.tt b/src/root/topbar.tt index fdfbf431..1771222d 100644 --- a/src/root/topbar.tt +++ b/src/root/topbar.tt @@ -133,8 +133,10 @@ [% ELSE %] [% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %] [% IF c.config.enable_google_login %] - - Sign in with Google + +
+
+ [% END %] [% IF c.config.github_client_id %] From b4099df91ec542a2b2eed38d07ff1a64c265711f Mon Sep 17 00:00:00 2001 From: Julien Malka Date: Mon, 24 Apr 2023 16:30:03 +0200 Subject: [PATCH 20/32] hydra-eval-jobs: fix jobs containing a dot being dropped --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index de7ae7ba..af839bba 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -245,7 +245,7 @@ static void worker( StringSet ss; for (auto & i : v->attrs->lexicographicOrder(state.symbols)) { std::string name(state.symbols[i->name]); - if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) { + if (name.find(' ') != std::string::npos) { printError("skipping job with illegal name '%s'", name); continue; } @@ -416,7 +416,11 @@ int main(int argc, char * * argv) if (response.find("attrs") != response.end()) { for (auto & i : response["attrs"]) { - auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i; + std::string path = i; + if (path.find(".") != std::string::npos){ + path = "\"" + path + "\""; + } + auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path; newAttrs.insert(s); } } From a0c8440a5c6eee911479c220706f74fd17e0c55f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Jun 2023 13:14:49 +0200 Subject: [PATCH 21/32] Update to Nix 2.16 and NixOS 23.05 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nix': 'github:nixos/nix/4acc684ef7b3117c6d6ac12837398a0008a53d85' (2023-02-22) → 'github:NixOS/nix/84050709ea18f3285a85d729f40c8f8eddf5008e' (2023-06-06) • Added input 'nix/flake-compat': 'github:edolstra/flake-compat/35bb57c0c8d8b62bbfd284272c928ceb64ddbde9' (2023-01-17) • Updated input 'nixpkgs': follows 'nix/nixpkgs' → 'github:NixOS/nixpkgs/ef0bc3976340dab9a4e087a0bcff661a8b2e87f3' (2023-06-21) --- flake.lock | 46 +++++++++++++++++++++++++++++++--------------- flake.nix | 5 +++-- 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/flake.lock b/flake.lock index 08fd86ad..ee85f6fa 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,21 @@ { "nodes": { + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "lowdown-src": { "flake": false, "locked": { @@ -18,37 +34,40 @@ }, "nix": { "inputs": { + "flake-compat": "flake-compat", "lowdown-src": "lowdown-src", - "nixpkgs": "nixpkgs", + "nixpkgs": [ + "nixpkgs" + ], "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1677045134, - "narHash": "sha256-jUc2ccTR8f6MGY2pUKgujm+lxSPNGm/ZAP+toX+nMNc=", - "owner": "nixos", + "lastModified": 1686048923, + "narHash": "sha256-/XCWa2osNFIpPC5MkxlX6qTZf/DaTLwS3LWN0SRFiuU=", + "owner": "NixOS", "repo": "nix", - "rev": "4acc684ef7b3117c6d6ac12837398a0008a53d85", + "rev": "84050709ea18f3285a85d729f40c8f8eddf5008e", "type": "github" }, "original": { - "owner": "nixos", - "ref": "2.13.3", + "owner": "NixOS", + "ref": "2.16.1", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1670461440, - "narHash": 
"sha256-jy1LB8HOMKGJEGXgzFRLDU1CBGL0/LlkolgnqIsF0D8=", + "lastModified": 1687379288, + "narHash": "sha256-cSuwfiqYfeVyqzCRkU9AvLTysmEuSal8nh6CYr+xWog=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "04a75b2eecc0acf6239acf9dd04485ff8d14f425", + "rev": "ef0bc3976340dab9a4e087a0bcff661a8b2e87f3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11-small", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } @@ -72,10 +91,7 @@ "root": { "inputs": { "nix": "nix", - "nixpkgs": [ - "nix", - "nixpkgs" - ] + "nixpkgs": "nixpkgs" } } }, diff --git a/flake.nix b/flake.nix index beeb90c1..6bbec9b0 100644 --- a/flake.nix +++ b/flake.nix @@ -1,8 +1,9 @@ { description = "A Nix-based continuous build system"; - inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = "github:nixos/nix/2.13.3"; + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05"; + inputs.nix.url = "github:NixOS/nix/2.16.1"; + inputs.nix.inputs.nixpkgs.follows = "nixpkgs"; outputs = { self, nixpkgs, nix }: let From 9f69bb5c2c132e9ac7b8155972096b425155c6e1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Jun 2023 15:06:34 +0200 Subject: [PATCH 22/32] Fix compilation against Nix 2.16 --- configure.ac | 2 -- src/hydra-eval-jobs/hydra-eval-jobs.cc | 31 ++++++++++++-------- src/hydra-queue-runner/build-remote.cc | 12 ++++---- src/hydra-queue-runner/builder.cc | 4 +-- src/hydra-queue-runner/dispatcher.cc | 8 ++--- src/hydra-queue-runner/hydra-queue-runner.cc | 7 ++--- src/hydra-queue-runner/queue-monitor.cc | 14 ++++----- 7 files changed, 40 insertions(+), 38 deletions(-) diff --git a/configure.ac b/configure.ac index 0c823696..eec647c3 100644 --- a/configure.ac +++ b/configure.ac @@ -10,8 +10,6 @@ AC_PROG_LN_S AC_PROG_LIBTOOL AC_PROG_CXX -CXXFLAGS+=" -std=c++17" - AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_ARG_WITH([docbook-xsl], diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index af839bba..79523944 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -25,7 +25,8 @@ #include -void check_pid_status_nonblocking(pid_t check_pid) { +void check_pid_status_nonblocking(pid_t check_pid) +{ // Only check 'initialized' and known PID's if (check_pid <= 0) { return; } @@ -100,7 +101,7 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std: else if (v.type() == nAttrs) { auto a = v.attrs->find(state.symbols.create(subAttribute)); if (a != v.attrs->end()) - res.push_back(std::string(state.forceString(*a->value))); + res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes"))); } }; @@ -197,26 +198,30 @@ static void worker( /* If this is an aggregate, then get its constituents. 
*/ auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, a->pos)) { + if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) { auto a = v->attrs->get(state.symbols.create("constituents")); if (!a) throw EvalError("derivation must have a ‘constituents’ attribute"); + NixStringContext context; + state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false); + for (auto & c : context) + std::visit(overloaded { + [&](const NixStringContextElem::Built & b) { + job["constituents"].push_back(state.store->printStorePath(b.drvPath)); + }, + [&](const NixStringContextElem::Opaque & o) { + }, + [&](const NixStringContextElem::DrvDeep & d) { + }, + }, c.raw()); - PathSet context; - state.coerceToString(a->pos, *a->value, context, true, false); - for (auto & i : context) - if (i.at(0) == '!') { - size_t index = i.find("!", 1); - job["constituents"].push_back(std::string(i, index + 1)); - } - - state.forceList(*a->value, a->pos); + state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute"); for (unsigned int n = 0; n < a->value->listSize(); ++n) { auto v = a->value->listElems()[n]; state.forceValue(*v, noPos); if (v->type() == nString) - job["namedConstituents"].push_back(state.forceStringNoCtx(*v)); + job["namedConstituents"].push_back(v->str()); } } diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 21a6c331..6baff7df 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -116,12 +116,12 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore, the remote host to substitute missing paths. */ // FIXME: substitute output pollutes our build log to << cmdQueryValidPaths << 1 << useSubstitutes; - worker_proto::write(destStore, to, closure); + workerProtoWrite(destStore, to, closure); to.flush(); /* Get back the set of paths that are already valid on the remote host. 
*/ - auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {}); + auto present = WorkerProto<StorePathSet>::read(destStore, from); if (present.size() == closure.size()) return; @@ -367,7 +367,7 @@ void State::buildRemote(ref<Store> destStore, } } if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) { - worker_proto::read(*localStore, from, Phantom<DrvOutputs> {}); + WorkerProto<DrvOutputs>::read(*localStore, from); } switch ((BuildResult::Status) res) { case BuildResult::Built: @@ -444,17 +444,17 @@ void State::buildRemote(ref<Store> destStore, std::map<StorePath, ValidPathInfo> infos; size_t totalNarSize = 0; to << cmdQueryPathInfos; - worker_proto::write(*localStore, to, outputs); + workerProtoWrite(*localStore, to, outputs); to.flush(); while (true) { auto storePathS = readString(from); if (storePathS == "") break; auto deriver = readString(from); // deriver - auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {}); + auto references = WorkerProto<StorePathSet>::read(*localStore, from); readLongLong(from); // download size auto narSize = readLongLong(from); auto narHash = Hash::parseAny(readString(from), htSHA256); - auto ca = parseContentAddressOpt(readString(from)); + auto ca = ContentAddress::parseOpt(readString(from)); readStrings<StringSet>(from); // sigs ValidPathInfo info(localStore->parseStorePath(storePathS), narHash); assert(outputs.count(info.path)); diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index 37022522..89aec323 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -323,7 +323,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore, pqxx::work txn(*conn); for (auto & b : direct) { - printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id); + printInfo("marking build %1% as succeeded", b->id); markSucceededBuild(txn, b, res, buildId != b->id || result.isCached, result.startTime, result.stopTime); } @@ -451,7 +451,7 @@ void State::failStep( /* Mark all builds that depend on this derivation as failed.
*/ for (auto & build : indirect) { if (build->finishedInDB) continue; - printMsg(lvlError, format("marking build %1% as failed") % build->id); + printError("marking build %1% as failed", build->id); txn.exec_params0 ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0", build->id, diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index d2bb3c90..1e40fa69 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -52,7 +52,7 @@ void State::dispatcher() { auto dispatcherWakeup_(dispatcherWakeup.lock()); if (!*dispatcherWakeup_) { - printMsg(lvlDebug, format("dispatcher sleeping for %1%s") % + debug("dispatcher sleeping for %1%s", std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count()); dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil); } @@ -60,7 +60,7 @@ void State::dispatcher() } } catch (std::exception & e) { - printMsg(lvlError, format("dispatcher: %1%") % e.what()); + printError("dispatcher: %s", e.what()); sleep(1); } @@ -80,8 +80,8 @@ system_time State::doDispatch() jobset.second->pruneSteps(); auto s2 = jobset.second->shareUsed(); if (s1 != s2) - printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%") - % jobset.first.first % jobset.first.second % s1 % s2); + debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%", jobset.first.first, jobset.first.second, s1, s2); } } diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index b84681d5..acf1282e 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -161,9 +161,9 @@ void State::parseMachines(const std::string & contents) same name. */ auto i = oldMachines.find(machine->sshName); if (i == oldMachines.end()) - printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName); + printMsg(lvlChatty, "adding new machine ‘%1%’", machine->sshName); else - printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName); + printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName); machine->state = i == oldMachines.end() ? std::make_shared<Machine::State>() : i->second->state; @@ -173,7 +173,7 @@ void State::parseMachines(const std::string & contents) for (auto & m : oldMachines) if (newMachines.find(m.first) == newMachines.end()) { if (m.second->enabled) - printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first); + printInfo("removing machine ‘%1%’", m.first); /* Add a disabled Machine object to make sure stats are maintained.
                  */
                 auto machine = std::make_shared<Machine>(*(m.second));
@@ -928,7 +928,6 @@ int main(int argc, char * * argv)
         });
         settings.verboseBuild = true;
-        settings.lockCPU = false;
         State state{metricsAddrOpt};
         if (status)
diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc
index 12d55b79..0bb167a2 100644
--- a/src/hydra-queue-runner/queue-monitor.cc
+++ b/src/hydra-queue-runner/queue-monitor.cc
@@ -13,7 +13,7 @@ void State::queueMonitor()
         try {
             queueMonitorLoop();
         } catch (std::exception & e) {
-            printMsg(lvlError, format("queue monitor: %1%") % e.what());
+            printError("queue monitor: %s", e.what());
             sleep(10); // probably a DB problem, so don't retry right away
         }
     }
@@ -142,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn,
     createBuild = [&](Build::ptr build) {
         prom.queue_build_loads.Increment();
-        printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
+        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
         nrAdded++;
         newBuildsByID.erase(build->id);
         if (!localStore->isValidPath(build->drvPath)) {
             /* Derivation has been GC'ed prematurely. */
-            printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
+            printError("aborting GC'ed build %1%", build->id);
             if (!build->finishedInDB) {
                 auto mc = startDbUpdate();
                 pqxx::work txn(conn);
@@ -302,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,
     /* Add the new runnable build steps to ‘runnable’ and wake up
       the builder threads. */
-    printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
+    printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
     for (auto & r : newRunnable) makeRunnable(r);
@@ -358,13 +358,13 @@ void State::processQueueChange(Connection & conn)
         for (auto i = builds_->begin(); i != builds_->end(); ) {
             auto b = currentIds.find(i->first);
             if (b == currentIds.end()) {
-                printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
+                printInfo("discarding cancelled build %1%", i->first);
                 i = builds_->erase(i);
                 // FIXME: ideally we would interrupt active build steps here.
                 continue;
             }
             if (i->second->globalPriority < b->second) {
-                printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
+                printInfo("priority of build %1% increased", i->first);
                 i->second->globalPriority = b->second;
                 i->second->propagatePriorities();
             }
@@ -654,7 +654,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<Store>
         if (r.empty()) continue;
         BuildID id = r[0][0].as<BuildID>();
-        printMsg(lvlInfo, format("reusing build %d") % id);
+        printInfo("reusing build %d", id);
         BuildOutput res;
         res.failed = r[0][1].as<int>() == bsFailedWithOutput;

From ce001bb1420bb0c774ea08cd21fd624ccea04788 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Fri, 23 Jun 2023 15:09:09 +0200
Subject: [PATCH 23/32] Relax time interval checks

I saw one of these failing randomly.
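The tolerance being widened below is the second argument to Test2's within() comparator: the assertion passes when the timestamp lies within that many seconds of the expected value. A minimal sketch of why a 2-second window is flaky, assuming Test2::V0's within() as used in these tests (the 4-second sleep is a stand-in for a scheduler stall on a loaded CI machine, not part of the real suite):

    use Test2::V0;

    # Timestamp recorded by the code under test.
    my $recorded = time();

    # Simulated scheduler/IO stall between the event and the assertion.
    sleep 4;

    # Old check: within(time() - 1, 2) accepts [time() - 3, time() + 1],
    # which no longer contains $recorded (now 4 seconds in the past).
    # New check: within(time() - 1, 5) accepts [time() - 6, time() + 4],
    # which still does.
    is($recorded, within(time() - 1, 5), "a 5-second window absorbs the stall");

    done_testing;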
---
 t/Hydra/Plugin/RunCommand/basic.t      |  4 ++--
 t/Hydra/Plugin/RunCommand/errno.t      |  4 ++--
 t/Hydra/Schema/Result/RunCommandLogs.t | 22 +++++++++++-----------
 t/Hydra/Schema/Result/TaskRetries.t    |  4 ++--
 t/Hydra/Schema/ResultSet/TaskRetries.t |  2 +-
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/t/Hydra/Plugin/RunCommand/basic.t b/t/Hydra/Plugin/RunCommand/basic.t
index e9fc730b..2c0eec68 100644
--- a/t/Hydra/Plugin/RunCommand/basic.t
+++ b/t/Hydra/Plugin/RunCommand/basic.t
@@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub {
     ok($runlog->did_succeed(), "The process did succeed.");
     is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
     is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
     is($runlog->exit_code, 0, "This command should have succeeded.");

     subtest "Validate the run log file exists" => sub {
diff --git a/t/Hydra/Plugin/RunCommand/errno.t b/t/Hydra/Plugin/RunCommand/errno.t
index 9e06f9bb..6b05d457 100644
--- a/t/Hydra/Plugin/RunCommand/errno.t
+++ b/t/Hydra/Plugin/RunCommand/errno.t
@@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub {
     ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
     is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
     is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
     is($runlog->exit_code, undef, "This command should not have executed.");
     is($runlog->error_number, 2, "This command failed to exec.");
 };
diff --git a/t/Hydra/Schema/Result/RunCommandLogs.t b/t/Hydra/Schema/Result/RunCommandLogs.t
index 80589549..f702fcf9 100644
--- a/t/Hydra/Schema/Result/RunCommandLogs.t
+++ b/t/Hydra/Schema/Result/RunCommandLogs.t
@@ -55,7 +55,7 @@ subtest "Starting a process" => sub {
     ok($runlog->is_running(), "The process is running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
     is($runlog->end_time, undef, "The end time is undefined.");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, 0, "The exit code is 0.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, 85, "The exit code is 85.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, 15, "Signal 15 was sent.");
@@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, 9, "The signal is 9.");
@@ -134,8 +134,8 @@ subtest "The process failed to start" => sub {
     ok(!$runlog->is_running(), "The process is running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, 2, "The error number is saved");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, undef, "The signal is undefined.");
diff --git a/t/Hydra/Schema/Result/TaskRetries.t b/t/Hydra/Schema/Result/TaskRetries.t
index 0425f11c..a9c9f132 100644
--- a/t/Hydra/Schema/Result/TaskRetries.t
+++ b/t/Hydra/Schema/Result/TaskRetries.t
@@ -25,11 +25,11 @@ subtest "requeue" => sub {
     $task->requeue();
     is($task->attempts, 2, "We should have stored a second retry");
-    is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff step");
+    is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff step");

     $task->requeue();
     is($task->attempts, 3, "We should have stored a third retry");
-    is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step");
+    is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step");
 };

 done_testing;
diff --git a/t/Hydra/Schema/ResultSet/TaskRetries.t b/t/Hydra/Schema/ResultSet/TaskRetries.t
index 4555832c..a9354896 100644
--- a/t/Hydra/Schema/ResultSet/TaskRetries.t
+++ b/t/Hydra/Schema/ResultSet/TaskRetries.t
@@ -101,7 +101,7 @@ subtest "save_task" => sub {
     is($retry->pluginname, "FooPluginName", "Plugin name should match");
     is($retry->payload, "1", "Payload should match");
     is($retry->attempts, 1, "We've had one attempt");
-    is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away");
+    is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away");
 };

 done_testing;

From 5c35d1be2005ee0471076df5c904c72f4c748d66 Mon Sep 17 00:00:00 2001
From: Maximilian Bosch
Date: Sun, 25 Jun 2023 17:25:43 +0200
Subject: [PATCH 24/32] hydra-queue-runner: fix stats

---
 src/hydra-queue-runner/hydra-queue-runner.cc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index b84681d5..30fad267 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -618,7 +618,7 @@ void State::dumpStatus(Connection & conn)
     }

     {
-        auto jobsets_json = statusJson["jobsets"] = json::object();
+        auto jobsets_json = json::object();
         auto jobsets_(jobsets.lock());
         for (auto & jobset : *jobsets_) {
             jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
@@ -626,10 +626,11 @@ void State::dumpStatus(Connection & conn)
                 {"seconds", jobset.second->getSeconds()},
             };
         }
+        statusJson["jobsets"] = jobsets_json;
     }

     {
-        auto machineTypesJson = statusJson["machineTypes"] = json::object();
+        auto machineTypesJson = json::object();
         auto machineTypes_(machineTypes.lock());
         for (auto & i : *machineTypes_) {
             auto machineTypeJson = machineTypesJson[i.first] = {
@@ -642,6 +643,7 @@ void State::dumpStatus(Connection & conn)
             if (i.second.running == 0)
                 machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
         }
+        statusJson["machineTypes"] = machineTypesJson;
     }

     auto store = getDestStore();

From 46246dcae3ad05dfd85fcf4f61b44f5c2114634a Mon Sep 17 00:00:00 2001
From: Arian van Putten
Date: Wed, 19 Jul 2023 15:13:25 +0200
Subject: [PATCH 25/32] Fix docs for /eval/{id} endpoint

You need to pass it an eval id, not a build id.

---
 hydra-api.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hydra-api.yaml b/hydra-api.yaml
index ce7e0f9a..c3068d66 100644
--- a/hydra-api.yaml
+++ b/hydra-api.yaml
@@ -533,13 +533,13 @@ paths:
           schema:
             $ref: '#/components/schemas/Error'

-  /eval/{build-id}:
+  /eval/{eval-id}:
     get:
-      summary: Retrieves evaluations identified by build id
+      summary: Retrieves evaluations identified by eval id
       parameters:
-      - name: build-id
+      - name: eval-id
         in: path
-        description: build identifier
+        description: eval identifier
         required: true
         schema:
           type: integer

From a78664f1b5faac4ade5b3a2f3c0e6c6e59a94c51 Mon Sep 17 00:00:00 2001
From: Arian van Putten
Date: Thu, 20 Jul 2023 14:43:03 +0200
Subject: [PATCH 26/32] Fix documentation of defaultpath in api docs

---
 hydra-api.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hydra-api.yaml b/hydra-api.yaml
index ce7e0f9a..2646a275 100644
--- a/hydra-api.yaml
+++ b/hydra-api.yaml
@@ -870,7 +870,7 @@ components:
           description: Size of the produced file
           type: integer
         defaultpath:
-          description: This is a Git/Mercurial commit hash or a Subversion revision number
+          description: if path is a directory, the default file relative to path to be served
           type: string
         'type':
           description: Types of build product (user defined)

From b23431a657d8a9b2f478c95dd81034780751a262 Mon Sep 17 00:00:00 2001
From: Linus Heckemann
Date: Fri, 4 Aug 2023 15:53:06 +0200
Subject: [PATCH 27/32] Support Nix 2.17

---
 flake.lock                             |  8 ++++----
 flake.nix                              |  2 +-
 src/hydra-queue-runner/build-remote.cc | 25 +++++++++++++++----------
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/flake.lock b/flake.lock
index ee85f6fa..4b18fbb4 100644
--- a/flake.lock
+++ b/flake.lock
@@ -42,16 +42,16 @@
         "nixpkgs-regression": "nixpkgs-regression"
       },
       "locked": {
-        "lastModified": 1686048923,
-        "narHash": "sha256-/XCWa2osNFIpPC5MkxlX6qTZf/DaTLwS3LWN0SRFiuU=",
+        "lastModified": 1690219894,
+        "narHash": "sha256-QMYAkdtU+g9HlZKtoJ+AI6TbWzzovKGnPZJHfZdclc8=",
         "owner": "NixOS",
         "repo": "nix",
-        "rev": "84050709ea18f3285a85d729f40c8f8eddf5008e",
+        "rev": "a212300a1d9f9c7b0daf19c00c87fc50480f54f4",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "2.16.1",
+        "ref": "2.17.0",
         "repo": "nix",
         "type": "github"
       }
diff --git a/flake.nix b/flake.nix
index 6bbec9b0..7e7d50e2 100644
--- a/flake.nix
+++ b/flake.nix
@@ -2,7 +2,7 @@
   description = "A Nix-based continuous build system";

   inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
-  inputs.nix.url = "github:NixOS/nix/2.16.1";
+  inputs.nix.url = "github:NixOS/nix/2.17.0";
   inputs.nix.inputs.nixpkgs.follows = "nixpkgs";

   outputs = { self, nixpkgs, nix }:
diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 6baff7df..6bbd22e2 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -6,6 +6,7 @@
 #include

 #include "build-result.hh"
+#include "path.hh"
 #include "serve-protocol.hh"
 #include "state.hh"
 #include "util.hh"
@@ -110,18 +111,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
     StorePathSet closure;
     destStore.computeFSClosure(paths, closure);

+    WorkerProto::WriteConn wconn { .to = to };
+    WorkerProto::ReadConn rconn { .from = from };
     /* Send the "query valid paths" command with the "lock" option
        enabled. This prevents a race where the remote host
        garbage-collect paths that are already there. Optionally, ask
        the remote host to substitute missing paths. */
     // FIXME: substitute output pollutes our build log
-    to << cmdQueryValidPaths << 1 << useSubstitutes;
-    workerProtoWrite(destStore, to, closure);
+    to << ServeProto::Command::QueryValidPaths << 1 << useSubstitutes;
+    WorkerProto::write(destStore, wconn, closure);
     to.flush();

     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = WorkerProto<StorePathSet>::read(destStore, from);
+    auto present = WorkerProto::Serialise<StorePathSet>::read(destStore, rconn);

     if (present.size() == closure.size()) return;
@@ -136,7 +139,7 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
     std::unique_lock<std::timed_mutex> sendLock(sendMutex, std::chrono::seconds(600));

-    to << cmdImportPaths;
+    to << ServeProto::Command::ImportPaths;
     destStore.exportPaths(missing, to);
     to.flush();
@@ -223,7 +226,9 @@ void State::buildRemote(ref<Store> destStore,
         });

         FdSource from(child.from.get());
+        WorkerProto::ReadConn rconn { .from = from };
         FdSink to(child.to.get());
+        WorkerProto::WriteConn wconn { .to = to };

         Finally updateStats([&]() {
             bytesReceived += from.read;
@@ -334,7 +339,7 @@ void State::buildRemote(ref<Store> destStore,

         updateStep(ssBuilding);

-        to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
+        to << ServeProto::Command::BuildDerivation << localStore->printStorePath(step->drvPath);
         writeDerivation(to, *localStore, basicDrv);
         to << maxSilentTime << buildTimeout;
         if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
@@ -367,7 +372,7 @@ void State::buildRemote(ref<Store> destStore,
             }
         }
         if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
-            WorkerProto<DrvOutputs>::read(*localStore, from);
+            WorkerProto::Serialise<DrvOutputs>::read(*localStore, rconn);
         }
         switch ((BuildResult::Status) res) {
             case BuildResult::Built:
@@ -443,14 +448,14 @@ void State::buildRemote(ref<Store> destStore,
         /* Get info about each output path. */
         std::map<StorePath, ValidPathInfo> infos;
         size_t totalNarSize = 0;
-        to << cmdQueryPathInfos;
-        workerProtoWrite(*localStore, to, outputs);
+        to << ServeProto::Command::QueryPathInfos;
+        WorkerProto::write(*localStore, wconn, outputs);
         to.flush();
         while (true) {
             auto storePathS = readString(from);
             if (storePathS == "") break;
             auto deriver = readString(from); // deriver
-            auto references = WorkerProto<StorePathSet>::read(*localStore, from);
+            auto references = WorkerProto::Serialise<StorePathSet>::read(*localStore, rconn);
             readLongLong(from); // download size
             auto narSize = readLongLong(from);
             auto narHash = Hash::parseAny(readString(from), htSHA256);
@@ -494,7 +499,7 @@ void State::buildRemote(ref<Store> destStore,
            lambda function only gets executed if someone tries to read
            from source2, we will send the command from here rather than
            outside the lambda.
          */
-        to << cmdDumpStorePath << localStore->printStorePath(path);
+        to << ServeProto::Command::DumpStorePath << localStore->printStorePath(path);
         to.flush();

         TeeSource tee(from, sink);

From 9f0427385fa9f306c4cd651dc97377624b90997c Mon Sep 17 00:00:00 2001
From: Linus Heckemann
Date: Sun, 20 Aug 2023 14:55:56 +0200
Subject: [PATCH 28/32] Apply LTO fix suggested by Ericson2314

---
 src/hydra-queue-runner/build-remote.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 6bbd22e2..46c94e60 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -11,6 +11,7 @@
 #include "state.hh"
 #include "util.hh"
 #include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
 #include "finally.hh"
 #include "url.hh"

From 35ccc9ebb22796270aafe1b567420e13529e3be5 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Wed, 23 Aug 2023 17:04:45 +0200
Subject: [PATCH 29/32] Fix indentation

Co-authored-by: John Ericson

---
 src/hydra-queue-runner/build-remote.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc
index 46c94e60..56ce1ccf 100644
--- a/src/hydra-queue-runner/build-remote.cc
+++ b/src/hydra-queue-runner/build-remote.cc
@@ -373,7 +373,7 @@ void State::buildRemote(ref<Store> destStore,
             }
         }
         if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
-                WorkerProto::Serialise<DrvOutputs>::read(*localStore, rconn);
+            WorkerProto::Serialise<DrvOutputs>::read(*localStore, rconn);
         }
         switch ((BuildResult::Status) res) {
             case BuildResult::Built:

From 113836ebae04c518b9c787983da9d43af4e27f73 Mon Sep 17 00:00:00 2001
From: sternenseemann
Date: Wed, 30 Aug 2023 15:06:48 +0200
Subject: [PATCH 30/32] hydra-api.yaml: name JobsetEval parameter eval-id

This is more accurate since the id space is not shared between build and
eval ids.
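A minimal client sketch of the renamed parameter, against a hypothetical Hydra instance and eval id; it assumes the JobsetEval JSON carries a "builds" array of build ids, which is not shown in this excerpt of the API description:

    use strict;
    use warnings;
    use JSON::MaybeXS qw(decode_json);
    use LWP::UserAgent;

    my $base_url = "https://hydra.example.org";   # hypothetical instance
    my $eval_id  = 1234;                          # a JobsetEval id, not a build id

    # The path segment is an eval id; individual builds live under /build/{build-id}.
    my $ua  = LWP::UserAgent->new;
    my $res = $ua->get("$base_url/eval/$eval_id", Accept => "application/json");
    die "GET /eval/$eval_id failed: " . $res->status_line . "\n"
        unless $res->is_success;

    # Count the build ids attached to the evaluation.
    my $eval = decode_json($res->decoded_content);
    printf "eval %d has %d builds\n", $eval_id, scalar @{ $eval->{builds} // [] };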
---
 hydra-api.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hydra-api.yaml b/hydra-api.yaml
index 2646a275..f61a27aa 100644
--- a/hydra-api.yaml
+++ b/hydra-api.yaml
@@ -533,13 +533,13 @@ paths:
           schema:
             $ref: '#/components/schemas/Error'

-  /eval/{build-id}:
+  /eval/{eval-id}:
     get:
-      summary: Retrieves evaluations identified by build id
+      summary: Retrieves evaluations identified by eval id
       parameters:
-      - name: build-id
+      - name: eval-id
         in: path
-        description: build identifier
+        description: eval identifier
         required: true
         schema:
           type: integer

From e2195c46d12e1d5ff14dd3d9319e405e2b2b49ab Mon Sep 17 00:00:00 2001
From: sternenseemann
Date: Wed, 30 Aug 2023 15:08:11 +0200
Subject: [PATCH 31/32] hydra-api.yaml: document all_builds (/eval/{eval-id}/builds)

---
 hydra-api.yaml | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/hydra-api.yaml b/hydra-api.yaml
index f61a27aa..623c9082 100644
--- a/hydra-api.yaml
+++ b/hydra-api.yaml
@@ -551,6 +551,24 @@ paths:
           schema:
             $ref: '#/components/schemas/JobsetEval'

+  /eval/{eval-id}/builds:
+    get:
+      summary: Retrieves all builds belonging to an evaluation identified by eval id
+      parameters:
+      - name: eval-id
+        in: path
+        description: eval identifier
+        required: true
+        schema:
+          type: integer
+      responses:
+        '200':
+          description: builds
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/JobsetEvalBuilds'
+
 components:

   schemas:
@@ -796,6 +814,13 @@ components:
         additionalProperties:
           $ref: '#/components/schemas/JobsetEvalInput'

+    JobsetEvalBuilds:
+      type: array
+      items:
+        type: object
+        additionalProperties:
+          $ref: '#/components/schemas/Build'
+
     JobsetOverview:
       type: array
       items:

From 6a5fb9efaea35ca29836371307f5083576f421ab Mon Sep 17 00:00:00 2001
From: Stig Palmquist
Date: Fri, 20 Oct 2023 00:09:28 +0200
Subject: [PATCH 32/32] Set output length of C::P::Argon2 hashes to 16

Since the default length in Crypt::Passphrase::Argon2 changed from 16 to 32
in 0.009, some tests that expected the passphrase to be unchanged started
failing.

---
 src/lib/Hydra/Schema/Result/Users.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib/Hydra/Schema/Result/Users.pm b/src/lib/Hydra/Schema/Result/Users.pm
index b3de6543..c28ae931 100644
--- a/src/lib/Hydra/Schema/Result/Users.pm
+++ b/src/lib/Hydra/Schema/Result/Users.pm
@@ -216,7 +216,7 @@ sub json_hint {
 sub _authenticator() {
     my $authenticator = Crypt::Passphrase->new(
-        encoder => 'Argon2',
+        encoder => { module => 'Argon2', output_size => 16 },
         validators => [
             (sub {
                 my ($password, $hash) = @_;