Compare commits

No commits in common. "927d381dd515a59f1ddb16c1e7186bc1984bafaa" and "459aa0a5983a0bd546399c08231468d6e9282f54" have entirely different histories.

33 changed files with 328 additions and 336 deletions

View file

@ -1,12 +0,0 @@
UseColor: true
Checks:
- -*
- bugprone-*
# kind of nonsense
- -bugprone-easily-swappable-parameters
# many warnings due to not recognizing `assert` properly
- -bugprone-unchecked-optional-access
- modernize-*
- -modernize-use-trailing-return-type

.envrc (1 change)
View file

@ -1 +0,0 @@
use flake

.gitignore (2 changes)
View file

@ -5,5 +5,3 @@
/src/sql/tmp.sqlite
result
result-*
.hydra-data
outputs

View file

@ -78,11 +78,11 @@ $ nix-build
### Development Environment
You can use the provided shell.nix to get a working development environment:
```
$ nix develop
[nix-shell]$ just setup
[nix-shell]$ just install
$ nix-shell
$ autoreconfPhase
$ configurePhase # NOTE: not ./configure
$ make
```
### Executing Hydra During Development
@ -91,9 +91,10 @@ When working on new features or bug fixes you need to be able to run Hydra from
can be done using [foreman](https://github.com/ddollar/foreman):
```
$ nix develop
[nix-shell]$ just install
[nix-shell]$ foreman start
$ nix-shell
$ # hack hack
$ make
$ foreman start
```
Have a look at the [Procfile](./Procfile) if you want to see how the processes are being started. In order to avoid
@ -114,22 +115,22 @@ Start by following the steps in [Development Environment](#development-environme
Then, you can run the tests and the perlcritic linter together with:
```console
$ nix develop
[nix-shell]$ just test
$ nix-shell
$ make check
```
You can run a single test with:
```
$ nix develop
[nix-shell]$ yath test ./t/foo/bar.t
$ nix-shell
$ yath test ./t/foo/bar.t
```
And you can run just perlcritic with:
```
$ nix develop
[nix-shell]$ just perlcritic
$ nix-shell
$ make perlcritic
```
### JSON API

doc/dev-notes.txt (new file, 122 lines)
View file

@ -0,0 +1,122 @@
* Recreating the schema bindings:
$ make -C src/sql update-dbix
* Running the test server:
$ DBIC_TRACE=1 ./script/hydra_server.pl
* Setting the maximum number of concurrent builds per system type:
$ psql -d hydra <<< "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
* Creating a user:
$ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
--password-prompt
(Replace "foobar" with the desired password.)
To make the user an admin:
$ hydra-create-user root --role admin
To enable a non-admin user to create projects:
$ hydra-create-user root --role create-projects
* Changing the priority of a scheduled build:
update buildschedulinginfo set priority = 200 where id = <ID>;
* Changing the priority of all builds for a jobset:
update buildschedulinginfo set priority = 20 where id in (select id from builds where finished = 0 and project = 'nixpkgs' and jobset = 'trunk');
* Steps to install (see the command sketch after this list):
- Install the Hydra closure.
- Set HYDRA_DATA to /somewhere.
- Run hydra_init.pl
- Start hydra_server
- Visit http://localhost:3000/
- Create a user (see above)
- Create a project, jobset etc.
- Start hydra_evaluator and hydra_queue_runner
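A minimal shell sketch of the steps above (the HYDRA_DATA path, the
default port, and backgrounding the daemons are assumptions):
$ export HYDRA_DATA=/somewhere
$ hydra_init.pl
$ hydra_server &    # then visit http://localhost:3000/
$ hydra-create-user root --password-prompt   # see "Creating a user" above
$ hydra_evaluator & hydra_queue_runner &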
* Job selection:
php-sat:build [system = "i686-linux"]
php-sat:build [same system]
tarball [same patchelfSrc]
--if system i686-linux --arg build {...}
* Restart all aborted builds in a given evaluation (e.g. 820909):
> update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus = 3 and exists (select 1 from jobsetevalmembers where eval = 820909 and build = id));
* Restart all builds in a given evaluation that had a build step time out:
> update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus != 0 and exists (select 1 from jobsetevalmembers where eval = 926992 and build = id) and exists (select 1 from buildsteps where build = id and status = 7));
* select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) x join builds y on x.timestamp = y.timestamp and x.project = y.project and x.jobset = y.jobset and x.job = y.job and x.system = y.system;
select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) natural join builds;
* Delete all scheduled builds that are not already building:
delete from builds where finished = 0 and not exists (select 1 from buildschedulinginfo s where s.id = builds.id and busy != 0);
* select x.project, x.jobset, x.job, x.system, x.id, x.timestamp, r.buildstatus, b.id, b.timestamp
from (select project, jobset, job, system, max(id) as id from Builds where finished = 1 group by project, jobset, job, system) as a_
natural join Builds x
natural join BuildResultInfo r
left join Builds b on b.id =
(select max(id) from builds c
natural join buildresultinfo r2
where x.project = c.project and x.jobset = c.jobset and x.job = c.job and x.system = c.system
and x.id > c.id and r.buildstatus != r2.buildstatus);
* Using PostgreSQL (version 9.2 or newer is required):
$ HYDRA_DBI="dbi:Pg:dbname=hydra;" hydra-server
* Find the builds with the highest number of build steps:
select id, (select count(*) from buildsteps where build = x.id) as n from builds x order by n desc;
* Evaluating the NixOS Hydra jobs:
$ ./hydra_eval_jobs ~/Dev/nixos-wc/release.nix --arg nixpkgs '{outPath = /home/eelco/Dev/nixpkgs-wc;}' --arg nixosSrc '{outPath = /home/eelco/Dev/nixos-wc; rev = 1234;}' --arg services '{outPath = /home/eelco/services-wc;}' --argstr system i686-linux --argstr system x86_64-linux --arg officialRelease false
* Show all the failing jobs/systems in the nixpkgs:stdenv jobset that
succeed in the nixpkgs:trunk jobset:
select job, system from builds b natural join buildresultinfo where project = 'nixpkgs' and jobset = 'stdenv' and iscurrent = 1 and finished = 1 and buildstatus != 0 and exists (select 1 from builds natural join buildresultinfo where project = 'nixpkgs' and jobset = 'trunk' and job = b.job and system = b.system and iscurrent = 1 and finished = 1 and buildstatus = 0) order by job, system;
* Get all Nixpkgs jobs that have never built successfully:
select project, jobset, job from builds b1
where project = 'nixpkgs' and jobset = 'trunk' and iscurrent = 1
group by project, jobset, job
having not exists
(select 1 from builds b2 where b1.project = b2.project and b1.jobset = b2.jobset and b1.job = b2.job and finished = 1 and buildstatus = 0)
order by project, jobset, job;

View file

@ -12,14 +12,15 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
and dependencies can be found:
```console
$ nix develop
$ nix-shell
```
To build Hydra, you should then do:
```console
[nix-shell]$ just setup
[nix-shell]$ just install
[nix-shell]$ autoreconfPhase
[nix-shell]$ configurePhase
[nix-shell]$ make
```
You start a local database, the webserver, and other components with
@ -40,13 +41,18 @@ $ ./src/script/hydra-server
You can run Hydra's test suite with the following:
```console
[nix-shell]$ just test
[nix-shell]$ make check
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
[nix-shell]$ # or run yath directly:
[nix-shell]$ yath test
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ yath test -j $NIX_BUILD_CORES
```
When using `yath` instead of `make check`, ensure you have run `make`
in the root of the repository at least once.
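For example, a typical first run combining the two might be:
```console
[nix-shell]$ make
[nix-shell]$ yath test -j $NIX_BUILD_CORES
```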
**Warning**: Currently, the tests can fail
if run with high parallelism [due to an issue in
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
@ -97,14 +103,3 @@ Off NixOS, change `/etc/nix/nix.conf`:
```conf
trusted-users = root YOURUSERNAME
```
### Updating schema bindings
```
just update-dbix
```
### Find the builds with the highest number of build steps:
select id, (select count(*) from buildsteps where build = x.id) as n from builds x order by n desc;

View file

@ -1,12 +1,9 @@
# Webhooks
Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
Hydra can be notified by github's webhook to trigger a new evaluation when a
jobset has a github repo in its input.
## GitHub
To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
and in the `Webhooks` tab click on `Add webhook`.
To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
click on `Add webhook`.
- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
- In `Content type` switch to `application/json`.
@ -14,14 +11,3 @@ and in the `Webhooks` tab click on `Add webhook`.
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
Then add the hook with `Add webhook`.
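If you want to test the endpoint by hand, you can deliver a minimal payload yourself. A sketch (the domain is a placeholder, and the payload is reduced to the repository fields Hydra inspects; real GitHub deliveries carry both `owner.name` and `owner.login`):
```
$ curl -X POST https://<your-hydra-domain>/api/push-github \
    -H 'Content-Type: application/json' \
    -d '{"repository": {"owner": {"name": "OWNER", "login": "OWNER"}, "name": "REPO"}}'
```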
## Gitea
To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
- Change the branch filter to match the git branch hydra builds.
Then add the hook with `Add webhook`.
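The Gitea endpoint can be exercised the same way; a sketch (placeholder URL; only the repository's `clone_url` is examined):
```
$ curl -X POST https://<your-hydra-domain>/api/push-gitea \
    -H 'Content-Type: application/json' \
    -d '{"repository": {"clone_url": "https://git.example.org/owner/repo.git"}}'
```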

View file

@ -24,11 +24,11 @@
]
},
"locked": {
"lastModified": 1722555600,
"narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
"lastModified": 1719994518,
"narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
"rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7",
"type": "github"
},
"original": {
@ -48,11 +48,11 @@
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1726905313,
"narHash": "sha256-jsOyXonevsNaKxM9burYc2S4JVle+VMCJ8+AAp0MDCc=",
"lastModified": 1723331518,
"narHash": "sha256-JVnQ3OLbXQAlkOluFc3gWhZMbhared1Rg5YvNEc92m0=",
"ref": "refs/heads/main",
"rev": "5f298f74c92402a8390b01c736463b17b36277e3",
"revCount": 16254,
"rev": "5137cea99044d54337e439510a647743110b2d7d",
"revCount": 16128,
"type": "git",
"url": "https://git.lix.systems/lix-project/lix"
},
@ -74,11 +74,11 @@
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1723579251,
"narHash": "sha256-xnHtfw0gRhV+2S9U7hQwvp2klTy1Iv7FlMMO0/WiMVc=",
"lastModified": 1721195872,
"narHash": "sha256-TlvRq634MSl22BWLmpTy2vdtKntbZlsUwdMq8Mp9AWs=",
"ref": "refs/heads/main",
"rev": "42a160bce2fd9ffebc3809746bc80cc7208f9b08",
"revCount": 609,
"rev": "c057494450f2d1420726ddb0bab145a5ff4ddfdd",
"revCount": 608,
"type": "git",
"url": "https://git.lix.systems/lix-project/nix-eval-jobs"
},
@ -126,11 +126,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1727129439,
"narHash": "sha256-nPyrcFm6FSk7CxzVW4x2hu62aLDghNcv9dX6DF3dXw8=",
"lastModified": 1723282977,
"narHash": "sha256-oTK91aOlA/4IsjNAZGMEBz7Sq1zBS0Ltu4/nIQdYDOg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "babc25a577c3310cce57c72d5bed70f4c3c3843a",
"rev": "a781ff33ae258bbcfd4ed6e673860c3e923bf2cc",
"type": "github"
},
"original": {
@ -187,11 +187,11 @@
]
},
"locked": {
"lastModified": 1723454642,
"narHash": "sha256-S0Gvsenh0II7EAaoc9158ZB4vYyuycvMGKGxIbERNAM=",
"lastModified": 1721059077,
"narHash": "sha256-gCICMMX7VMSKKt99giDDtRLkHJ0cwSgBtDijJAqTlto=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "349de7bc435bdff37785c2466f054ed1766173be",
"rev": "0fb28f237f83295b4dd05e342f333b447c097398",
"type": "github"
},
"original": {

View file

@ -73,21 +73,6 @@
default = pkgsBySystem.${system}.hydra;
});
devShells = forEachSystem (system: let
pkgs = pkgsBySystem.${system};
lib = pkgs.lib;
mkDevShell = stdenv: (pkgs.mkShell.override { inherit stdenv; }) {
inputsFrom = [ (self.packages.${system}.default.override { inherit stdenv; }) ];
packages =
lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) pkgs.clang-tools;
};
in {
default = mkDevShell pkgs.stdenv;
clang = mkDevShell pkgs.clangStdenv;
});
nixosModules = import ./nixos-modules {
overlays = overlayList;
};

View file

@ -3,4 +3,4 @@
# wait for hydra-server to listen
while ! nc -z localhost 63333; do sleep 1; done
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-evaluator
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-evaluator

View file

@ -28,4 +28,4 @@ use-substitutes = true
</hydra_notify>
EOF
fi
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-dev-server --port 63333 --restart --debug
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-dev-server --port 63333 --restart --debug

View file

@ -3,4 +3,4 @@
# wait for hydra-server to listen
while ! nc -z localhost 63333; do sleep 1; done
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-notify
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-notify

View file

@ -3,4 +3,4 @@
# wait until hydra is listening on port 63333
while ! nc -z localhost 63333; do sleep 1; done
NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-queue-runner
NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-queue-runner

View file

@ -1,17 +0,0 @@
setup *OPTIONS:
meson setup build --prefix="$PWD/outputs/out" $mesonFlags {{ OPTIONS }}
build *OPTIONS:
meson compile -C build {{ OPTIONS }}
install *OPTIONS: (build OPTIONS)
meson install -C build
test *OPTIONS:
meson test -C build --print-errorlogs {{ OPTIONS }}
update-dbix:
cd src/sql && ./update-dbix-harness.sh
perlcritic:
perlcritic .

View file

@ -37,7 +37,6 @@
, cacert
, foreman
, just
, glibcLocales
, libressl
, openldap
@ -191,8 +190,6 @@ stdenv.mkDerivation (finalAttrs: {
postgresql_13
pixz
nix-eval-jobs
perlPackages.PLS
just
];
checkInputs = [
@ -236,8 +233,8 @@ stdenv.mkDerivation (finalAttrs: {
shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null
PATH=$(pwd)/outputs/out/bin:$PATH
PERL5LIB=$(pwd)/src/lib:$(pwd)/t/lib:$PERL5LIB
PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
export HYDRA_HOME="$(pwd)/src/"
mkdir -p .hydra-data
export HYDRA_DATA="$(pwd)/.hydra-data"

View file

@ -14,12 +14,11 @@
#include <sys/wait.h>
#include <boost/format.hpp>
#include <utility>
using namespace nix;
using boost::format;
using JobsetName = std::pair<std::string, std::string>;
typedef std::pair<std::string, std::string> JobsetName;
class JobsetId {
public:
@ -29,8 +28,8 @@ class JobsetId {
int id;
JobsetId(std::string project, std::string jobset, int id)
: project{std::move( project )}, jobset{std::move( jobset )}, id{ id }
JobsetId(const std::string & project, const std::string & jobset, int id)
: project{ project }, jobset{ jobset }, id{ id }
{
}
@ -42,7 +41,7 @@ class JobsetId {
friend bool operator== (const JobsetId & lhs, const JobsetName & rhs);
friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
[[nodiscard]] std::string display() const {
std::string display() const {
return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
}
};
@ -89,11 +88,11 @@ struct Evaluator
JobsetId name;
std::optional<EvaluationStyle> evaluation_style;
time_t lastCheckedTime, triggerTime;
time_t checkInterval;
int checkInterval;
Pid pid;
};
using Jobsets = std::map<JobsetId, Jobset>;
typedef std::map<JobsetId, Jobset> Jobsets;
std::optional<JobsetName> evalOne;
@ -139,15 +138,13 @@ struct Evaluator
if (evalOne && name != *evalOne) continue;
auto res = state->jobsets.try_emplace(name, Jobset{.name=name});
auto res = state->jobsets.try_emplace(name, Jobset{name});
auto & jobset = res.first->second;
jobset.lastCheckedTime = row["lastCheckedTime"].as<time_t>(0);
jobset.triggerTime = row["triggerTime"].as<time_t>(notTriggered);
jobset.checkInterval = row["checkInterval"].as<time_t>();
int eval_style = row["jobset_enabled"].as<int>(0);
switch (eval_style) {
switch (row["jobset_enabled"].as<int>(0)) {
case 1:
jobset.evaluation_style = EvaluationStyle::SCHEDULE;
break;
@ -157,9 +154,6 @@ struct Evaluator
case 3:
jobset.evaluation_style = EvaluationStyle::ONE_AT_A_TIME;
break;
default:
// Disabled or unknown. Leave as nullopt.
break;
}
seen.insert(name);
@ -181,7 +175,7 @@ struct Evaluator
void startEval(State & state, Jobset & jobset)
{
time_t now = time(nullptr);
time_t now = time(0);
printInfo("starting evaluation of jobset %s (last checked %d s ago)",
jobset.name.display(),
@ -234,7 +228,7 @@ struct Evaluator
return false;
}
if (jobset.lastCheckedTime + jobset.checkInterval <= time(nullptr)) {
if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) {
// Time to schedule a fresh evaluation. If the jobset
// is a ONE_AT_A_TIME jobset, ensure the previous jobset
// has no remaining, unfinished work.
@ -307,7 +301,7 @@ struct Evaluator
/* Put jobsets in order of ascending trigger time, last checked
time, and name. */
std::ranges::sort(sorted,
std::sort(sorted.begin(), sorted.end(),
[](const Jobsets::iterator & a, const Jobsets::iterator & b) {
return
a->second.triggerTime != b->second.triggerTime
@ -330,7 +324,7 @@ struct Evaluator
while (true) {
time_t now = time(nullptr);
time_t now = time(0);
std::chrono::seconds sleepTime = std::chrono::seconds::max();
@ -417,7 +411,7 @@ struct Evaluator
printInfo("evaluation of jobset %s %s",
jobset.name.display(), statusToString(status));
auto now = time(nullptr);
auto now = time(0);
jobset.triggerTime = notTriggered;
jobset.lastCheckedTime = now;

View file

@ -1,6 +1,5 @@
#include <algorithm>
#include <cmath>
#include <ranges>
#include <sys/types.h>
#include <sys/stat.h>
@ -42,7 +41,6 @@ static Strings extraStoreArgs(std::string & machine)
}
} catch (BadURL &) {
// We just try to continue with `machine->sshName` here for backwards compat.
printMsg(lvlWarn, "could not parse machine URL '%s', passing through to SSH", machine);
}
return result;
@ -135,8 +133,8 @@ static void copyClosureTo(
auto sorted = destStore.topoSortPaths(closure);
StorePathSet missing;
for (auto & i : std::ranges::reverse_view(sorted))
if (!present.count(i)) missing.insert(i);
for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
if (!present.count(*i)) missing.insert(*i);
printMsg(lvlDebug, "sending %d missing paths", missing.size());
@ -306,12 +304,12 @@ static BuildResult performBuild(
time_t startTime, stopTime;
startTime = time(nullptr);
startTime = time(0);
{
MaintainCount<counter> mc(nrStepsBuilding);
result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
}
stopTime = time(nullptr);
stopTime = time(0);
if (!result.startTime) {
// If the builder gave `startTime = 0`, use our measurements
@ -340,10 +338,10 @@ static BuildResult performBuild(
// were known
assert(outputPath);
auto outputHash = outputHashes.at(outputName);
auto drvOutput = DrvOutput { .drvHash=outputHash, .outputName=outputName };
auto drvOutput = DrvOutput { outputHash, outputName };
result.builtOutputs.insert_or_assign(
std::move(outputName),
Realisation { .id=drvOutput, .outPath=*outputPath });
Realisation { drvOutput, *outputPath });
}
}
@ -636,7 +634,7 @@ void State::buildRemote(ref<Store> destStore,
* copying outputs and we end up building too many things that we
* haven't been able to allow copy slots for. */
assert(reservation.unique());
reservation = nullptr;
reservation = 0;
wakeDispatcher();
StorePathSet outputs;
@ -699,7 +697,7 @@ void State::buildRemote(ref<Store> destStore,
if (info->consecutiveFailures == 0 || info->lastFailure < now - std::chrono::seconds(30)) {
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
info->lastFailure = now;
int delta = static_cast<int>(retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30));
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
printMsg(lvlInfo, "will disable machine %1% for %2%s", machine->sshName, delta);
info->disabledUntil = now + std::chrono::seconds(delta);
}

View file

@ -1,7 +1,6 @@
#include "hydra-build-result.hh"
#include "store-api.hh"
#include "fs-accessor.hh"
#include "strings.hh"
#include <regex>

View file

@ -35,18 +35,10 @@ void State::builder(MachineReservation::ptr reservation)
activeSteps_.lock()->erase(activeStep);
});
auto conn(dbPool.get());
try {
auto destStore = getDestStore();
// Might release the reservation.
res = doBuildStep(destStore, reservation, *conn, activeStep);
} catch (pqxx::broken_connection & e) {
printMsg(lvlError, "db lost while building %s on %s: %s (retriable)",
localStore->printStorePath(activeStep->step->drvPath),
reservation ? reservation->machine->sshName : std::string("(no machine)"),
e.what());
conn.markBad();
res = doBuildStep(destStore, reservation, activeStep);
} catch (std::exception & e) {
printMsg(lvlError, "uncaught exception building %s on %s: %s",
localStore->printStorePath(activeStep->step->drvPath),
@ -58,7 +50,7 @@ void State::builder(MachineReservation::ptr reservation)
/* If the machine hasn't been released yet, release and wake up the dispatcher. */
if (reservation) {
assert(reservation.unique());
reservation = nullptr;
reservation = 0;
wakeDispatcher();
}
@ -72,7 +64,7 @@ void State::builder(MachineReservation::ptr reservation)
step_->tries++;
nrRetries++;
if (step_->tries > maxNrRetries) maxNrRetries = step_->tries; // yeah yeah, not atomic
int delta = static_cast<int>(retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10));
int delta = retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10);
printMsg(lvlInfo, "will retry %s after %ss", localStore->printStorePath(step->drvPath), delta);
step_->after = std::chrono::system_clock::now() + std::chrono::seconds(delta);
}
@ -84,7 +76,6 @@ void State::builder(MachineReservation::ptr reservation)
State::StepResult State::doBuildStep(nix::ref<Store> destStore,
MachineReservation::ptr & reservation,
Connection & conn,
std::shared_ptr<ActiveStep> activeStep)
{
auto step(reservation->step);
@ -115,6 +106,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
buildOptions.maxLogSize = maxLogSize;
buildOptions.enforceDeterminism = step->isDeterministic;
auto conn(dbPool.get());
{
std::set<Build::ptr> dependents;
std::set<Step::ptr> steps;
@ -139,7 +132,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
for (auto build2 : dependents) {
if (build2->drvPath == step->drvPath) {
build = build2;
pqxx::work txn(conn);
pqxx::work txn(*conn);
notifyBuildStarted(txn, build->id);
txn.commit();
}
@ -190,11 +183,11 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
}
});
time_t stepStartTime = result.startTime = time(nullptr);
time_t stepStartTime = result.startTime = time(0);
/* If any of the outputs have previously failed, then don't bother
building again. */
if (checkCachedFailure(step, conn))
if (checkCachedFailure(step, *conn))
result.stepStatus = bsCachedFailure;
else {
@ -202,13 +195,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
building. */
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
pqxx::work txn(*conn);
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
txn.commit();
}
auto updateStep = [&](StepState stepState) {
pqxx::work txn(conn);
pqxx::work txn(*conn);
updateBuildStep(txn, buildId, stepNr, stepState);
txn.commit();
};
@ -237,7 +230,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
}
}
time_t stepStopTime = time(nullptr);
time_t stepStopTime = time(0);
if (!result.stopTime) result.stopTime = stepStopTime;
/* For standard failures, we don't care about the error
@ -251,7 +244,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
auto step_(step->state.lock());
if (!step_->jobsets.empty()) {
// FIXME: loss of precision.
time_t charge = (result.stopTime - result.startTime) / static_cast<time_t>(step_->jobsets.size());
time_t charge = (result.stopTime - result.startTime) / step_->jobsets.size();
for (auto & jobset : step_->jobsets)
jobset->addStep(result.startTime, charge);
}
@ -259,7 +252,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
/* Finish the step in the database. */
if (stepNr) {
pqxx::work txn(conn);
pqxx::work txn(*conn);
finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
txn.commit();
}
@ -335,7 +328,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
pqxx::work txn(*conn);
for (auto & b : direct) {
printInfo("marking build %1% as succeeded", b->id);
@ -363,7 +356,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
/* Send notification about the builds that have this step as
the top-level. */
{
pqxx::work txn(conn);
pqxx::work txn(*conn);
for (auto id : buildIDs)
notifyBuildFinished(txn, id, {});
txn.commit();
@ -392,7 +385,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
}
} else
failStep(conn, step, buildId, result, machine, stepFinished);
failStep(*conn, step, buildId, result, machine, stepFinished);
// FIXME: keep stats about aborted steps?
nrStepsDone++;

View file

@ -46,7 +46,7 @@ void State::dispatcher()
auto t_after_work = std::chrono::steady_clock::now();
prom.dispatcher_time_spent_running.Increment(
static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count()));
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
/* Sleep until we're woken up (either because a runnable build
@ -63,7 +63,7 @@ void State::dispatcher()
auto t_after_sleep = std::chrono::steady_clock::now();
prom.dispatcher_time_spent_waiting.Increment(
static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count()));
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
} catch (std::exception & e) {
printError("dispatcher: %s", e.what());
@ -190,7 +190,7 @@ system_time State::doDispatch()
}
}
std::ranges::sort(runnableSorted,
sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
@ -240,11 +240,11 @@ system_time State::doDispatch()
- Then by speed factor.
- Finally by load. */
std::ranges::sort(machinesSorted,
sort(machinesSorted.begin(), machinesSorted.end(),
[](const MachineInfo & a, const MachineInfo & b) -> bool
{
float ta = std::round(static_cast<float>(a.currentJobs) / a.machine->speedFactorFloat);
float tb = std::round(static_cast<float>(b.currentJobs) / b.machine->speedFactorFloat);
float ta = std::round(a.currentJobs / a.machine->speedFactorFloat);
float tb = std::round(b.currentJobs / b.machine->speedFactorFloat);
return
ta != tb ? ta < tb :
a.machine->speedFactorFloat != b.machine->speedFactorFloat ? a.machine->speedFactorFloat > b.machine->speedFactorFloat :
@ -345,7 +345,7 @@ void State::abortUnsupported()
auto machines2 = *machines.lock();
system_time now = std::chrono::system_clock::now();
auto now2 = time(nullptr);
auto now2 = time(0);
std::unordered_set<Step::ptr> aborted;
@ -436,7 +436,7 @@ void Jobset::addStep(time_t startTime, time_t duration)
void Jobset::pruneSteps()
{
time_t now = time(nullptr);
time_t now = time(0);
auto steps_(steps.lock());
while (!steps_->empty()) {
auto i = steps_->begin();
@ -464,7 +464,7 @@ State::MachineReservation::~MachineReservation()
auto prev = machine->state->currentJobs--;
assert(prev);
if (prev == 1)
machine->state->idleSince = time(nullptr);
machine->state->idleSince = time(0);
{
auto machineTypes_(state.machineTypes.lock());

View file

@ -14,7 +14,7 @@ struct BuildProduct
bool isRegular = false;
std::optional<nix::Hash> sha256hash;
std::optional<off_t> fileSize;
BuildProduct() = default;
BuildProduct() { }
};
struct BuildMetric

View file

@ -105,7 +105,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
: config(std::make_unique<HydraConfig>())
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
, dbPool(config->getIntOption("max_db_connections", 128))
, localWorkThrottler(static_cast<ptrdiff_t>(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2))))
, localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
@ -138,7 +138,7 @@ nix::MaintainCount<counter> State::startDbUpdate()
{
if (nrActiveDbUpdates > 6)
printError("warning: %d concurrent database updates; PostgreSQL may be stalled", nrActiveDbUpdates.load());
return {nrActiveDbUpdates};
return MaintainCount<counter>(nrActiveDbUpdates);
}
@ -171,9 +171,9 @@ void State::parseMachines(const std::string & contents)
for (auto & f : mandatoryFeatures)
supportedFeatures.insert(f);
using MaxJobs = std::remove_const_t<decltype(nix::Machine::maxJobs)>;
using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;
auto machine = std::make_shared<::Machine>(::Machine {{
auto machine = std::make_shared<::Machine>(nix::Machine {
// `storeUri`, not yet used
"",
// `systemTypes`, not yet used
@ -194,11 +194,11 @@ void State::parseMachines(const std::string & contents)
tokens[7] != "" && tokens[7] != "-"
? base64Decode(tokens[7])
: "",
}});
});
machine->sshName = tokens[0];
machine->systemTypesSet = tokenizeString<StringSet>(tokens[1], ",");
machine->speedFactorFloat = static_cast<float>(atof(tokens[4].c_str()));
machine->speedFactorFloat = atof(tokens[4].c_str());
/* Re-use the State object of the previous machine with the
same name. */
@ -412,7 +412,7 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
}
unsigned int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
{
restart:
@ -594,7 +594,7 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
createDirs(dirOf(lockPath));
auto lock = std::make_shared<PathLocks>();
if (!lock->lockPaths(PathSet({lockPath}), "", false)) return nullptr;
if (!lock->lockPaths(PathSet({lockPath}), "", false)) return 0;
return lock;
}
@ -602,10 +602,10 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
void State::dumpStatus(Connection & conn)
{
time_t now = time(nullptr);
time_t now = time(0);
json statusJson = {
{"status", "up"},
{"time", time(nullptr)},
{"time", time(0)},
{"uptime", now - startedAt},
{"pid", getpid()},
@ -620,7 +620,7 @@ void State::dumpStatus(Connection & conn)
{"bytesReceived", bytesReceived.load()},
{"nrBuildsRead", nrBuildsRead.load()},
{"buildReadTimeMs", buildReadTimeMs.load()},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / (float) nrBuildsRead},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
{"nrBuildsDone", nrBuildsDone.load()},
{"nrStepsStarted", nrStepsStarted.load()},
{"nrStepsDone", nrStepsDone.load()},
@ -629,7 +629,7 @@ void State::dumpStatus(Connection & conn)
{"nrQueueWakeups", nrQueueWakeups.load()},
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
{"dispatchTimeMs", dispatchTimeMs.load()},
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / (float) nrDispatcherWakeups},
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
{"nrDbConnections", dbPool.count()},
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
};
@ -649,8 +649,8 @@ void State::dumpStatus(Connection & conn)
if (nrStepsDone) {
statusJson["totalStepTime"] = totalStepTime.load();
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
statusJson["avgStepTime"] = (float) totalStepTime / (float) nrStepsDone;
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / (float) nrStepsDone;
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
}
{
@ -677,8 +677,8 @@ void State::dumpStatus(Connection & conn)
if (m->state->nrStepsDone) {
machine["totalStepTime"] = s->totalStepTime.load();
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
machine["avgStepTime"] = (float) s->totalStepTime / (float) s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / (float) s->nrStepsDone;
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
}
statusJson["machines"][m->sshName] = machine;
}
@ -706,7 +706,7 @@ void State::dumpStatus(Connection & conn)
};
if (i.second.runnable > 0)
machineTypeJson["waitTime"] = i.second.waitTime.count() +
i.second.runnable * (time(nullptr) - lastDispatcherCheck);
i.second.runnable * (time(0) - lastDispatcherCheck);
if (i.second.running == 0)
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
}
@ -732,11 +732,11 @@ void State::dumpStatus(Connection & conn)
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
{"narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / (double) stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0},
{"narCompressionSpeed", // MiB/s
stats.narWriteCompressionTimeMs
? (double) stats.narWriteBytes / (double) stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
};
@ -749,20 +749,20 @@ void State::dumpStatus(Connection & conn)
{"putTimeMs", s3Stats.putTimeMs.load()},
{"putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / (double) s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"get", s3Stats.get.load()},
{"getBytes", s3Stats.getBytes.load()},
{"getTimeMs", s3Stats.getTimeMs.load()},
{"getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / (double) s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"head", s3Stats.head.load()},
{"costDollarApprox",
(double) (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ (double) s3Stats.put / 1000.0 * 0.005 +
+ (double) s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
};
}
}
@ -848,7 +848,7 @@ void State::run(BuildID buildOne)
/* Can't be bothered to shut down cleanly. Goodbye! */
auto callback = createInterruptCallback([&]() { std::_Exit(0); });
startedAt = time(nullptr);
startedAt = time(0);
this->buildOne = buildOne;
auto lock = acquireGlobalLock();

View file

@ -3,7 +3,6 @@
#include "archive.hh"
#include <unordered_set>
#include <utility>
using namespace nix;
@ -19,8 +18,8 @@ struct Extractor : ParseSink
NarMemberData * curMember = nullptr;
Path prefix;
Extractor(NarMemberDatas & members, Path prefix)
: members(members), prefix(std::move(prefix))
Extractor(NarMemberDatas & members, const Path & prefix)
: members(members), prefix(prefix)
{ }
void createDirectory(const Path & path) override

View file

@ -13,7 +13,7 @@ struct NarMemberData
std::optional<nix::Hash> sha256;
};
using NarMemberDatas = std::map<nix::Path, NarMemberData>;
typedef std::map<nix::Path, NarMemberData> NarMemberDatas;
/* Read a NAR from a source and get to some info about every file
inside the NAR. */

View file

@ -4,8 +4,7 @@
#include "thread-pool.hh"
#include <cstring>
#include <utility>
#include <csignal>
#include <signal.h>
using namespace nix;
@ -53,7 +52,7 @@ void State::queueMonitorLoop(Connection & conn)
auto t_after_work = std::chrono::steady_clock::now();
prom.queue_monitor_time_spent_running.Increment(
static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count()));
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
/* Sleep until we get notification from the database about an
event. */
@ -80,7 +79,7 @@ void State::queueMonitorLoop(Connection & conn)
auto t_after_sleep = std::chrono::steady_clock::now();
prom.queue_monitor_time_spent_waiting.Increment(
static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count()));
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
}
exit(0);
@ -89,7 +88,7 @@ void State::queueMonitorLoop(Connection & conn)
struct PreviousFailure : public std::exception {
Step::ptr step;
PreviousFailure(Step::ptr step) : step(std::move(step)) { }
PreviousFailure(Step::ptr step) : step(step) { }
};
@ -118,7 +117,7 @@ bool State::getQueuedBuilds(Connection & conn,
for (auto const & row : res) {
auto builds_(builds.lock());
auto id = row["id"].as<BuildID>();
BuildID id = row["id"].as<BuildID>();
if (buildOne && id != buildOne) continue;
if (builds_->count(id)) continue;
@ -138,7 +137,7 @@ bool State::getQueuedBuilds(Connection & conn,
newIDs.push_back(id);
newBuildsByID[id] = build;
newBuildsByPath.emplace(build->drvPath, id);
newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
}
}
@ -163,7 +162,7 @@ bool State::getQueuedBuilds(Connection & conn,
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
build->id,
(int) bsAborted,
time(nullptr));
time(0));
txn.commit();
build->finishedInDB = true;
nrBuildsDone++;
@ -177,7 +176,7 @@ bool State::getQueuedBuilds(Connection & conn,
/* Create steps for this derivation and its dependencies. */
try {
step = createStep(destStore, conn, build, build->drvPath,
build, nullptr, finishedDrvs, newSteps, newRunnable);
build, 0, finishedDrvs, newSteps, newRunnable);
} catch (PreviousFailure & ex) {
/* Some step previously failed, so mark the build as
@ -222,7 +221,7 @@ bool State::getQueuedBuilds(Connection & conn,
"where id = $1 and finished = 0",
build->id,
(int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
time(nullptr));
time(0));
notifyBuildFinished(txn, build->id, {});
txn.commit();
build->finishedInDB = true;
@ -255,7 +254,7 @@ bool State::getQueuedBuilds(Connection & conn,
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
time_t now = time(nullptr);
time_t now = time(0);
if (!buildOneDone && build->id == buildOne) buildOneDone = true;
printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
markSucceededBuild(txn, build, res, true, now, now);
@ -356,7 +355,7 @@ void State::processQueueChange(Connection & conn)
pqxx::work txn(conn);
auto res = txn.exec("select id, globalPriority from Builds where finished = 0");
for (auto const & row : res)
currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<int>();
currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<BuildID>();
}
{
@ -439,7 +438,7 @@ Step::ptr State::createStep(ref<Store> destStore,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
{
if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return nullptr;
if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;
/* Check if the requested step already exists. If not, create a
new step. In any case, make the step reachable from
@ -517,7 +516,7 @@ Step::ptr State::createStep(ref<Store> destStore,
std::map<DrvOutput, std::optional<StorePath>> paths;
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
auto outputHash = outputHashes.at(outputName);
paths.insert({{.drvHash=outputHash, .outputName=outputName}, maybeOutputPath});
paths.insert({{outputHash, outputName}, maybeOutputPath});
}
auto missing = getMissingRemotePaths(destStore, paths);
@ -561,7 +560,7 @@ Step::ptr State::createStep(ref<Store> destStore,
auto & path = *pathOpt;
try {
time_t startTime = time(nullptr);
time_t startTime = time(0);
if (localStore->isValidPath(path))
printInfo("copying output %1% of %2% from local store",
@ -579,7 +578,7 @@ Step::ptr State::createStep(ref<Store> destStore,
StorePathSet { path },
NoRepair, CheckSigs, NoSubstitute);
time_t stopTime = time(nullptr);
time_t stopTime = time(0);
{
auto mc = startDbUpdate();
@ -603,7 +602,7 @@ Step::ptr State::createStep(ref<Store> destStore,
// FIXME: check whether all outputs are in the binary cache.
if (valid) {
finishedDrvs.insert(drvPath);
return nullptr;
return 0;
}
/* No, we need to build. */
@ -611,7 +610,7 @@ Step::ptr State::createStep(ref<Store> destStore,
/* Create steps for the dependencies. */
for (auto & i : step->drv->inputDrvs.map) {
auto dep = createStep(destStore, conn, build, i.first, nullptr, step, finishedDrvs, newSteps, newRunnable);
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
step_->deps.insert(dep);
@ -659,11 +658,11 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
auto res2 = txn.exec_params
("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
"where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
time(nullptr) - Jobset::schedulingWindow * 10,
time(0) - Jobset::schedulingWindow * 10,
jobsetID);
for (auto const & row : res2) {
auto startTime = row["startTime"].as<time_t>();
auto stopTime = row["stopTime"].as<time_t>();
time_t startTime = row["startTime"].as<time_t>();
time_t stopTime = row["stopTime"].as<time_t>();
jobset->addStep(startTime, stopTime - startTime);
}
@ -703,7 +702,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
localStore->printStorePath(output));
if (r.empty()) continue;
auto id = r[0][0].as<BuildID>();
BuildID id = r[0][0].as<BuildID>();
printInfo("reusing build %d", id);

View file

@ -8,7 +8,6 @@
#include <queue>
#include <regex>
#include <semaphore>
#include <utility>
#include <prometheus/counter.h>
#include <prometheus/gauge.h>
@ -27,16 +26,16 @@
#include "machines.hh"
using BuildID = unsigned int;
typedef unsigned int BuildID;
using JobsetID = unsigned int;
typedef unsigned int JobsetID;
using system_time = std::chrono::time_point<std::chrono::system_clock>;
typedef std::chrono::time_point<std::chrono::system_clock> system_time;
using counter = std::atomic<unsigned long>;
typedef std::atomic<unsigned long> counter;
enum BuildStatus {
typedef enum {
bsSuccess = 0,
bsFailed = 1,
bsDepFailed = 2, // builds only
@ -50,10 +49,10 @@ enum BuildStatus {
bsNarSizeLimitExceeded = 11,
bsNotDeterministic = 12,
bsBusy = 100, // not stored
};
} BuildStatus;
enum StepState {
typedef enum {
ssPreparing = 1,
ssConnecting = 10,
ssSendingInputs = 20,
@ -61,7 +60,7 @@ enum StepState {
ssWaitingForLocalSlot = 35,
ssReceivingOutputs = 40,
ssPostProcessing = 50,
};
} StepState;
struct RemoteResult
@ -79,7 +78,7 @@ struct RemoteResult
unsigned int overhead = 0;
nix::Path logFile;
[[nodiscard]] BuildStatus buildStatus() const
BuildStatus buildStatus() const
{
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
}
@ -96,10 +95,10 @@ class Jobset
{
public:
using ptr = std::shared_ptr<Jobset>;
using wptr = std::weak_ptr<Jobset>;
typedef std::shared_ptr<Jobset> ptr;
typedef std::weak_ptr<Jobset> wptr;
static const time_t schedulingWindow = static_cast<time_t>(24 * 60 * 60);
static const time_t schedulingWindow = 24 * 60 * 60;
private:
@ -116,7 +115,7 @@ public:
return (double) seconds / shares;
}
void setShares(unsigned int shares_)
void setShares(int shares_)
{
assert(shares_ > 0);
shares = shares_;
@ -132,8 +131,8 @@ public:
struct Build
{
using ptr = std::shared_ptr<Build>;
using wptr = std::weak_ptr<Build>;
typedef std::shared_ptr<Build> ptr;
typedef std::weak_ptr<Build> wptr;
BuildID id;
nix::StorePath drvPath;
@ -164,8 +163,8 @@ struct Build
struct Step
{
using ptr = std::shared_ptr<Step>;
using wptr = std::weak_ptr<Step>;
typedef std::shared_ptr<Step> ptr;
typedef std::weak_ptr<Step> wptr;
nix::StorePath drvPath;
std::unique_ptr<nix::Derivation> drv;
@ -222,8 +221,13 @@ struct Step
nix::Sync<State> state;
Step(nix::StorePath drvPath) : drvPath(std::move(drvPath))
Step(const nix::StorePath & drvPath) : drvPath(drvPath)
{ }
~Step()
{
//printMsg(lvlError, format("destroying step %1%") % drvPath);
}
};
@ -235,7 +239,7 @@ void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
struct Machine : nix::Machine
{
using ptr = std::shared_ptr<Machine>;
typedef std::shared_ptr<Machine> ptr;
/* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
we are not yet used to, but once we are, we don't need this. */
@ -250,7 +254,7 @@ struct Machine : nix::Machine
float speedFactorFloat = 1.0;
struct State {
using ptr = std::shared_ptr<State>;
typedef std::shared_ptr<State> ptr;
counter currentJobs{0};
counter nrStepsDone{0};
counter totalStepTime{0}; // total time for steps, including closure copying
@ -354,22 +358,22 @@ private:
bool useSubstitutes = false;
/* The queued builds. */
using Builds = std::map<BuildID, Build::ptr>;
typedef std::map<BuildID, Build::ptr> Builds;
nix::Sync<Builds> builds;
/* The jobsets. */
using Jobsets = std::map<std::pair<std::string, std::string>, Jobset::ptr>;
typedef std::map<std::pair<std::string, std::string>, Jobset::ptr> Jobsets;
nix::Sync<Jobsets> jobsets;
/* All active or pending build steps (i.e. dependencies of the
queued builds). Note that these are weak pointers. Steps are
kept alive by being reachable from Builds or by being in
progress. */
using Steps = std::map<nix::StorePath, Step::wptr>;
typedef std::map<nix::StorePath, Step::wptr> Steps;
nix::Sync<Steps> steps;
/* Build steps that have no unbuilt dependencies. */
using Runnable = std::list<Step::wptr>;
typedef std::list<Step::wptr> Runnable;
nix::Sync<Runnable> runnable;
/* CV for waking up the dispatcher. */
@ -381,7 +385,7 @@ private:
/* The build machines. */
std::mutex machinesReadyLock;
using Machines = std::map<std::string, Machine::ptr>;
typedef std::map<std::string, Machine::ptr> Machines;
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr
/* Throttler for CPU-bound local work. */
@ -427,7 +431,7 @@ private:
struct MachineReservation
{
using ptr = std::shared_ptr<MachineReservation>;
typedef std::shared_ptr<MachineReservation> ptr;
State & state;
Step::ptr step;
Machine::ptr machine;
@ -530,7 +534,7 @@ private:
void finishBuildStep(pqxx::work & txn, const RemoteResult & result, BuildID buildId, unsigned int stepNr,
const std::string & machine);
unsigned int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);
void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
@ -590,7 +594,6 @@ private:
enum StepResult { sDone, sRetry, sMaybeCancelled };
StepResult doBuildStep(nix::ref<nix::Store> destStore,
MachineReservation::ptr & reservation,
Connection & conn,
std::shared_ptr<ActiveStep> activeStep);
void buildRemote(nix::ref<nix::Store> destStore,
@ -619,6 +622,8 @@ private:
void addRoot(const nix::StorePath & storePath);
void runMetricsExporter();
public:
void showStatus();

View file

@ -242,35 +242,23 @@ sub push : Chained('api') PathPart('push') Args(0) {
$c->{stash}->{json}->{jobsetsTriggered} = [];
my $force = exists $c->request->query_params->{force};
my @jobsetNames = split /,/, ($c->request->query_params->{jobsets} // "");
my @jobsets;
foreach my $s (@jobsetNames) {
my @jobsets = split /,/, ($c->request->query_params->{jobsets} // "");
foreach my $s (@jobsets) {
my ($p, $j) = parseJobsetName($s);
my $jobset = $c->model('DB::Jobsets')->find($p, $j);
push @jobsets, $jobset if defined $jobset;
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
triggerJobset($self, $c, $jobset, $force);
}
my @repos = split /,/, ($c->request->query_params->{repos} // "");
foreach my $r (@repos) {
foreach ($c->model('DB::Jobsets')->search(
triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
{ 'project.enabled' => 1, 'me.enabled' => 1 },
{
join => 'project',
where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
order_by => 'me.id DESC'
})) {
push @jobsets, $_;
}
}
foreach my $jobset (@jobsets) {
requireRestartPrivileges($c, $jobset->project);
}
foreach my $jobset (@jobsets) {
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
triggerJobset($self, $c, $jobset, $force);
});
}
$self->status_ok(
@ -285,7 +273,7 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
$c->{stash}->{json}->{jobsetsTriggered} = [];
my $in = $c->request->{data};
my $owner = $in->{repository}->{owner}->{login} or die;
my $owner = $in->{repository}->{owner}->{name} or die;
my $repo = $in->{repository}->{name} or die;
print STDERR "got push from GitHub repository $owner/$repo\n";
@ -297,23 +285,6 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
$c->response->body("");
}
sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
my ($self, $c) = @_;
$c->{stash}->{json}->{jobsetsTriggered} = [];
my $in = $c->request->{data};
my $url = $in->{repository}->{clone_url} or die;
$url =~ s/.git$//;
print STDERR "got push from Gitea repository $url\n";
triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
{ 'project.enabled' => 1, 'me.enabled' => 1 },
{ join => 'project'
, where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
});
$c->response->body("");
}
1;

View file

@ -35,7 +35,6 @@ sub noLoginNeeded {
return $whitelisted ||
$c->request->path eq "api/push-github" ||
$c->request->path eq "api/push-gitea" ||
$c->request->path eq "google-login" ||
$c->request->path eq "github-redirect" ||
$c->request->path eq "github-login" ||
@ -81,7 +80,7 @@ sub begin :Private {
$_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};
# XSRF protection: require POST requests to have the same origin.
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
my $referer = $c->req->header('Referer');
$referer //= $c->req->header('Origin');
my $base = $c->req->base;

View file

@ -412,7 +412,8 @@ sub readIntoSocket{
my $sock;
eval {
open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
my $x= join(" ", @{$args{cmd}});
open($sock, "-|", $x) or die q(failed to open socket from command:\n $x);
};
return $sock;

View file

@ -6,7 +6,7 @@
-- add a map of the lowercase name of your table to the CamelCase
-- version of your table.
--
-- 3. Run `just update-dbix` in the root
-- 3. Run `make -C src/sql update-dbix` in the root
-- of the project directory, and git add / git commit the changed,
-- generated files.
--

View file

@ -35,17 +35,6 @@ my $queuedBuilds = $ctx->makeAndEvaluateJobset(
build => 0
);
# Login and save cookie for future requests
my $req = request(POST '/login',
Referer => 'http://localhost/',
Content => {
username => 'root',
password => 'rootPassword'
}
);
is($req->code, 302, "Logging in gets a 302");
my $cookie = $req->header("set-cookie");
subtest "/api/queue" => sub {
my $response = request(GET '/api/queue?nr=1');
ok($response->is_success, "The API enpdoint showing the queue returns 200.");
@ -113,17 +102,6 @@ subtest "/api/nrbuilds" => sub {
};
subtest "/api/push" => sub {
subtest "without authentication" => sub {
my $build = $finishedBuilds->{"one_job"};
my $jobset = $build->jobset;
my $projectName = $jobset->project->name;
my $jobsetName = $jobset->name;
is($jobset->forceeval, undef, "The existing jobset is not set to be forced to eval");
my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1");
is($response->code, 403, "The API endpoint for triggering jobsets requires authentication.");
};
subtest "with a specific jobset" => sub {
my $build = $finishedBuilds->{"one_job"};
my $jobset = $build->jobset;
@ -131,8 +109,7 @@ subtest "/api/push" => sub {
my $jobsetName = $jobset->name;
is($jobset->forceeval, undef, "The existing jobset is not set to be forced to eval");
my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1",
Cookie => $cookie);
my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1");
ok($response->is_success, "The API enpdoint for triggering jobsets returns 200.");
my $data = is_json($response);
@ -151,8 +128,7 @@ subtest "/api/push" => sub {
print STDERR $repo;
my $response = request(GET "/api/push?repos=$repo&force=1",
Cookie => $cookie);
my $response = request(GET "/api/push?repos=$repo&force=1");
ok($response->is_success, "The API enpdoint for triggering jobsets returns 200.");
my $data = is_json($response);
@ -196,7 +172,7 @@ subtest "/api/push-github" => sub {
"Content" => encode_json({
repository => {
owner => {
login => "OWNER",
name => "OWNER",
},
name => "LEGACY-REPO",
}
@ -222,7 +198,7 @@ subtest "/api/push-github" => sub {
"Content" => encode_json({
repository => {
owner => {
login => "OWNER",
name => "OWNER",
},
name => "FLAKE-REPO",
}

View file

@ -11,14 +11,20 @@ my $ctx = test_context();
Catalyst::Test->import('Hydra');
$ctx->db(); # Ensure DB initialization.
my $user = $ctx->db()->resultset('Users')->create({
username => 'alice',
emailaddress => 'root@invalid.org',
password => '!'
});
$user->setPassword('foobar');
$user->userroles->update_or_create({ role => 'admin' });
# Login and save cookie for future requests
my $req = request(POST '/login',
Referer => 'http://localhost/',
Content => {
username => 'root',
password => 'rootPassword'
username => 'alice',
password => 'foobar'
}
);
is($req->code, 302, "Logging in gets a 302");

View file

@ -115,13 +115,11 @@ sub db {
$self->{_db} = Hydra::Model::DB->new();
if (!(defined $setup && $setup == 0)) {
my $user = $self->{_db}->resultset('Users')->create({
$self->{_db}->resultset('Users')->create({
username => "root",
emailaddress => 'root@invalid.org',
password => '!'
password => ''
});
$user->setPassword('rootPassword');
$user->userroles->update_or_create({ role => 'admin' });
}
}