forked from lix-project/hydra
Compare commits
30 commits
Author | SHA1 | Date
---|---|---
Maximilian Bosch | ee1234c15c |
Maximilian Bosch | 7c7078cccf |
Maximilian Bosch | a5099d9e80 |
 | 799441dcf6 |
 | e4d466ffcd |
raito | d3257e4761 |
Ilya K | f23ec71227 |
leo60228 | ac37e44982 |
Maximilian Bosch | 6a88e647e7 |
Pierre Bourdon | 8d5d4942e1 |
Pierre Bourdon | e5a8ee5c17 |
Pierre Bourdon | fd7fd0ad65 |
Pierre Bourdon | d3fcedbcf5 |
Pierre Bourdon | 3891ad77e3 |
Pierre Bourdon | 21fd1f8993 |
emily | ab6d81fad4 |
 | 64df0cba47 |
 | 6179b298cb |
Pierre Bourdon | 44b9a7b95d |
Maximilian Bosch | 3ee51dbe58 |
Maximilian Bosch | e987f74954 |
Maximilian Bosch | 1f802c008c |
Maximilian Bosch | 3a4e0d4917 |
Maximilian Bosch | 3517acc5ba |
71rd | 459aa0a598 |
eldritch horrors | f1b552ecbf |
Pierre Bourdon | db8c2cc4a8 |
 | 8858abb1a6 |
 | ef619eca99 |
 | 41dfa0e443 |
.clang-tidy (new file, 12 lines)

@@ -0,0 +1,12 @@
+UseColor: true
+Checks:
+  - -*
+
+  - bugprone-*
+  # kind of nonsense
+  - -bugprone-easily-swappable-parameters
+  # many warnings due to not recognizing `assert` properly
+  - -bugprone-unchecked-optional-access
+
+  - modernize-*
+  - -modernize-use-trailing-return-type
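For context, a small illustration (hypothetical code, not from the Hydra tree) of the false-positive pattern that motivates disabling `bugprone-easily-swappable-parameters`:

```cpp
#include <string>

// clang-tidy's bugprone-easily-swappable-parameters warns here because the
// two adjacent std::string parameters can be passed in the wrong order with
// no compile error. For a codebase full of (project, jobset) pairs, as in
// Hydra, the check flags nearly every such signature, hence "kind of
// nonsense" above.
static std::string jobsetKey(const std::string & project, const std::string & jobset)
{
    return project + ":" + jobset;
}

int main()
{
    // Both calls compile; only the first is correct.
    auto good = jobsetKey("nixpkgs", "trunk");
    auto bad  = jobsetKey("trunk", "nixpkgs"); // silently swapped
    return good == bad ? 1 : 0;
}
```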
.github/ISSUE_TEMPLATE/bug_report.md (deleted, 37 lines)

@@ -1,37 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: bug
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Hydra Server:**
-
-Please fill out this data as well as you can, but don't worry if you can't -- just do your best.
-
-- OS and version: [e.g. NixOS 22.05.20211203.ee3794c]
-- Version of Hydra
-- Version of Nix Hydra is built against
-- Version of the Nix daemon
-
-
-**Additional context**
-Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (deleted, 20 lines)

@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
.github/dependabot.yml (deleted, 6 lines)

@@ -1,6 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "weekly"
.github/workflows/test.yml (deleted, 14 lines)

@@ -1,14 +0,0 @@
-name: "Test"
-on:
-  pull_request:
-  push:
-jobs:
-  tests:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v3
-      with:
-        fetch-depth: 0
-    - uses: cachix/install-nix-action@v17
-    #- run: nix flake check
-    - run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
.gitignore (2 lines changed)

@@ -5,3 +5,5 @@
 /src/sql/tmp.sqlite
 result
 result-*
+.hydra-data
+outputs
README.md (27 lines changed)

@@ -78,11 +78,11 @@ $ nix-build
 ### Development Environment
 
 You can use the provided shell.nix to get a working development environment:
 
 ```
-$ nix-shell
-$ autoreconfPhase
-$ configurePhase # NOTE: not ./configure
-$ make
+$ nix develop
+[nix-shell]$ just setup
+[nix-shell]$ just install
 ```
 
 ### Executing Hydra During Development

@@ -91,10 +91,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
 can be done using [foreman](https://github.com/ddollar/foreman):
 
 ```
-$ nix-shell
-$ # hack hack
-$ make
-$ foreman start
+$ nix develop
+[nix-shell]$ just install
+[nix-shell]$ foreman start
 ```
 
 Have a look at the [Procfile](./Procfile) if you want to see how the processes are being started. In order to avoid

@@ -115,22 +114,22 @@ Start by following the steps in [Development Environment](#development-environment)
 Then, you can run the tests and the perlcritic linter together with:
 
 ```console
-$ nix-shell
-$ make check
+$ nix develop
+[nix-shell]$ just test
 ```
 
 You can run a single test with:
 
 ```
-$ nix-shell
-$ yath test ./t/foo/bar.t
+$ nix develop
+[nix-shell]$ yath test ./t/foo/bar.t
 ```
 
 And you can run just perlcritic with:
 
 ```
-$ nix-shell
-$ make perlcritic
+$ nix develop
+[nix-shell]$ just perlcritic
 ```
 
 ### JSON API
@ -1,122 +0,0 @@
|
|||
* Recreating the schema bindings:
|
||||
|
||||
$ make -C src/sql update-dbix
|
||||
|
||||
* Running the test server:
|
||||
|
||||
$ DBIC_TRACE=1 ./script/hydra_server.pl
|
||||
|
||||
* Setting the maximum number of concurrent builds per system type:
|
||||
|
||||
$ psql -d hydra <<< "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
|
||||
|
||||
* Creating a user:
|
||||
|
||||
$ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
|
||||
--password-prompt
|
||||
|
||||
(Replace "foobar" with the desired password.)
|
||||
|
||||
To make the user an admin:
|
||||
|
||||
$ hydra-create-user root --role admin
|
||||
|
||||
To enable a non-admin user to create projects:
|
||||
|
||||
$ hydra-create-user root --role create-projects
|
||||
|
||||
* Changing the priority of a scheduled build:
|
||||
|
||||
update buildschedulinginfo set priority = 200 where id = <ID>;
|
||||
|
||||
* Changing the priority of all builds for a jobset:
|
||||
|
||||
update buildschedulinginfo set priority = 20 where id in (select id from builds where finished = 0 and project = 'nixpkgs' and jobset = 'trunk');
|
||||
|
||||
|
||||
* Steps to install:
|
||||
|
||||
- Install the Hydra closure.
|
||||
|
||||
- Set HYDRA_DATA to /somewhere.
|
||||
|
||||
- Run hydra_init.pl
|
||||
|
||||
- Start hydra_server
|
||||
|
||||
- Visit http://localhost:3000/
|
||||
|
||||
- Create a user (see above)
|
||||
|
||||
- Create a project, jobset etc.
|
||||
|
||||
- Start hydra_evaluator and hydra_queue_runner
|
||||
|
||||
|
||||
* Job selection:
|
||||
|
||||
php-sat:build [system = "i686-linux"]
|
||||
php-sat:build [same system]
|
||||
tarball [same patchelfSrc]
|
||||
--if system i686-linux --arg build {...}
|
||||
|
||||
|
||||
* Restart all aborted builds in a given evaluation (e.g. 820909):
|
||||
|
||||
> update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus = 3 and exists (select 1 from jobsetevalmembers where eval = 820909 and build = id));
|
||||
|
||||
|
||||
* Restart all builds in a given evaluation that had a build step time out:
|
||||
|
||||
> update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus != 0 and exists (select 1 from jobsetevalmembers where eval = 926992 and build = id) and exists (select 1 from buildsteps where build = id and status = 7));
|
||||
|
||||
|
||||
* select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) x join builds y on x.timestamp = y.timestamp and x.project = y.project and x.jobset = y.jobset and x.job = y.job and x.system = y.system;
|
||||
|
||||
select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) natural join builds;
|
||||
|
||||
|
||||
* Delete all scheduled builds that are not already building:
|
||||
|
||||
delete from builds where finished = 0 and not exists (select 1 from buildschedulinginfo s where s.id = builds.id and busy != 0);
|
||||
|
||||
|
||||
* select x.project, x.jobset, x.job, x.system, x.id, x.timestamp, r.buildstatus, b.id, b.timestamp
|
||||
from (select project, jobset, job, system, max(id) as id from Builds where finished = 1 group by project, jobset, job, system) as a_
|
||||
natural join Builds x
|
||||
natural join BuildResultInfo r
|
||||
left join Builds b on b.id =
|
||||
(select max(id) from builds c
|
||||
natural join buildresultinfo r2
|
||||
where x.project = c.project and x.jobset = c.jobset and x.job = c.job and x.system = c.system
|
||||
and x.id > c.id and r.buildstatus != r2.buildstatus);
|
||||
|
||||
* Using PostgreSQL (version 9.2 or newer is required):
|
||||
|
||||
$ HYDRA_DBI="dbi:Pg:dbname=hydra;" hydra-server
|
||||
|
||||
|
||||
* Find the builds with the highest number of build steps:
|
||||
|
||||
select id, (select count(*) from buildsteps where build = x.id) as n from builds x order by n desc;
|
||||
|
||||
|
||||
* Evaluating the NixOS Hydra jobs:
|
||||
|
||||
$ ./hydra_eval_jobs ~/Dev/nixos-wc/release.nix --arg nixpkgs '{outPath = /home/eelco/Dev/nixpkgs-wc;}' --arg nixosSrc '{outPath = /home/eelco/Dev/nixos-wc; rev = 1234;}' --arg services '{outhPath = /home/eelco/services-wc;}' --argstr system i686-linux --argstr system x86_64-linux --arg officialRelease false
|
||||
|
||||
|
||||
* Show all the failing jobs/systems in the nixpkgs:stdenv jobset that
|
||||
succeed in the nixpkgs:trunk jobset:
|
||||
|
||||
select job, system from builds b natural join buildresultinfo where project = 'nixpkgs' and jobset = 'stdenv' and iscurrent = 1 and finished = 1 and buildstatus != 0 and exists (select 1 from builds natural join buildresultinfo where project = 'nixpkgs' and jobset = 'trunk' and job = b.job and system = b.system and iscurrent = 1 and finished = 1 and buildstatus = 0) order by job, system;
|
||||
|
||||
|
||||
* Get all Nixpkgs jobs that have never built succesfully:
|
||||
|
||||
select project, jobset, job from builds b1
|
||||
where project = 'nixpkgs' and jobset = 'trunk' and iscurrent = 1
|
||||
group by project, jobset, job
|
||||
having not exists
|
||||
(select 1 from builds b2 where b1.project = b2.project and b1.jobset = b2.jobset and b1.job = b2.job and finished = 1 and buildstatus = 0)
|
||||
order by project, jobset, job;
|
|
@ -12,15 +12,14 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
|
|||
and dependencies can be found:
|
||||
|
||||
```console
|
||||
$ nix-shell
|
||||
$ nix develop
|
||||
```
|
||||
|
||||
To build Hydra, you should then do:
|
||||
|
||||
```console
|
||||
[nix-shell]$ autoreconfPhase
|
||||
[nix-shell]$ configurePhase
|
||||
[nix-shell]$ make
|
||||
[nix-shell]$ just setup
|
||||
[nix-shell]$ just install
|
||||
```
|
||||
|
||||
You start a local database, the webserver, and other components with
|
||||
|
@ -30,6 +29,8 @@ foreman:
|
|||
$ foreman start
|
||||
```
|
||||
|
||||
The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"
|
||||
|
||||
You can run just the Hydra web server in your source tree as follows:
|
||||
|
||||
```console
|
||||
|
@ -39,18 +40,13 @@ $ ./src/script/hydra-server
|
|||
You can run Hydra's test suite with the following:
|
||||
|
||||
```console
|
||||
[nix-shell]$ make check
|
||||
[nix-shell]$ # to run as many tests as you have cores:
|
||||
[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
|
||||
[nix-shell]$ just test
|
||||
[nix-shell]$ # or run yath directly:
|
||||
[nix-shell]$ yath test
|
||||
[nix-shell]$ # to run as many tests as you have cores:
|
||||
[nix-shell]$ yath test -j $NIX_BUILD_CORES
|
||||
```
|
||||
|
||||
When using `yath` instead of `make check`, ensure you have run `make`
|
||||
in the root of the repository at least once.
|
||||
|
||||
**Warning**: Currently, the tests can fail
|
||||
if run with high parallelism [due to an issue in
|
||||
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
|
||||
|
@ -101,3 +97,14 @@ Off NixOS, change `/etc/nix/nix.conf`:
|
|||
```conf
|
||||
trusted-users = root YOURUSERNAME
|
||||
```
|
||||
|
||||
### Updating schema bindings
|
||||
|
||||
```
|
||||
just update-dbix
|
||||
```
|
||||
|
||||
### Find the builds with the highest number of build steps:
|
||||
|
||||
select id, (select count(*) from buildsteps where build = x.id) as n from builds x order by n desc;
|
||||
|
||||
|
|
|
@@ -1,9 +1,12 @@
 # Webhooks
 
-Hydra can be notified by github's webhook to trigger a new evaluation when a
+Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
 jobset has a github repo in its input.
-To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
-click on `Add webhook`.
+
+## GitHub
+
+To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
+and in the `Webhooks` tab click on `Add webhook`.
 
 - In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
 - In `Content type` switch to `application/json`.

@@ -11,3 +14,14 @@ click on `Add webhook`.
 - For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
 
 Then add the hook with `Add webhook`.
+
+## Gitea
+
+To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
+and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
+
+- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
+- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
+- Change the branch filter to match the git branch hydra builds.
+
+Then add the hook with `Add webhook`.
flake.lock (46 lines changed)

@@ -24,11 +24,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1719994518,
-        "narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=",
+        "lastModified": 1722555600,
+        "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
         "owner": "hercules-ci",
         "repo": "flake-parts",
-        "rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7",
+        "rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
         "type": "github"
       },
       "original": {

@@ -48,11 +48,11 @@
         "pre-commit-hooks": "pre-commit-hooks"
       },
       "locked": {
-        "lastModified": 1721091462,
-        "narHash": "sha256-0cmEeoOiB91BviTJHzIyxkY+Gxv3O8ZnnExVAoXEFGI=",
+        "lastModified": 1728163191,
+        "narHash": "sha256-SW0IEBsPN1EysqzvfDT+8Kimtzy03O1BxQQm7ZB6fRY=",
         "ref": "refs/heads/main",
-        "rev": "6b4d46e9e0e1dd80e0977684ab20d14bcd1a6bc3",
-        "revCount": 15967,
+        "rev": "ed9b7f4f84fd60ad8618645cc1bae2d686ff0db6",
+        "revCount": 16323,
         "type": "git",
         "url": "https://git.lix.systems/lix-project/lix"
       },

@@ -74,11 +74,11 @@
         "treefmt-nix": "treefmt-nix"
      },
      "locked": {
-        "lastModified": 1721195872,
-        "narHash": "sha256-TlvRq634MSl22BWLmpTy2vdtKntbZlsUwdMq8Mp9AWs=",
+        "lastModified": 1723579251,
+        "narHash": "sha256-xnHtfw0gRhV+2S9U7hQwvp2klTy1Iv7FlMMO0/WiMVc=",
         "ref": "refs/heads/main",
-        "rev": "c057494450f2d1420726ddb0bab145a5ff4ddfdd",
-        "revCount": 608,
+        "rev": "42a160bce2fd9ffebc3809746bc80cc7208f9b08",
+        "revCount": 609,
         "type": "git",
         "url": "https://git.lix.systems/lix-project/nix-eval-jobs"
      },

@@ -111,11 +111,11 @@
     "nix2container": {
       "flake": false,
       "locked": {
-        "lastModified": 1712990762,
-        "narHash": "sha256-hO9W3w7NcnYeX8u8cleHiSpK2YJo7ecarFTUlbybl7k=",
+        "lastModified": 1720642556,
+        "narHash": "sha256-qsnqk13UmREKmRT7c8hEnz26X3GFFyIQrqx4EaRc1Is=",
         "owner": "nlewo",
         "repo": "nix2container",
-        "rev": "20aad300c925639d5d6cbe30013c8357ce9f2a2e",
+        "rev": "3853e5caf9ad24103b13aa6e0e8bcebb47649fe4",
         "type": "github"
       },
       "original": {

@@ -126,11 +126,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1720691131,
-        "narHash": "sha256-CWT+KN8aTPyMIx8P303gsVxUnkinIz0a/Cmasz1jyIM=",
+        "lastModified": 1728193676,
+        "narHash": "sha256-PbDWAIjKJdlVg+qQRhzdSor04bAPApDqIv2DofTyynk=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "a046c1202e11b62cbede5385ba64908feb7bfac4",
+        "rev": "ecbc1ca8ffd6aea8372ad16be9ebbb39889e55b6",
         "type": "github"
       },
       "original": {

@@ -159,11 +159,11 @@
     "pre-commit-hooks": {
       "flake": false,
       "locked": {
-        "lastModified": 1712055707,
-        "narHash": "sha256-4XLvuSIDZJGS17xEwSrNuJLL7UjDYKGJSbK1WWX2AK8=",
+        "lastModified": 1721042469,
+        "narHash": "sha256-6FPUl7HVtvRHCCBQne7Ylp4p+dpP3P/OYuzjztZ4s70=",
         "owner": "cachix",
         "repo": "git-hooks.nix",
-        "rev": "e35aed5fda3cc79f88ed7f1795021e559582093a",
+        "rev": "f451c19376071a90d8c58ab1a953c6e9840527fd",
         "type": "github"
       },
       "original": {

@@ -187,11 +187,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1721059077,
-        "narHash": "sha256-gCICMMX7VMSKKt99giDDtRLkHJ0cwSgBtDijJAqTlto=",
+        "lastModified": 1723454642,
+        "narHash": "sha256-S0Gvsenh0II7EAaoc9158ZB4vYyuycvMGKGxIbERNAM=",
         "owner": "numtide",
         "repo": "treefmt-nix",
-        "rev": "0fb28f237f83295b4dd05e342f333b447c097398",
+        "rev": "349de7bc435bdff37785c2466f054ed1766173be",
         "type": "github"
       },
       "original": {
flake.nix (15 lines changed)

@@ -73,6 +73,21 @@
       default = pkgsBySystem.${system}.hydra;
     });
 
+    devShells = forEachSystem (system: let
+      pkgs = pkgsBySystem.${system};
+      lib = pkgs.lib;
+
+      mkDevShell = stdenv: (pkgs.mkShell.override { inherit stdenv; }) {
+        inputsFrom = [ (self.packages.${system}.default.override { inherit stdenv; }) ];
+
+        packages =
+          lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) pkgs.clang-tools;
+      };
+    in {
+      default = mkDevShell pkgs.stdenv;
+      clang = mkDevShell pkgs.clangStdenv;
+    });
+
     nixosModules = import ./nixos-modules {
       overlays = overlayList;
     };
@@ -3,4 +3,4 @@
 # wait for hydra-server to listen
 while ! nc -z localhost 63333; do sleep 1; done
 
-HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-evaluator
+HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-evaluator
@@ -28,4 +28,4 @@ use-substitutes = true
 </hydra_notify>
 EOF
 fi
-HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-dev-server --port 63333 --restart --debug
+HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-dev-server --port 63333 --restart --debug
|
|||
# wait for hydra-server to listen
|
||||
while ! nc -z localhost 63333; do sleep 1; done
|
||||
|
||||
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-notify
|
||||
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-notify
|
||||
|
|
|
@@ -3,4 +3,4 @@
 # wait until hydra is listening on port 63333
 while ! nc -z localhost 63333; do sleep 1; done
 
-NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-queue-runner
+NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-queue-runner
justfile (new file, 17 lines)

@@ -0,0 +1,17 @@
+setup *OPTIONS:
+    meson setup build --prefix="$PWD/outputs/out" $mesonFlags {{ OPTIONS }}
+
+build *OPTIONS:
+    meson compile -C build {{ OPTIONS }}
+
+install *OPTIONS: (build OPTIONS)
+    meson install -C build
+
+test *OPTIONS:
+    meson test -C build --print-errorlogs {{ OPTIONS }}
+
+update-dbix:
+    cd src/sql && ./update-dbix-harness.sh
+
+perlcritic:
+    perlcritic .
@@ -37,6 +37,7 @@
 
 , cacert
 , foreman
+, just
 , glibcLocales
 , libressl
 , openldap

@@ -190,6 +191,8 @@ stdenv.mkDerivation (finalAttrs: {
     postgresql_13
     pixz
     nix-eval-jobs
+    perlPackages.PLS
+    just
   ];
 
   checkInputs = [

@@ -233,8 +236,8 @@ stdenv.mkDerivation (finalAttrs: {
   shellHook = ''
     pushd $(git rev-parse --show-toplevel) >/dev/null
 
-    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
-    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
+    PATH=$(pwd)/outputs/out/bin:$PATH
+    PERL5LIB=$(pwd)/src/lib:$(pwd)/t/lib:$PERL5LIB
     export HYDRA_HOME="$(pwd)/src/"
     mkdir -p .hydra-data
     export HYDRA_DATA="$(pwd)/.hydra-data"
@@ -14,11 +14,12 @@
 #include <sys/wait.h>
 
 #include <boost/format.hpp>
+#include <utility>
 
 using namespace nix;
 using boost::format;
 
-typedef std::pair<std::string, std::string> JobsetName;
+using JobsetName = std::pair<std::string, std::string>;
 
 class JobsetId {
     public:

@@ -28,8 +29,8 @@ class JobsetId {
     int id;
 
 
-    JobsetId(const std::string & project, const std::string & jobset, int id)
-        : project{ project }, jobset{ jobset }, id{ id }
+    JobsetId(std::string project, std::string jobset, int id)
+        : project{std::move( project )}, jobset{std::move( jobset )}, id{ id }
     {
     }

@@ -41,7 +42,7 @@ class JobsetId {
     friend bool operator== (const JobsetId & lhs, const JobsetName & rhs);
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
 
-    std::string display() const {
+    [[nodiscard]] std::string display() const {
         return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };

@@ -88,11 +89,11 @@ struct Evaluator
         JobsetId name;
         std::optional<EvaluationStyle> evaluation_style;
         time_t lastCheckedTime, triggerTime;
-        int checkInterval;
+        time_t checkInterval;
         Pid pid;
     };
 
-    typedef std::map<JobsetId, Jobset> Jobsets;
+    using Jobsets = std::map<JobsetId, Jobset>;
 
     std::optional<JobsetName> evalOne;

@@ -138,13 +139,15 @@ struct Evaluator
 
             if (evalOne && name != *evalOne) continue;
 
-            auto res = state->jobsets.try_emplace(name, Jobset{name});
+            auto res = state->jobsets.try_emplace(name, Jobset{.name=name});
 
             auto & jobset = res.first->second;
             jobset.lastCheckedTime = row["lastCheckedTime"].as<time_t>(0);
             jobset.triggerTime = row["triggerTime"].as<time_t>(notTriggered);
             jobset.checkInterval = row["checkInterval"].as<time_t>();
-            switch (row["jobset_enabled"].as<int>(0)) {
+
+            int eval_style = row["jobset_enabled"].as<int>(0);
+            switch (eval_style) {
                 case 1:
                     jobset.evaluation_style = EvaluationStyle::SCHEDULE;
                     break;

@@ -154,6 +157,9 @@ struct Evaluator
                 case 3:
                     jobset.evaluation_style = EvaluationStyle::ONE_AT_A_TIME;
                     break;
+                default:
+                    // Disabled or unknown. Leave as nullopt.
+                    break;
             }
 
             seen.insert(name);

@@ -175,7 +181,7 @@ struct Evaluator
 
     void startEval(State & state, Jobset & jobset)
    {
-        time_t now = time(0);
+        time_t now = time(nullptr);
 
         printInfo("starting evaluation of jobset ‘%s’ (last checked %d s ago)",
             jobset.name.display(),

@@ -228,7 +234,7 @@ struct Evaluator
             return false;
         }
 
-        if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) {
+        if (jobset.lastCheckedTime + jobset.checkInterval <= time(nullptr)) {
             // Time to schedule a fresh evaluation. If the jobset
             // is a ONE_AT_A_TIME jobset, ensure the previous jobset
             // has no remaining, unfinished work.

@@ -301,7 +307,7 @@ struct Evaluator
 
         /* Put jobsets in order of ascending trigger time, last checked
            time, and name. */
-        std::sort(sorted.begin(), sorted.end(),
+        std::ranges::sort(sorted,
             [](const Jobsets::iterator & a, const Jobsets::iterator & b) {
                 return
                     a->second.triggerTime != b->second.triggerTime

@@ -324,7 +330,7 @@ struct Evaluator
 
         while (true) {
 
-            time_t now = time(0);
+            time_t now = time(nullptr);
 
             std::chrono::seconds sleepTime = std::chrono::seconds::max();

@@ -411,7 +417,7 @@ struct Evaluator
         printInfo("evaluation of jobset ‘%s’ %s",
             jobset.name.display(), statusToString(status));
 
-        auto now = time(0);
+        auto now = time(nullptr);
 
         jobset.triggerTime = notTriggered;
         jobset.lastCheckedTime = now;
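The evaluator changes above bundle three small modernizations: pass-by-value constructor parameters handed off with `std::move`, C++20 designated initializers (`Jobset{.name=name}`), and `time(nullptr)` over `time(0)`. A self-contained sketch of all three, with simplified types rather than Hydra's actual declarations:

```cpp
#include <ctime>
#include <string>
#include <utility>

struct JobsetKey {
    std::string project, jobset;
    // Take by value and move: callers passing temporaries pay no extra copy,
    // and lvalue callers pay exactly one copy, as before.
    JobsetKey(std::string project, std::string jobset)
        : project{std::move(project)}, jobset{std::move(jobset)} {}
};

struct Jobset {
    JobsetKey name;
    time_t lastCheckedTime = 0;
};

int main()
{
    // Designated initializer: the field name makes the call site
    // self-documenting, as in `Jobset{.name=name}` in the diff.
    Jobset j{.name = JobsetKey{"nixpkgs", "trunk"}};
    // time() takes a pointer argument; nullptr states that intent, 0 hides it.
    j.lastCheckedTime = time(nullptr);
    return j.lastCheckedTime > 0 ? 0 : 1;
}
```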
@@ -1,5 +1,6 @@
 #include <algorithm>
 #include <cmath>
+#include <ranges>
 
 #include <sys/types.h>
 #include <sys/stat.h>

@@ -41,6 +42,7 @@ static Strings extraStoreArgs(std::string & machine)
         }
     } catch (BadURL &) {
+        // We just try to continue with `machine->sshName` here for backwards compat.
         printMsg(lvlWarn, "could not parse machine URL '%s', passing through to SSH", machine);
     }
 
     return result;

@@ -133,8 +135,8 @@ static void copyClosureTo(
     auto sorted = destStore.topoSortPaths(closure);
 
     StorePathSet missing;
-    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
-        if (!present.count(*i)) missing.insert(*i);
+    for (auto & i : std::ranges::reverse_view(sorted))
+        if (!present.count(i)) missing.insert(i);
 
     printMsg(lvlDebug, "sending %d missing paths", missing.size());

@@ -304,12 +306,12 @@ static BuildResult performBuild(
 
     time_t startTime, stopTime;
 
-    startTime = time(0);
+    startTime = time(nullptr);
     {
         MaintainCount<counter> mc(nrStepsBuilding);
         result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
     }
-    stopTime = time(0);
+    stopTime = time(nullptr);
 
     if (!result.startTime) {
         // If the builder gave `startTime = 0`, use our measurements

@@ -338,10 +340,10 @@ static BuildResult performBuild(
             // were known
             assert(outputPath);
             auto outputHash = outputHashes.at(outputName);
-            auto drvOutput = DrvOutput { outputHash, outputName };
+            auto drvOutput = DrvOutput { .drvHash=outputHash, .outputName=outputName };
             result.builtOutputs.insert_or_assign(
                 std::move(outputName),
-                Realisation { drvOutput, *outputPath });
+                Realisation { .id=drvOutput, .outPath=*outputPath });
         }
     }

@@ -368,7 +370,7 @@ static std::map<StorePath, ValidPathInfo> queryPathInfos(
         auto references = ServeProto::Serialise<StorePathSet>::read(localStore, conn);
         readLongLong(conn.from); // download size
         auto narSize = readLongLong(conn.from);
-        auto narHash = Hash::parseAny(readString(conn.from), htSHA256);
+        auto narHash = Hash::parseAny(readString(conn.from), HashType::SHA256);
         auto ca = ContentAddress::parseOpt(readString(conn.from));
         readStrings<StringSet>(conn.from); // sigs
         ValidPathInfo info(localStore.parseStorePath(storePathS), narHash);

@@ -397,8 +399,7 @@ static void copyPathFromRemote(
     /* Receive the NAR from the remote and add it to the
        destination store. Meanwhile, extract all the info from the
        NAR that getBuildOutput() needs. */
-    auto source2 = sinkToSource([&](Sink & sink)
-    {
+    auto coro = [&]() -> WireFormatGenerator {
         /* Note: we should only send the command to dump the store
            path to the remote if the NAR is actually going to get read
           by the destination store, which won't happen if this path

@@ -409,11 +410,11 @@ static void copyPathFromRemote(
         conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
         conn.to.flush();
 
-        TeeSource tee(conn.from, sink);
-        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
-    });
+        co_yield extractNarDataFilter(conn.from, localStore.printStorePath(info.path), narMembers);
+    };
+    GeneratorSource source2{coro()};
 
-    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+    destStore.addToStore(info, source2, NoRepair, NoCheckSigs);
 }
 
 static void copyPathsFromRemote(

@@ -624,6 +625,7 @@ void State::buildRemote(ref<Store> destStore,
     /* Throttle CPU-bound work. Opportunistically skip updating the current
      * step, since this requires a DB roundtrip. */
     if (!localWorkThrottler.try_acquire()) {
+        MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
         updateStep(ssWaitingForLocalSlot);
         localWorkThrottler.acquire();
     }

@@ -635,7 +637,7 @@ void State::buildRemote(ref<Store> destStore,
      * copying outputs and we end up building too many things that we
      * haven't been able to allow copy slots for. */
     assert(reservation.unique());
-    reservation = 0;
+    reservation = nullptr;
     wakeDispatcher();
 
     StorePathSet outputs;

@@ -698,7 +700,7 @@ void State::buildRemote(ref<Store> destStore,
         if (info->consecutiveFailures == 0 || info->lastFailure < now - std::chrono::seconds(30)) {
             info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
             info->lastFailure = now;
-            int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
+            int delta = static_cast<int>(retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30));
             printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
             info->disabledUntil = now + std::chrono::seconds(delta);
         }
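The `localWorkThrottler` hunk above uses a try-then-block acquire so the queue runner can record that a step is waiting before it parks on the semaphore. A standalone sketch of the same pattern with `std::counting_semaphore` (the counter here is illustrative; Hydra uses its own metric counters and `MaintainCount`):

```cpp
#include <atomic>
#include <semaphore>
#include <thread>
#include <vector>

// Allow at most 4 concurrent CPU-bound tasks.
std::counting_semaphore<4> localWorkThrottler{4};
std::atomic<int> waiting{0};

void worker()
{
    // try_acquire() fails without blocking, giving the worker a chance to
    // note that it is about to wait (Hydra bumps a gauge and updates the
    // step status) before falling back to the blocking acquire().
    if (!localWorkThrottler.try_acquire()) {
        ++waiting;
        localWorkThrottler.acquire();
        --waiting;
    }
    // ... CPU-bound work (NAR copying, in Hydra's case) ...
    localWorkThrottler.release();
}

int main()
{
    std::vector<std::jthread> threads;
    for (int i = 0; i < 16; ++i) threads.emplace_back(worker);
    // jthreads join on destruction; at most 4 workers run at once.
}
```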
@@ -1,6 +1,7 @@
 #include "hydra-build-result.hh"
 #include "store-api.hh"
 #include "fs-accessor.hh"
+#include "strings.hh"
 
 #include <regex>

@@ -34,11 +35,8 @@ BuildOutput getBuildOutput(
         auto outputS = store->printStorePath(output);
         if (!narMembers.count(outputS)) {
             printInfo("fetching NAR contents of '%s'...", outputS);
-            auto source = sinkToSource([&](Sink & sink)
-            {
-                sink << store->narFromPath(output);
-            });
-            extractNarData(*source, outputS, narMembers);
+            GeneratorSource source{store->narFromPath(output)};
+            extractNarData(source, outputS, narMembers);
         }
     }
@@ -1,5 +1,6 @@
 #include <cmath>
 
+#include "error.hh"
 #include "state.hh"
 #include "hydra-build-result.hh"
 #include "finally.hh"

@@ -35,10 +36,18 @@ void State::builder(MachineReservation::ptr reservation)
         activeSteps_.lock()->erase(activeStep);
     });
 
+    auto conn(dbPool.get());
+
     try {
         auto destStore = getDestStore();
         // Might release the reservation.
-        res = doBuildStep(destStore, reservation, activeStep);
+        res = doBuildStep(destStore, reservation, *conn, activeStep);
+    } catch (pqxx::broken_connection & e) {
+        printMsg(lvlError, "db lost while building ‘%s’ on ‘%s’: %s (retriable)",
+            localStore->printStorePath(activeStep->step->drvPath),
+            reservation ? reservation->machine->sshName : std::string("(no machine)"),
+            e.what());
+        conn.markBad();
     } catch (std::exception & e) {
         printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
             localStore->printStorePath(activeStep->step->drvPath),

@@ -50,7 +59,7 @@ void State::builder(MachineReservation::ptr reservation)
     /* If the machine hasn't been released yet, release and wake up the dispatcher. */
     if (reservation) {
         assert(reservation.unique());
-        reservation = 0;
+        reservation = nullptr;
         wakeDispatcher();
     }

@@ -64,7 +73,7 @@ void State::builder(MachineReservation::ptr reservation)
         step_->tries++;
         nrRetries++;
         if (step_->tries > maxNrRetries) maxNrRetries = step_->tries; // yeah yeah, not atomic
-        int delta = retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10);
+        int delta = static_cast<int>(retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10));
         printMsg(lvlInfo, "will retry ‘%s’ after %ss", localStore->printStorePath(step->drvPath), delta);
         step_->after = std::chrono::system_clock::now() + std::chrono::seconds(delta);
     }

@@ -76,6 +85,7 @@ void State::builder(MachineReservation::ptr reservation)
 
 State::StepResult State::doBuildStep(nix::ref<Store> destStore,
     MachineReservation::ptr & reservation,
+    Connection & conn,
     std::shared_ptr<ActiveStep> activeStep)
 {
     auto step(reservation->step);

@@ -106,8 +116,6 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
     buildOptions.maxLogSize = maxLogSize;
     buildOptions.enforceDeterminism = step->isDeterministic;
 
-    auto conn(dbPool.get());
-
     {
         std::set<Build::ptr> dependents;
         std::set<Step::ptr> steps;

@@ -132,7 +140,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         for (auto build2 : dependents) {
             if (build2->drvPath == step->drvPath) {
                 build = build2;
-                pqxx::work txn(*conn);
+                pqxx::work txn(conn);
                 notifyBuildStarted(txn, build->id);
                 txn.commit();
             }

@@ -178,16 +186,16 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
                 unlink(result.logFile.c_str());
             }
         } catch (...) {
-            ignoreException();
+            ignoreExceptionInDestructor();
         }
     }
     });
 
-    time_t stepStartTime = result.startTime = time(0);
+    time_t stepStartTime = result.startTime = time(nullptr);
 
     /* If any of the outputs have previously failed, then don't bother
        building again. */
-    if (checkCachedFailure(step, *conn))
+    if (checkCachedFailure(step, conn))
         result.stepStatus = bsCachedFailure;
     else {

@@ -195,13 +203,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
            building. */
         {
             auto mc = startDbUpdate();
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
             stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
             txn.commit();
         }
 
         auto updateStep = [&](StepState stepState) {
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
             updateBuildStep(txn, buildId, stepNr, stepState);
             txn.commit();
         };

@@ -230,7 +238,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         }
     }
 
-    time_t stepStopTime = time(0);
+    time_t stepStopTime = time(nullptr);
     if (!result.stopTime) result.stopTime = stepStopTime;
 
     /* For standard failures, we don't care about the error

@@ -244,7 +252,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         auto step_(step->state.lock());
         if (!step_->jobsets.empty()) {
             // FIXME: loss of precision.
-            time_t charge = (result.stopTime - result.startTime) / step_->jobsets.size();
+            time_t charge = (result.stopTime - result.startTime) / static_cast<time_t>(step_->jobsets.size());
             for (auto & jobset : step_->jobsets)
                 jobset->addStep(result.startTime, charge);
         }

@@ -252,7 +260,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
     /* Finish the step in the database. */
     if (stepNr) {
-        pqxx::work txn(*conn);
+        pqxx::work txn(conn);
         finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
         txn.commit();
     }

@@ -328,7 +336,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         {
             auto mc = startDbUpdate();
 
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
 
             for (auto & b : direct) {
                 printInfo("marking build %1% as succeeded", b->id);

@@ -356,7 +364,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         /* Send notification about the builds that have this step as
            the top-level. */
         {
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
             for (auto id : buildIDs)
                 notifyBuildFinished(txn, id, {});
             txn.commit();

@@ -385,7 +393,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         }
 
     } else
-        failStep(*conn, step, buildId, result, machine, stepFinished);
+        failStep(conn, step, buildId, result, machine, stepFinished);
 
     // FIXME: keep stats about aborted steps?
     nrStepsDone++;
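The retry hunks here and in the remote-build code compute the same capped exponential backoff with random jitter; a runnable sketch of that formula (the constants are illustrative, not Hydra's configured `retryInterval`/`retryBackoff` values):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>

// Mirrors the diff's retry logic: delay = interval * backoff^(tries-1) + jitter,
// with the attempt count capped so the delay cannot grow without bound.
int retryDelay(unsigned int tries)
{
    const double retryInterval = 60.0; // illustrative base delay, seconds
    const double retryBackoff = 3.0;   // illustrative multiplier
    tries = std::min(tries, 4u);       // cap, like consecutiveFailures above
    // The static_cast makes the double-to-int narrowing explicit,
    // which is exactly the change the diff applies.
    return static_cast<int>(retryInterval * std::pow(retryBackoff, tries - 1)
                            + (rand() % 30));
}

int main()
{
    for (unsigned int t = 1; t <= 5; ++t)
        printf("attempt %u -> wait %ds\n", t, retryDelay(t));
}
```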
@@ -46,7 +46,7 @@ void State::dispatcher()
             auto t_after_work = std::chrono::steady_clock::now();
 
             prom.dispatcher_time_spent_running.Increment(
-                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count()));
             dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
 
             /* Sleep until we're woken up (either because a runnable build

@@ -63,7 +63,7 @@ void State::dispatcher()
 
             auto t_after_sleep = std::chrono::steady_clock::now();
             prom.dispatcher_time_spent_waiting.Increment(
-                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count()));
 
         } catch (std::exception & e) {
             printError("dispatcher: %s", e.what());

@@ -190,7 +190,7 @@ system_time State::doDispatch()
         }
     }
 
-    sort(runnableSorted.begin(), runnableSorted.end(),
+    std::ranges::sort(runnableSorted,
         [](const StepInfo & a, const StepInfo & b)
         {
             return

@@ -240,11 +240,11 @@ system_time State::doDispatch()
        - Then by speed factor.
 
        - Finally by load. */
-    sort(machinesSorted.begin(), machinesSorted.end(),
+    std::ranges::sort(machinesSorted,
         [](const MachineInfo & a, const MachineInfo & b) -> bool
         {
-            float ta = std::round(a.currentJobs / a.machine->speedFactorFloat);
-            float tb = std::round(b.currentJobs / b.machine->speedFactorFloat);
+            float ta = std::round(static_cast<float>(a.currentJobs) / a.machine->speedFactorFloat);
+            float tb = std::round(static_cast<float>(b.currentJobs) / b.machine->speedFactorFloat);
             return
                 ta != tb ? ta < tb :
                 a.machine->speedFactorFloat != b.machine->speedFactorFloat ? a.machine->speedFactorFloat > b.machine->speedFactorFloat :

@@ -345,7 +345,7 @@ void State::abortUnsupported()
     auto machines2 = *machines.lock();
 
     system_time now = std::chrono::system_clock::now();
-    auto now2 = time(0);
+    auto now2 = time(nullptr);
 
     std::unordered_set<Step::ptr> aborted;

@@ -436,7 +436,7 @@ void Jobset::addStep(time_t startTime, time_t duration)
 
 void Jobset::pruneSteps()
 {
-    time_t now = time(0);
+    time_t now = time(nullptr);
     auto steps_(steps.lock());
     while (!steps_->empty()) {
         auto i = steps_->begin();

@@ -464,7 +464,7 @@ State::MachineReservation::~MachineReservation()
     auto prev = machine->state->currentJobs--;
     assert(prev);
     if (prev == 1)
-        machine->state->idleSince = time(0);
+        machine->state->idleSince = time(nullptr);
 
     {
         auto machineTypes_(state.machineTypes.lock());
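The two sort call sites above switch from iterator pairs to C++20 `std::ranges::sort` and cast the integer job count before the float division. A minimal standalone example of both (struct fields simplified from the diff):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct MachineInfo {
    unsigned int currentJobs;
    float speedFactor;
};

int main()
{
    std::vector<MachineInfo> machines{{4, 2.0f}, {1, 1.0f}, {3, 2.0f}};
    // std::ranges::sort takes the range itself, not begin()/end(); the
    // static_cast avoids the implicit unsigned-to-float conversion that
    // the diff also makes explicit.
    std::ranges::sort(machines, [](const MachineInfo & a, const MachineInfo & b) {
        return std::round(static_cast<float>(a.currentJobs) / a.speedFactor)
             < std::round(static_cast<float>(b.currentJobs) / b.speedFactor);
    });
    for (auto & m : machines) printf("%u\n", m.currentJobs); // least-loaded first
}
```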
@@ -14,7 +14,7 @@ struct BuildProduct
     bool isRegular = false;
     std::optional<nix::Hash> sha256hash;
     std::optional<off_t> fileSize;
-    BuildProduct() { }
+    BuildProduct() = default;
 };
 
 struct BuildMetric
@@ -105,7 +105,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
     : config(std::make_unique<HydraConfig>())
     , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
     , dbPool(config->getIntOption("max_db_connections", 128))
-    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
+    , localWorkThrottler(static_cast<ptrdiff_t>(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2))))
     , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
     , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
     , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))

@@ -138,7 +138,7 @@ nix::MaintainCount<counter> State::startDbUpdate()
 {
     if (nrActiveDbUpdates > 6)
         printError("warning: %d concurrent database updates; PostgreSQL may be stalled", nrActiveDbUpdates.load());
-    return MaintainCount<counter>(nrActiveDbUpdates);
+    return {nrActiveDbUpdates};
 }

@@ -171,9 +171,9 @@ void State::parseMachines(const std::string & contents)
         for (auto & f : mandatoryFeatures)
             supportedFeatures.insert(f);
 
-        using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;
+        using MaxJobs = std::remove_const_t<decltype(nix::Machine::maxJobs)>;
 
-        auto machine = std::make_shared<::Machine>(nix::Machine {
+        auto machine = std::make_shared<::Machine>(::Machine {{
             // `storeUri`, not yet used
             "",
             // `systemTypes`, not yet used

@@ -194,11 +194,11 @@ void State::parseMachines(const std::string & contents)
             tokens[7] != "" && tokens[7] != "-"
                 ? base64Decode(tokens[7])
                 : "",
-        });
+        }});
 
         machine->sshName = tokens[0];
         machine->systemTypesSet = tokenizeString<StringSet>(tokens[1], ",");
-        machine->speedFactorFloat = atof(tokens[4].c_str());
+        machine->speedFactorFloat = static_cast<float>(atof(tokens[4].c_str()));
 
         /* Re-use the State object of the previous machine with the
            same name. */

@@ -412,7 +412,7 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
 }
 
 
-int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
+unsigned int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
     Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
 {
 restart:

@@ -534,7 +534,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
                 product.type,
                 product.subtype,
                 product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
-                product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
+                product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base::Base16, false)) : std::nullopt,
                 product.path,
                 product.name,
                 product.defaultPath);

@@ -594,7 +594,7 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
     createDirs(dirOf(lockPath));
 
     auto lock = std::make_shared<PathLocks>();
-    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return 0;
+    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return nullptr;
 
     return lock;
 }

@@ -602,10 +602,10 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
 
 void State::dumpStatus(Connection & conn)
 {
-    time_t now = time(0);
+    time_t now = time(nullptr);
     json statusJson = {
         {"status", "up"},
-        {"time", time(0)},
+        {"time", time(nullptr)},
         {"uptime", now - startedAt},
         {"pid", getpid()},

@@ -613,6 +613,7 @@ void State::dumpStatus(Connection & conn)
         {"nrActiveSteps", activeSteps_.lock()->size()},
         {"nrStepsBuilding", nrStepsBuilding.load()},
         {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
+        {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
         {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
         {"nrStepsWaiting", nrStepsWaiting.load()},
         {"nrUnsupportedSteps", nrUnsupportedSteps.load()},

@@ -620,7 +621,7 @@ void State::dumpStatus(Connection & conn)
         {"bytesReceived", bytesReceived.load()},
         {"nrBuildsRead", nrBuildsRead.load()},
         {"buildReadTimeMs", buildReadTimeMs.load()},
-        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
+        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / (float) nrBuildsRead},
         {"nrBuildsDone", nrBuildsDone.load()},
         {"nrStepsStarted", nrStepsStarted.load()},
         {"nrStepsDone", nrStepsDone.load()},

@@ -629,7 +630,7 @@ void State::dumpStatus(Connection & conn)
         {"nrQueueWakeups", nrQueueWakeups.load()},
         {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
         {"dispatchTimeMs", dispatchTimeMs.load()},
-        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
+        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / (float) nrDispatcherWakeups},
         {"nrDbConnections", dbPool.count()},
         {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
     };

@@ -649,8 +650,8 @@ void State::dumpStatus(Connection & conn)
     if (nrStepsDone) {
         statusJson["totalStepTime"] = totalStepTime.load();
         statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
-        statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
-        statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
+        statusJson["avgStepTime"] = (float) totalStepTime / (float) nrStepsDone;
+        statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / (float) nrStepsDone;
     }
 
     {

@@ -677,8 +678,8 @@ void State::dumpStatus(Connection & conn)
             if (m->state->nrStepsDone) {
                 machine["totalStepTime"] = s->totalStepTime.load();
                 machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
-                machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
-                machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
+                machine["avgStepTime"] = (float) s->totalStepTime / (float) s->nrStepsDone;
+                machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / (float) s->nrStepsDone;
             }
             statusJson["machines"][m->sshName] = machine;
         }

@@ -706,7 +707,7 @@ void State::dumpStatus(Connection & conn)
             };
             if (i.second.runnable > 0)
                 machineTypeJson["waitTime"] = i.second.waitTime.count() +
-                    i.second.runnable * (time(0) - lastDispatcherCheck);
+                    i.second.runnable * (time(nullptr) - lastDispatcherCheck);
             if (i.second.running == 0)
                 machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
         }

@@ -732,11 +733,11 @@ void State::dumpStatus(Connection & conn)
             {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
             {"narCompressionSavings",
                 stats.narWriteBytes
-                ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+                ? 1.0 - (double) stats.narWriteCompressedBytes / (double) stats.narWriteBytes
                 : 0.0},
             {"narCompressionSpeed", // MiB/s
                 stats.narWriteCompressionTimeMs
-                ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
+                ? (double) stats.narWriteBytes / (double) stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
                 : 0.0},
         };

@@ -749,20 +750,20 @@ void State::dumpStatus(Connection & conn)
             {"putTimeMs", s3Stats.putTimeMs.load()},
             {"putSpeed",
                 s3Stats.putTimeMs
-                ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
+                ? (double) s3Stats.putBytes / (double) s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
                 : 0.0},
             {"get", s3Stats.get.load()},
             {"getBytes", s3Stats.getBytes.load()},
             {"getTimeMs", s3Stats.getTimeMs.load()},
             {"getSpeed",
                 s3Stats.getTimeMs
-                ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
+                ? (double) s3Stats.getBytes / (double) s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
                 : 0.0},
             {"head", s3Stats.head.load()},
             {"costDollarApprox",
-                (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
-                + s3Stats.put / 1000.0 * 0.005 +
-                + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
+                (double) (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+                + (double) s3Stats.put / 1000.0 * 0.005 +
+                + (double) s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
         };
     }
 }

@@ -848,7 +849,7 @@ void State::run(BuildID buildOne)
     /* Can't be bothered to shut down cleanly. Goodbye! */
     auto callback = createInterruptCallback([&]() { std::_Exit(0); });
 
-    startedAt = time(0);
+    startedAt = time(nullptr);
     this->buildOne = buildOne;
 
     auto lock = acquireGlobalLock();
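Most of the `dumpStatus` changes above are one mechanical fix: making the conversion explicit on both operands of a mixed integer/floating-point division. A short standalone illustration (variable names echo the diff, but the snippet is self-contained):

```cpp
#include <cstdio>

int main()
{
    long dispatchTimeMs = 7, nrDispatcherWakeups = 2;
    // The old code relied on implicit long -> float conversion of the
    // right-hand operand. That already divides in floating point, so the
    // result was correct; casting both operands just states the intent
    // and silences stricter conversion warnings.
    float avg = (float) dispatchTimeMs / (float) nrDispatcherWakeups;
    printf("%.1f\n", avg); // 3.5
}
```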
@@ -3,11 +3,41 @@
 #include "archive.hh"
 
 #include <unordered_set>
+#include <utility>
 
 using namespace nix;
 
-struct Extractor : ParseSink
+struct Extractor : NARParseVisitor
 {
+    class MyFileHandle : public FileHandle
+    {
+        NarMemberData & memberData;
+        uint64_t expectedSize;
+        std::unique_ptr<HashSink> hashSink;
+
+    public:
+        MyFileHandle(NarMemberData & memberData, uint64_t size) : memberData(memberData), expectedSize(size)
+        {
+            hashSink = std::make_unique<HashSink>(HashType::SHA256);
+        }
+
+        void receiveContents(std::string_view data) override
+        {
+            *memberData.fileSize += data.size();
+            (*hashSink)(data);
+            if (memberData.contents) {
+                memberData.contents->append(data);
+            }
+            assert(memberData.fileSize <= expectedSize);
+            if (memberData.fileSize == expectedSize) {
+                auto [hash, len] = hashSink->finish();
+                assert(memberData.fileSize == len);
+                memberData.sha256 = hash;
+                hashSink.reset();
+            }
+        }
+    };
+
     std::unordered_set<Path> filesToKeep {
         "/nix-support/hydra-build-products",
         "/nix-support/hydra-release-name",

@@ -15,11 +45,10 @@ struct Extractor : ParseSink
     };
 
     NarMemberDatas & members;
-    NarMemberData * curMember = nullptr;
     Path prefix;
 
-    Extractor(NarMemberDatas & members, const Path & prefix)
-        : members(members), prefix(prefix)
+    Extractor(NarMemberDatas & members, Path prefix)
+        : members(members), prefix(std::move(prefix))
     { }
 
     void createDirectory(const Path & path) override

@@ -27,41 +56,15 @@ struct Extractor : ParseSink
         members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
     }
 
-    void createRegularFile(const Path & path) override
+    std::unique_ptr<FileHandle> createRegularFile(const Path & path, uint64_t size, bool executable) override
     {
-        curMember = &members.insert_or_assign(prefix + path, NarMemberData {
+        auto memberData = &members.insert_or_assign(prefix + path, NarMemberData {
             .type = FSAccessor::Type::tRegular,
             .fileSize = 0,
             .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
         }).first->second;
-    }
-
-    std::optional<uint64_t> expectedSize;
-    std::unique_ptr<HashSink> hashSink;
-
-    void preallocateContents(uint64_t size) override
-    {
-        expectedSize = size;
-        hashSink = std::make_unique<HashSink>(htSHA256);
-    }
-
-    void receiveContents(std::string_view data) override
-    {
-        assert(expectedSize);
-        assert(curMember);
-        assert(hashSink);
-        *curMember->fileSize += data.size();
-        (*hashSink)(data);
-        if (curMember->contents) {
-            curMember->contents->append(data);
-        }
-        assert(curMember->fileSize <= expectedSize);
-        if (curMember->fileSize == expectedSize) {
-            auto [hash, len] = hashSink->finish();
-            assert(curMember->fileSize == len);
-            curMember->sha256 = hash;
-            hashSink.reset();
-        }
+        return std::make_unique<MyFileHandle>(*memberData, size);
     }
 
     void createSymlink(const Path & path, const std::string & target) override

@@ -76,7 +79,19 @@ void extractNarData(
     const Path & prefix,
     NarMemberDatas & members)
 {
-    Extractor extractor(members, prefix);
-    parseDump(extractor, source);
-    // Note: this point may not be reached if we're in a coroutine.
+    auto parser = extractNarDataFilter(source, prefix, members);
+    while (parser.next()) {
+        // ignore raw data
+    }
+}
+
+nix::WireFormatGenerator extractNarDataFilter(
+    Source & source,
+    const Path & prefix,
+    NarMemberDatas & members)
+{
+    return [](Source & source, const Path & prefix, NarMemberDatas & members) -> WireFormatGenerator {
+        Extractor extractor(members, prefix);
+        co_yield parseAndCopyDump(extractor, source);
+    }(source, prefix, members);
 }
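The restructuring above moves per-file state off the visitor and into a handle object returned by `createRegularFile`, so size and hash bookkeeping cannot leak between files. A schematic of that design (the types here are simplified, hypothetical stand-ins, not lix's actual `NARParseVisitor`/`FileHandle` API):

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <string_view>

// Stand-in interface: each regular file gets its own handle, so per-file
// state (here just a byte count; sha256 in the real code) lives in the
// handle instead of in `curMember`-style pointers on the visitor.
struct FileHandle {
    virtual ~FileHandle() = default;
    virtual void receiveContents(std::string_view data) = 0;
};

struct Visitor {
    std::map<std::string, uint64_t> sizes;

    std::unique_ptr<FileHandle> createRegularFile(const std::string & path, uint64_t expected)
    {
        struct Handle : FileHandle {
            uint64_t & total;
            uint64_t expected;
            Handle(uint64_t & total, uint64_t expected) : total(total), expected(expected) {}
            void receiveContents(std::string_view data) override
            {
                total += data.size(); // finalize (e.g. the hash) once total == expected
            }
        };
        return std::make_unique<Handle>(sizes[path], expected);
    }
};

int main()
{
    Visitor v;
    auto h = v.createRegularFile("/nix-support/hydra-build-products", 5);
    h->receiveContents("hello");
    printf("%llu\n", (unsigned long long) v.sizes.begin()->second); // 5
}
```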
@@ -13,7 +13,7 @@ struct NarMemberData
     std::optional<nix::Hash> sha256;
 };
 
-typedef std::map<nix::Path, NarMemberData> NarMemberDatas;
+using NarMemberDatas = std::map<nix::Path, NarMemberData>;
 
 /* Read a NAR from a source and get to some info about every file
    inside the NAR. */

@@ -21,3 +21,8 @@ void extractNarData(
     nix::Source & source,
     const nix::Path & prefix,
     NarMemberDatas & members);
+
+nix::WireFormatGenerator extractNarDataFilter(
+    nix::Source & source,
+    const nix::Path & prefix,
+    NarMemberDatas & members);
@@ -4,7 +4,8 @@
 #include "thread-pool.hh"
 
 #include <cstring>
-#include <signal.h>
+#include <utility>
+#include <csignal>
 
 using namespace nix;

@@ -52,7 +53,7 @@ void State::queueMonitorLoop(Connection & conn)
         auto t_after_work = std::chrono::steady_clock::now();
 
         prom.queue_monitor_time_spent_running.Increment(
-            std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+            static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count()));
 
         /* Sleep until we get notification from the database about an
            event. */

@@ -79,7 +80,7 @@ void State::queueMonitorLoop(Connection & conn)
 
         auto t_after_sleep = std::chrono::steady_clock::now();
         prom.queue_monitor_time_spent_waiting.Increment(
-            std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+            static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count()));
     }
 
     exit(0);

@@ -88,7 +89,7 @@ void State::queueMonitorLoop(Connection & conn)
 
 struct PreviousFailure : public std::exception {
     Step::ptr step;
-    PreviousFailure(Step::ptr step) : step(step) { }
+    PreviousFailure(Step::ptr step) : step(std::move(step)) { }
 };

@@ -117,7 +118,7 @@ bool State::getQueuedBuilds(Connection & conn,
 
         for (auto const & row : res) {
             auto builds_(builds.lock());
-            BuildID id = row["id"].as<BuildID>();
+            auto id = row["id"].as<BuildID>();
             if (buildOne && id != buildOne) continue;
             if (builds_->count(id)) continue;

@@ -137,7 +138,7 @@ bool State::getQueuedBuilds(Connection & conn,
 
             newIDs.push_back(id);
             newBuildsByID[id] = build;
-            newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
+            newBuildsByPath.emplace(build->drvPath, id);
         }
     }

@@ -162,7 +163,7 @@ bool State::getQueuedBuilds(Connection & conn,
                 ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
                 build->id,
                 (int) bsAborted,
-                time(0));
+                time(nullptr));
             txn.commit();
             build->finishedInDB = true;
             nrBuildsDone++;

@@ -176,7 +177,7 @@ bool State::getQueuedBuilds(Connection & conn,
         /* Create steps for this derivation and its dependencies. */
         try {
             step = createStep(destStore, conn, build, build->drvPath,
-                build, 0, finishedDrvs, newSteps, newRunnable);
+                build, nullptr, finishedDrvs, newSteps, newRunnable);
         } catch (PreviousFailure & ex) {
 
             /* Some step previously failed, so mark the build as

@@ -221,7 +222,7 @@ bool State::getQueuedBuilds(Connection & conn,
                     "where id = $1 and finished = 0",
                     build->id,
                     (int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
-                    time(0));
+                    time(nullptr));
                 notifyBuildFinished(txn, build->id, {});
                 txn.commit();
                 build->finishedInDB = true;

@@ -254,7 +255,7 @@ bool State::getQueuedBuilds(Connection & conn,
             {
                 auto mc = startDbUpdate();
                 pqxx::work txn(conn);
-                time_t now = time(0);
+                time_t now = time(nullptr);
                 if (!buildOneDone && build->id == buildOne) buildOneDone = true;
                 printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
                 markSucceededBuild(txn, build, res, true, now, now);

@@ -355,7 +356,7 @@ void State::processQueueChange(Connection & conn)
         pqxx::work txn(conn);
         auto res = txn.exec("select id, globalPriority from Builds where finished = 0");
         for (auto const & row : res)
-            currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<BuildID>();
+            currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<int>();
|
||||
}
|
||||
|
||||
{
|
||||
|
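Both `Increment` hunks above fix the same complaint: `duration_cast<microseconds>(...).count()` yields a signed 64-bit integer, while the prometheus counter's `Increment` takes a `double`, so the narrowing conversion is now spelled out instead of implicit. A minimal sketch with a stand-in counter type (standard library only):

```cpp
#include <chrono>
#include <iostream>

struct Counter {                       // stand-in for prometheus::Counter
    double value = 0;
    void Increment(double v) { value += v; }
};

int main() {
    using namespace std::chrono;
    auto t_before = steady_clock::now();
    auto t_after = t_before + milliseconds(1500);

    Counter timeSpent;
    // .count() is an int64_t; the cast makes the int -> double conversion explicit.
    timeSpent.Increment(static_cast<double>(
        duration_cast<microseconds>(t_after - t_before).count()));
    std::cout << timeSpent.value << " us\n"; // 1.5e+06
}
```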
```diff
@@ -438,7 +439,7 @@ Step::ptr State::createStep(ref<Store> destStore,
     Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
     std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
 {
-    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;
+    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return nullptr;

     /* Check if the requested step already exists. If not, create a
        new step. In any case, make the step reachable from
@@ -516,7 +517,7 @@ Step::ptr State::createStep(ref<Store> destStore,
         std::map<DrvOutput, std::optional<StorePath>> paths;
         for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
             auto outputHash = outputHashes.at(outputName);
-            paths.insert({{outputHash, outputName}, maybeOutputPath});
+            paths.insert({{.drvHash=outputHash, .outputName=outputName}, maybeOutputPath});
         }

         auto missing = getMissingRemotePaths(destStore, paths);
@@ -560,7 +561,7 @@ Step::ptr State::createStep(ref<Store> destStore,
             auto & path = *pathOpt;

             try {
-                time_t startTime = time(0);
+                time_t startTime = time(nullptr);

                 if (localStore->isValidPath(path))
                     printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
@@ -578,7 +579,7 @@ Step::ptr State::createStep(ref<Store> destStore,
                         StorePathSet { path },
                         NoRepair, CheckSigs, NoSubstitute);

-                time_t stopTime = time(0);
+                time_t stopTime = time(nullptr);

                 {
                     auto mc = startDbUpdate();
@@ -602,7 +603,7 @@ Step::ptr State::createStep(ref<Store> destStore,
         // FIXME: check whether all outputs are in the binary cache.
         if (valid) {
             finishedDrvs.insert(drvPath);
-            return 0;
+            return nullptr;
         }

         /* No, we need to build. */
@@ -610,7 +611,7 @@ Step::ptr State::createStep(ref<Store> destStore,

     /* Create steps for the dependencies. */
     for (auto & i : step->drv->inputDrvs.map) {
-        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
+        auto dep = createStep(destStore, conn, build, i.first, nullptr, step, finishedDrvs, newSteps, newRunnable);
         if (dep) {
             auto step_(step->state.lock());
             step_->deps.insert(dep);
@@ -658,11 +659,11 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
     auto res2 = txn.exec_params
         ("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
          "where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
-         time(0) - Jobset::schedulingWindow * 10,
+         time(nullptr) - Jobset::schedulingWindow * 10,
          jobsetID);
     for (auto const & row : res2) {
-        time_t startTime = row["startTime"].as<time_t>();
-        time_t stopTime = row["stopTime"].as<time_t>();
+        auto startTime = row["startTime"].as<time_t>();
+        auto stopTime = row["stopTime"].as<time_t>();
         jobset->addStep(startTime, stopTime - startTime);
     }

@@ -702,7 +703,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
              "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
              localStore->printStorePath(output));
         if (r.empty()) continue;
-        BuildID id = r[0][0].as<BuildID>();
+        auto id = r[0][0].as<BuildID>();

         printInfo("reusing build %d", id);

@@ -727,7 +728,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
                 product.fileSize = row[2].as<off_t>();
             }
             if (!row[3].is_null())
-                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
+                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashType::SHA256);
             if (!row[4].is_null())
                 product.path = row[4].as<std::string>();
             product.name = row[5].as<std::string>();
```
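The `paths.insert` change above switches to C++20 designated initializers, so each aggregate field is named at the call site. A small illustration (this `DrvOutput` is a hypothetical two-field mirror for the example, not nix's real definition):

```cpp
#include <iostream>
#include <string>

struct DrvOutput {
    std::string drvHash;    // hash identifying the derivation
    std::string outputName; // e.g. "out"
};

int main() {
    // Positional: compiles even if you later reorder or confuse the fields.
    DrvOutput a{"sha256:0123...", "out"};
    // Designated: self-documenting, and out-of-order designators are rejected.
    DrvOutput b{.drvHash = "sha256:0123...", .outputName = "out"};
    std::cout << a.outputName << " " << b.outputName << "\n";
}
```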
```diff
@@ -8,6 +8,7 @@
 #include <queue>
 #include <regex>
 #include <semaphore>
+#include <utility>

 #include <prometheus/counter.h>
 #include <prometheus/gauge.h>
@@ -26,16 +27,16 @@
 #include "machines.hh"


-typedef unsigned int BuildID;
+using BuildID = unsigned int;

-typedef unsigned int JobsetID;
+using JobsetID = unsigned int;

-typedef std::chrono::time_point<std::chrono::system_clock> system_time;
+using system_time = std::chrono::time_point<std::chrono::system_clock>;

-typedef std::atomic<unsigned long> counter;
+using counter = std::atomic<unsigned long>;


-typedef enum {
+enum BuildStatus {
     bsSuccess = 0,
     bsFailed = 1,
     bsDepFailed = 2, // builds only
@@ -49,10 +50,10 @@ typedef enum {
     bsNarSizeLimitExceeded = 11,
     bsNotDeterministic = 12,
     bsBusy = 100, // not stored
-} BuildStatus;
+};


-typedef enum {
+enum StepState {
     ssPreparing = 1,
     ssConnecting = 10,
     ssSendingInputs = 20,
@@ -60,7 +61,7 @@ typedef enum {
     ssWaitingForLocalSlot = 35,
     ssReceivingOutputs = 40,
     ssPostProcessing = 50,
-} StepState;
+};


 struct RemoteResult
@@ -78,7 +79,7 @@ struct RemoteResult
     unsigned int overhead = 0;
     nix::Path logFile;

-    BuildStatus buildStatus() const
+    [[nodiscard]] BuildStatus buildStatus() const
     {
         return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
     }
@@ -95,10 +96,10 @@ class Jobset
 {
 public:

-    typedef std::shared_ptr<Jobset> ptr;
-    typedef std::weak_ptr<Jobset> wptr;
+    using ptr = std::shared_ptr<Jobset>;
+    using wptr = std::weak_ptr<Jobset>;

-    static const time_t schedulingWindow = 24 * 60 * 60;
+    static const time_t schedulingWindow = static_cast<time_t>(24 * 60 * 60);

 private:

@@ -115,7 +116,7 @@ public:
         return (double) seconds / shares;
     }

-    void setShares(int shares_)
+    void setShares(unsigned int shares_)
     {
         assert(shares_ > 0);
         shares = shares_;
@@ -131,8 +132,8 @@ public:

 struct Build
 {
-    typedef std::shared_ptr<Build> ptr;
-    typedef std::weak_ptr<Build> wptr;
+    using ptr = std::shared_ptr<Build>;
+    using wptr = std::weak_ptr<Build>;

     BuildID id;
     nix::StorePath drvPath;
@@ -163,8 +164,8 @@ struct Build

 struct Step
 {
-    typedef std::shared_ptr<Step> ptr;
-    typedef std::weak_ptr<Step> wptr;
+    using ptr = std::shared_ptr<Step>;
+    using wptr = std::weak_ptr<Step>;

     nix::StorePath drvPath;
     std::unique_ptr<nix::Derivation> drv;
@@ -221,13 +222,8 @@ struct Step

     nix::Sync<State> state;

-    Step(const nix::StorePath & drvPath) : drvPath(drvPath)
+    Step(nix::StorePath drvPath) : drvPath(std::move(drvPath))
     { }
-
-    ~Step()
-    {
-        //printMsg(lvlError, format("destroying step %1%") % drvPath);
-    }
 };

```
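The `Step` constructor above now takes its `StorePath` by value and moves it into the member, the standard sink-argument idiom: callers passing an rvalue pay a move instead of a copy, and lvalue callers still get exactly one copy. A minimal sketch using `std::string` in place of `nix::StorePath`:

```cpp
#include <string>
#include <utility>

struct Step {
    std::string drvPath;
    // Sink argument: take by value, then move into the member.
    explicit Step(std::string drvPath) : drvPath(std::move(drvPath)) {}
};

int main() {
    std::string p = "/nix/store/aaaa-foo.drv";
    Step a(p);            // one copy; p remains valid
    Step b(std::move(p)); // no copy, just two cheap moves
}
```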
```diff
@@ -239,7 +235,7 @@ void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);

 struct Machine : nix::Machine
 {
-    typedef std::shared_ptr<Machine> ptr;
+    using ptr = std::shared_ptr<Machine>;

     /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
        we are not yet used to, but once we are, we don't need this. */
@@ -254,7 +250,7 @@ struct Machine : nix::Machine
     float speedFactorFloat = 1.0;

     struct State {
-        typedef std::shared_ptr<State> ptr;
+        using ptr = std::shared_ptr<State>;
         counter currentJobs{0};
         counter nrStepsDone{0};
         counter totalStepTime{0}; // total time for steps, including closure copying
@@ -358,22 +354,22 @@ private:
     bool useSubstitutes = false;

     /* The queued builds. */
-    typedef std::map<BuildID, Build::ptr> Builds;
+    using Builds = std::map<BuildID, Build::ptr>;
     nix::Sync<Builds> builds;

     /* The jobsets. */
-    typedef std::map<std::pair<std::string, std::string>, Jobset::ptr> Jobsets;
+    using Jobsets = std::map<std::pair<std::string, std::string>, Jobset::ptr>;
     nix::Sync<Jobsets> jobsets;

     /* All active or pending build steps (i.e. dependencies of the
        queued builds). Note that these are weak pointers. Steps are
        kept alive by being reachable from Builds or by being in
        progress. */
-    typedef std::map<nix::StorePath, Step::wptr> Steps;
+    using Steps = std::map<nix::StorePath, Step::wptr>;
     nix::Sync<Steps> steps;

     /* Build steps that have no unbuilt dependencies. */
-    typedef std::list<Step::wptr> Runnable;
+    using Runnable = std::list<Step::wptr>;
     nix::Sync<Runnable> runnable;

     /* CV for waking up the dispatcher. */
@@ -385,7 +381,7 @@ private:

     /* The build machines. */
     std::mutex machinesReadyLock;
-    typedef std::map<std::string, Machine::ptr> Machines;
+    using Machines = std::map<std::string, Machine::ptr>;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

     /* Throttler for CPU-bound local work. */
@@ -401,6 +397,7 @@ private:
     counter nrStepsDone{0};
     counter nrStepsBuilding{0};
     counter nrStepsCopyingTo{0};
+    counter nrStepsWaitingForDownloadSlot{0};
     counter nrStepsCopyingFrom{0};
     counter nrStepsWaiting{0};
     counter nrUnsupportedSteps{0};
@@ -431,7 +428,7 @@ private:

     struct MachineReservation
     {
-        typedef std::shared_ptr<MachineReservation> ptr;
+        using ptr = std::shared_ptr<MachineReservation>;
         State & state;
         Step::ptr step;
         Machine::ptr machine;
@@ -534,7 +531,7 @@ private:
     void finishBuildStep(pqxx::work & txn, const RemoteResult & result, BuildID buildId, unsigned int stepNr,
         const std::string & machine);

-    int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
+    unsigned int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
         Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);

     void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
@@ -594,6 +591,7 @@ private:
     enum StepResult { sDone, sRetry, sMaybeCancelled };
     StepResult doBuildStep(nix::ref<nix::Store> destStore,
         MachineReservation::ptr & reservation,
+        Connection & conn,
         std::shared_ptr<ActiveStep> activeStep);

     void buildRemote(nix::ref<nix::Store> destStore,
@@ -622,8 +620,6 @@ private:

     void addRoot(const nix::StorePath & storePath);

-    void runMetricsExporter();
-
 public:

     void showStatus();
```
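Most of the churn in this header is the `modernize-use-using` rewrite driven by the new `.clang-tidy` configuration. The two spellings are equivalent for these cases; `using` reads left-to-right and, unlike `typedef`, also supports alias templates. A short sketch (the `Machine` here is only a placeholder type):

```cpp
#include <map>
#include <memory>
#include <string>
#include <type_traits>

struct Machine {}; // placeholder

typedef std::map<std::string, std::shared_ptr<Machine>> MachinesOld; // before
using MachinesNew = std::map<std::string, std::shared_ptr<Machine>>; // after

// Only 'using' can be templated; typedef cannot express this:
template<typename T>
using ByName = std::map<std::string, T>;

static_assert(std::is_same_v<MachinesOld, MachinesNew>, "same type, new spelling");

int main() {
    ByName<int> counts{{"builder-1", 3}};
    return counts.at("builder-1") == 3 ? 0 : 1;
}
```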
```diff
@@ -242,23 +242,35 @@ sub push : Chained('api') PathPart('push') Args(0) {
     $c->{stash}->{json}->{jobsetsTriggered} = [];

     my $force = exists $c->request->query_params->{force};
-    my @jobsets = split /,/, ($c->request->query_params->{jobsets} // "");
-    foreach my $s (@jobsets) {
+    my @jobsetNames = split /,/, ($c->request->query_params->{jobsets} // "");
+    my @jobsets;
+
+    foreach my $s (@jobsetNames) {
         my ($p, $j) = parseJobsetName($s);
         my $jobset = $c->model('DB::Jobsets')->find($p, $j);
-        next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
-        triggerJobset($self, $c, $jobset, $force);
+        push @jobsets, $jobset if defined $jobset;
     }

     my @repos = split /,/, ($c->request->query_params->{repos} // "");
     foreach my $r (@repos) {
-        triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
+        foreach ($c->model('DB::Jobsets')->search(
             { 'project.enabled' => 1, 'me.enabled' => 1 },
             {
                 join => 'project',
                 where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
                 order_by => 'me.id DESC'
-            });
+            })) {
+            push @jobsets, $_;
+        }
     }

+    foreach my $jobset (@jobsets) {
+        requireRestartPrivileges($c, $jobset->project);
+    }
+
+    foreach my $jobset (@jobsets) {
+        next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
+        triggerJobset($self, $c, $jobset, $force);
+    }
+
     $self->status_ok(
```
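The reworked handler collects every matching jobset first, then runs `requireRestartPrivileges` over all of them, and only then triggers, so an authorization failure can no longer fire some jobsets and reject others halfway through. A language-neutral sketch of that collect / authorize-all / act ordering, written here in C++ (`mayRestart` and `trigger` are hypothetical stand-ins for the Perl helpers):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct Jobset { std::string name; bool restricted; };

bool mayRestart(const Jobset & j) { return !j.restricted; }
void trigger(const Jobset & j) { std::cout << "triggered " << j.name << "\n"; }

void push(const std::vector<Jobset> & requested) {
    for (const auto & j : requested)      // authorize everything up front
        if (!mayRestart(j))
            throw std::runtime_error("forbidden: " + j.name);
    for (const auto & j : requested)      // side effects only after all checks pass
        trigger(j);
}

int main() {
    push({{"nixpkgs:trunk", false}, {"lix:main", false}});
}
```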
```diff
@@ -273,7 +285,7 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
     $c->{stash}->{json}->{jobsetsTriggered} = [];

     my $in = $c->request->{data};
-    my $owner = $in->{repository}->{owner}->{name} or die;
+    my $owner = $in->{repository}->{owner}->{login} or die;
     my $repo = $in->{repository}->{name} or die;
     print STDERR "got push from GitHub repository $owner/$repo\n";

@@ -285,6 +297,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
     $c->response->body("");
 }

+sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
+    my ($self, $c) = @_;
+
+    $c->{stash}->{json}->{jobsetsTriggered} = [];
+
+    my $in = $c->request->{data};
+    my $url = $in->{repository}->{clone_url} or die;
+    $url =~ s/.git$//;
+    print STDERR "got push from Gitea repository $url\n";
+
+    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
+        { 'project.enabled' => 1, 'me.enabled' => 1 },
+        { join => 'project'
+        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
+        });
+    $c->response->body("");
+}


 1;
```
```diff
@@ -240,7 +240,7 @@ sub serveFile {
     # XSS hole.
     $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

-    $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
+    $c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
         "store", "cat", "--store", getStoreUri(), "$path"]) };

     # Detect MIME type.
```
```diff
@@ -35,6 +35,7 @@ sub noLoginNeeded {

     return $whitelisted ||
         $c->request->path eq "api/push-github" ||
+        $c->request->path eq "api/push-gitea" ||
         $c->request->path eq "google-login" ||
         $c->request->path eq "github-redirect" ||
         $c->request->path eq "github-login" ||
@@ -80,7 +81,7 @@ sub begin :Private {
     $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

     # XSRF protection: require POST requests to have the same origin.
-    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
+    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
         my $referer = $c->req->header('Referer');
         $referer //= $c->req->header('Origin');
         my $base = $c->req->base;
```
```diff
@@ -36,6 +36,7 @@ our @EXPORT = qw(
     jobsetOverview
     jobsetOverview_
     pathIsInsidePrefix
+    readIntoSocket
     readNixFile
     registerRoot
     restartBuilds
@@ -406,6 +407,16 @@ sub pathIsInsidePrefix {
     return $cur;
 }

+sub readIntoSocket{
+    my (%args) = @_;
+    my $sock;
+
+    eval {
+        open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
+    };
+
+    return $sock;
+}
```
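`readIntoSocket` hands the caller the pipe handle itself rather than slurping the command's entire output, which is what lets `serveFile` above stream large store files. Roughly the same idea in C++ (POSIX `popen`; the echo command is just a placeholder):

```cpp
#include <cstdio>
#include <memory>
#include <string>

// Open a command for reading; the unique_ptr closes the pipe via pclose.
std::unique_ptr<FILE, int (*)(FILE *)> openCommand(const std::string & cmd) {
    return {popen(cmd.c_str(), "r"), pclose};
}

int main() {
    auto pipe = openCommand("echo streamed-store-file-contents");
    if (!pipe) return 1;

    char buf[4096];
    // Read and forward incrementally instead of buffering everything in memory.
    while (size_t n = fread(buf, 1, sizeof buf, pipe.get()))
        fwrite(buf, 1, n, stdout);
}
```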
```diff
@@ -6,7 +6,7 @@
 -- add a map of the lowercase name of your table to the CamelCase
 -- version of your table.
 --
--- 3. Run `make -C src/sql update-dbix` in the root
+-- 3. Run `just update-dbix` in the root
 -- of the project directory, and git add / git commit the changed,
 -- generated files.
 --
```
```diff
@@ -35,6 +35,17 @@ my $queuedBuilds = $ctx->makeAndEvaluateJobset(
     build => 0
 );

+# Login and save cookie for future requests
+my $req = request(POST '/login',
+    Referer => 'http://localhost/',
+    Content => {
+        username => 'root',
+        password => 'rootPassword'
+    }
+);
+is($req->code, 302, "Logging in gets a 302");
+my $cookie = $req->header("set-cookie");
+
 subtest "/api/queue" => sub {
     my $response = request(GET '/api/queue?nr=1');
     ok($response->is_success, "The API enpdoint showing the queue returns 200.");
@@ -102,7 +113,7 @@ subtest "/api/nrbuilds" => sub {
 };

 subtest "/api/push" => sub {
-    subtest "with a specific jobset" => sub {
+    subtest "without authentication" => sub {
         my $build = $finishedBuilds->{"one_job"};
         my $jobset = $build->jobset;
         my $projectName = $jobset->project->name;
@@ -110,6 +121,18 @@ subtest "/api/push" => sub {
         is($jobset->forceeval, undef, "The existing jobset is not set to be forced to eval");

         my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1");
+        is($response->code, 403, "The API enpdoint for triggering jobsets requires authentication.");
+    };
+
+    subtest "with a specific jobset" => sub {
+        my $build = $finishedBuilds->{"one_job"};
+        my $jobset = $build->jobset;
+        my $projectName = $jobset->project->name;
+        my $jobsetName = $jobset->name;
+        is($jobset->forceeval, undef, "The existing jobset is not set to be forced to eval");
+
+        my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1",
+            Cookie => $cookie);
         ok($response->is_success, "The API enpdoint for triggering jobsets returns 200.");

         my $data = is_json($response);
@@ -128,7 +151,8 @@ subtest "/api/push" => sub {

         print STDERR $repo;

-        my $response = request(GET "/api/push?repos=$repo&force=1");
+        my $response = request(GET "/api/push?repos=$repo&force=1",
+            Cookie => $cookie);
         ok($response->is_success, "The API enpdoint for triggering jobsets returns 200.");

         my $data = is_json($response);
@@ -172,7 +196,7 @@ subtest "/api/push-github" => sub {
         "Content" => encode_json({
             repository => {
                 owner => {
-                    name => "OWNER",
+                    login => "OWNER",
                 },
                 name => "LEGACY-REPO",
             }
@@ -198,7 +222,7 @@ subtest "/api/push-github" => sub {
         "Content" => encode_json({
             repository => {
                 owner => {
-                    name => "OWNER",
+                    login => "OWNER",
                 },
                 name => "FLAKE-REPO",
             }
```
```diff
@@ -11,20 +11,14 @@ my $ctx = test_context();

 Catalyst::Test->import('Hydra');

-my $user = $ctx->db()->resultset('Users')->create({
-    username => 'alice',
-    emailaddress => 'root@invalid.org',
-    password => '!'
-});
-$user->setPassword('foobar');
-$user->userroles->update_or_create({ role => 'admin' });
+$ctx->db(); # Ensure DB initialization.

 # Login and save cookie for future requests
 my $req = request(POST '/login',
     Referer => 'http://localhost/',
     Content => {
-        username => 'alice',
-        password => 'foobar'
+        username => 'root',
+        password => 'rootPassword'
     }
 );
 is($req->code, 302, "Logging in gets a 302");
```
```diff
@@ -101,7 +101,7 @@ sub new {
         $opts{'before_init'}->($self);
     }

-    expectOkay(5, ("hydra-init"));
+    expectOkay(30, ("hydra-init"));

     return $self;
 }
@@ -115,11 +115,13 @@ sub db {
         $self->{_db} = Hydra::Model::DB->new();

         if (!(defined $setup && $setup == 0)) {
-            $self->{_db}->resultset('Users')->create({
+            my $user = $self->{_db}->resultset('Users')->create({
                 username => "root",
                 emailaddress => 'root@invalid.org',
-                password => ''
+                password => '!'
             });
+            $user->setPassword('rootPassword');
+            $user->userroles->update_or_create({ role => 'admin' });
         }
     }
```
```diff
@@ -70,7 +70,7 @@ sub add_user {
     my $email = $opts{'email'} // "$name\@example";
     my $password = $opts{'password'} // rand_chars();

-    my ($res, $stdout, $stderr) = captureStdoutStderr(1, ("slappasswd", "-s", $password));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("slappasswd", "-s", $password));
     if ($res) {
         die "Failed to execute slappasswd ($res): $stderr, $stdout";
     }
@@ -178,7 +178,7 @@ sub start {
 sub validateConfig {
     my ($self) = @_;

-    expectOkay(1, ("slaptest", "-u", "-F", $self->{"_slapd_dir"}));
+    expectOkay(5, ("slaptest", "-u", "-F", $self->{"_slapd_dir"}));
 }

 sub _spawn {
@@ -218,7 +218,7 @@ sub load_ldif {

     my $path = "${\$self->{'_tmpdir'}}/load.ldif";
     write_file($path, $content);
-    expectOkay(1, ("slapadd", "-F", $self->{"_slapd_dir"}, "-b", $suffix, "-l", $path));
+    expectOkay(5, ("slapadd", "-F", $self->{"_slapd_dir"}, "-b", $suffix, "-l", $path));
     $self->validateConfig();
 }
```
```diff
@@ -39,7 +39,7 @@ subtest "Building, caching, and then garbage collecting the underlying job" => sub {

     ok(unlink(Hydra::Helper::Nix::gcRootFor($path)), "Unlinking the GC root for underlying Dependency succeeds");

-    (my $ret, my $stdout, my $stderr) = captureStdoutStderr(5, "nix-store", "--delete", $path);
+    (my $ret, my $stdout, my $stderr) = captureStdoutStderr(15, "nix-store", "--delete", $path);
     is($ret, 0, "Deleting the underlying dependency should succeed");
 };
```
```diff
@@ -9,7 +9,7 @@ my $db = $ctx->db();

 subtest "Handling password and password hash creation" => sub {
     subtest "Creating a user with a plain text password (insecure) stores the password securely" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-create-user", "plain-text-user", "--password", "foobar"));
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, ("hydra-create-user", "plain-text-user", "--password", "foobar"));
         is($res, 0, "hydra-create-user should exit zero");
         like($stderr, qr/Submitting plaintext passwords as arguments is deprecated and will be removed/, "Submitting a plain text password is deprecated.");

@@ -23,7 +23,7 @@ subtest "Handling password and password hash creation" => sub {
     };

     subtest "Creating a user with a sha1 password (still insecure) stores the password as a hashed sha1" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-create-user", "old-password-hash-user", "--password-hash", "8843d7f92416211de9ebb963ff4ce28125932878"));
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, ("hydra-create-user", "old-password-hash-user", "--password-hash", "8843d7f92416211de9ebb963ff4ce28125932878"));
         is($res, 0, "hydra-create-user should exit zero");

         my $user = $db->resultset('Users')->find({ username => "old-password-hash-user" });
@@ -36,7 +36,7 @@ subtest "Handling password and password hash creation" => sub {
     };

     subtest "Creating a user with an argon2 password stores the password as given" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-create-user", "argon2-hash-user", "--password-hash", '$argon2id$v=19$m=262144,t=3,p=1$tMnV5paYjmIrUIb6hylaNA$M8/e0i3NGrjhOliVLa5LqQ'));
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, ("hydra-create-user", "argon2-hash-user", "--password-hash", '$argon2id$v=19$m=262144,t=3,p=1$tMnV5paYjmIrUIb6hylaNA$M8/e0i3NGrjhOliVLa5LqQ'));
         is($res, 0, "hydra-create-user should exit zero");

         my $user = $db->resultset('Users')->find({ username => "argon2-hash-user" });
@@ -50,7 +50,7 @@ subtest "Handling password and password hash creation" => sub {

     subtest "Creating a user by prompting for the password" => sub {
         subtest "with the same password twice" => sub {
-            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(5, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nmy-password\n");
+            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(15, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nmy-password\n");
             is($res, 0, "hydra-create-user should exit zero");

             my $user = $db->resultset('Users')->find({ username => "prompted-pass-user" });
@@ -62,7 +62,7 @@ subtest "Handling password and password hash creation" => sub {
         };

         subtest "With mismatched password confirmation" => sub {
-            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(5, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nnot-my-password\n");
+            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(15, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nnot-my-password\n");
             isnt($res, 0, "hydra-create-user should exit non-zero");
         };
     };
@@ -76,7 +76,7 @@ subtest "Handling password and password hash creation" => sub {
     );

     for my $case (@cases) {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, (
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, (
             "hydra-create-user", "bogus-password-options", @{$case}));
         like($stderr, qr/please specify only one of --password-prompt or --password-hash/, "We get an error about specifying the password");
         isnt($res, 0, "hydra-create-user should exit non-zero with conflicting " . join(" ", @{$case}));
@@ -84,7 +84,7 @@ subtest "Handling password and password hash creation" => sub {
     };

     subtest "A password is not required for creating a Google-based account" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, (
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, (
             "hydra-create-user", "google-account", "--type", "google"));
         is($res, 0, "hydra-create-user should exit zero");
     };
```
```diff
@@ -28,7 +28,7 @@ subtest "hydra-init upgrades user's password hashes from sha1 to sha1 inside Argon2" => sub {
     $janet->setPassword("foobar");

     is($alice->password, "8843d7f92416211de9ebb963ff4ce28125932878", "Alices's sha1 is stored in the database");
-    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-init"));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(30, ("hydra-init"));
     if ($res != 0) {
         is($stdout, "");
         is($stderr, "");
@@ -55,7 +55,7 @@ subtest "hydra-init upgrades user's password hashes from sha1 to sha1 inside Argon2" => sub {
 };

 subtest "Running hydra-init don't break Alice or Janet's passwords" => sub {
-    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-init"));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(30, ("hydra-init"));
     is($res, 0, "hydra-init should exit zero");

     my $updatedAlice = $db->resultset('Users')->find({ username => "alice" });
```
```diff
@@ -21,7 +21,7 @@ if (defined($ENV{"NIX_BUILD_CORES"})
     print STDERR "test.pl: Defaulting \$YATH_JOB_COUNT to \$NIX_BUILD_CORES (${\$ENV{'NIX_BUILD_CORES'}})\n";
 }

-system($^X, find_yath(), '-D', 'test', '--default-search' => './', @ARGV);
+system($^X, find_yath(), '-D', 'test', '--qvf', '--event-timeout', 240, '--default-search' => './', @ARGV);
 my $exit = $?;

 # This makes sure it works with prove.
```