forked from lix-project/hydra
Compare commits
47 commits
Author | SHA1
--- | ---
Maximilian Bosch | eccf01d4fe
Maximilian Bosch | 42c4a85ec8
Maximilian Bosch | 2b1c46730c
Maximilian Bosch | 51608e1ca1
leo60228 | 6285440304
jade | 988554eb7a
jade | 4acb1959a1
jade | 453adb7f25
Maximilian Bosch | acd54bfbd6
leo60228 | a4b2b58e2b
Maximilian Bosch | ee1234c15c
Maximilian Bosch | 7c7078cccf
Maximilian Bosch | a5099d9e80
 | 799441dcf6
 | e4d466ffcd
raito | d3257e4761
Ilya K | f23ec71227
leo60228 | ac37e44982
Maximilian Bosch | 6a88e647e7
Pierre Bourdon | 8d5d4942e1
Pierre Bourdon | e5a8ee5c17
Pierre Bourdon | fd7fd0ad65
Pierre Bourdon | d3fcedbcf5
Pierre Bourdon | 3891ad77e3
Pierre Bourdon | 21fd1f8993
emily | ab6d81fad4
 | 64df0cba47
 | 6179b298cb
Pierre Bourdon | 44b9a7b95d
Maximilian Bosch | 3ee51dbe58
Maximilian Bosch | e987f74954
Maximilian Bosch | 1f802c008c
Maximilian Bosch | 3a4e0d4917
Maximilian Bosch | 3517acc5ba
71rd | 459aa0a598
eldritch horrors | f1b552ecbf
Pierre Bourdon | db8c2cc4a8
 | 8858abb1a6
 | ef619eca99
 | 41dfa0e443
Pierre Bourdon | 4b107e6ff3
Pierre Bourdon | 4b886d9c45
Pierre Bourdon | fbb894af4e
 | 8a984efaef
Pierre Bourdon | abc9f11417
Pierre Bourdon | 9a4a5dd624
Janik Haag | ac406a9175
.clang-tidy (new file, +12)

@@ -0,0 +1,12 @@
+UseColor: true
+Checks:
+- -*
+
+- bugprone-*
+# kind of nonsense
+- -bugprone-easily-swappable-parameters
+# many warnings due to not recognizing `assert` properly
+- -bugprone-unchecked-optional-access
+
+- modernize-*
+- -modernize-use-trailing-return-type
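The new clang-tidy configuration pairs with the `clang` dev shell introduced in flake.nix further down, which adds `clang-tools` to the environment. A minimal sketch of running it by hand, assuming `just setup` has already produced a `build/` directory containing a `compile_commands.json` (the source file picked here is only an example):

```console
$ nix develop .#clang
[nix-shell]$ just setup
[nix-shell]$ clang-tidy -p build src/hydra-evaluator/hydra-evaluator.cc
```

`clang-tidy` discovers the `.clang-tidy` file automatically by searching upward from the file being checked.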
.github/ISSUE_TEMPLATE/bug_report.md (vendored, deleted, -37)

@@ -1,37 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: bug
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Hydra Server:**
-
-Please fill out this data as well as you can, but don't worry if you can't -- just do your best.
-
-- OS and version: [e.g. NixOS 22.05.20211203.ee3794c]
-- Version of Hydra
-- Version of Nix Hydra is built against
-- Version of the Nix daemon
-
-
-**Additional context**
-Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, deleted, -20)

@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
.github/dependabot.yml (vendored, deleted, -6)

@@ -1,6 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "weekly"
.github/workflows/test.yml (vendored, deleted, -14)

@@ -1,14 +0,0 @@
-name: "Test"
-on:
-  pull_request:
-  push:
-jobs:
-  tests:
-    runs-on: ubuntu-latest
-    steps:
-    - uses: actions/checkout@v3
-      with:
-        fetch-depth: 0
-    - uses: cachix/install-nix-action@v17
-    #- run: nix flake check
-    - run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
.gitignore (vendored, 42 lines changed)

@@ -1,47 +1,9 @@
-/.pls_cache
-*.o
 *~
-Makefile
+.test_info.*
-Makefile.in
-.deps
-.hydra-data
-/config.guess
-/config.log
-/config.status
-/config.sub
-/configure
-/depcomp
-/libtool
-/ltmain.sh
-/autom4te.cache
-/aclocal.m4
-/missing
-/install-sh
 /src/sql/hydra-postgresql.sql
 /src/sql/hydra-sqlite.sql
 /src/sql/tmp.sqlite
-/src/root/static/bootstrap
-/src/root/static/js/flot
-/tests
-/doc/manual/images
-/doc/manual/manual.html
-/doc/manual/manual.pdf
-/t/.bzr*
-/t/.git*
-/t/.hg*
-/t/nix
-/t/data
-/t/jobs/config.nix
-t/jobs/declarative/project.json
-/inst
-hydra-config.h
-hydra-config.h.in
 result
 result-*
+.hydra-data
 outputs
-config
-stamp-h1
-src/hydra-evaluator/hydra-evaluator
-src/hydra-queue-runner/hydra-queue-runner
-src/root/static/fontawesome/
-src/root/static/bootstrap*/
Makefile.am (deleted, -12)

@@ -1,12 +0,0 @@
-SUBDIRS = src doc
-if CAN_DO_CHECK
-SUBDIRS += t
-endif
-
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
-EXTRA_DIST = nixos-modules/hydra.nix
-
-install-data-local: nixos-modules/hydra.nix
-	$(INSTALL) -d $(DESTDIR)$(datadir)/nix
-	$(INSTALL_DATA) nixos-modules/hydra.nix $(DESTDIR)$(datadir)/nix/hydra-module.nix
README.md (38 lines changed)

@@ -4,6 +4,17 @@
 
 Hydra is a [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) service for [Nix](https://nixos.org/nix) based projects.
 
+## Branches
+
+| Branch | Lix release | Maintained |
+| --- | --- | --- |
+| `main` | [`main` branch of Lix](https://git.lix.systems/lix-project/lix/commits/branch/main) | ✅ |
+| `lix-2.91` | [2.91](https://lix.systems/blog/2024-08-12-lix-2.91-release/) | ✅ |
+
+Active development happens on `main` only.
+Branches that track a Lix release are maintained as long as the Lix version is
+maintained. These only receive critical bugfixes.
+
 ## Installation And Setup
 
 **Note**: The instructions provided below are intended to enable new users to get a simple, local installation up and running. They are by no means sufficient for running a production server, let alone a public instance.

@@ -78,11 +89,11 @@ $ nix-build
 ### Development Environment
 
 You can use the provided shell.nix to get a working development environment:
 
 ```
-$ nix-shell
-$ autoreconfPhase
-$ configurePhase # NOTE: not ./configure
-$ make
+$ nix develop
+[nix-shell]$ just setup
+[nix-shell]$ just install
 ```
 
 ### Executing Hydra During Development

@@ -91,10 +102,9 @@ When working on new features or bug fixes you need to be able to run Hydra from
 can be done using [foreman](https://github.com/ddollar/foreman):
 
 ```
-$ nix-shell
-$ # hack hack
-$ make
-$ foreman start
+$ nix develop
+[nix-shell]$ just install
+[nix-shell]$ foreman start
 ```
 
 Have a look at the [Procfile](./Procfile) if you want to see how the processes are being started. In order to avoid

@@ -115,22 +125,22 @@ Start by following the steps in [Development Environment](#development-environme
 Then, you can run the tests and the perlcritic linter together with:
 
 ```console
-$ nix-shell
-$ make check
+$ nix develop
+[nix-shell]$ just test
 ```
 
 You can run a single test with:
 
 ```
-$ nix-shell
-$ yath test ./t/foo/bar.t
+$ nix develop
+[nix-shell]$ yath test ./t/foo/bar.t
 ```
 
 And you can run just perlcritic with:
 
 ```
-$ nix-shell
-$ make perlcritic
+$ nix develop
+[nix-shell]$ just perlcritic
 ```
 
 ### JSON API
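Taken together, the README changes replace the Automake workflow with a Meson-based one driven by `just`. A minimal sketch of the resulting development loop, assuming the recipes from the justfile added later in this diff:

```console
$ nix develop               # replaces the old `nix-shell` entry point
[nix-shell]$ just setup     # meson setup (replaces autoreconfPhase/configurePhase)
[nix-shell]$ just install   # build and install into ./outputs/out (replaces make)
[nix-shell]$ foreman start  # start the database, web server, and daemons
```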
configure.ac (deleted, -90)

@@ -1,90 +0,0 @@
-AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])])
-AC_CONFIG_AUX_DIR(config)
-AM_INIT_AUTOMAKE([foreign serial-tests])
-
-AC_LANG([C++])
-
-AC_PROG_CC
-AC_PROG_INSTALL
-AC_PROG_LN_S
-AC_PROG_LIBTOOL
-AC_PROG_CXX
-
-AC_PATH_PROG([XSLTPROC], [xsltproc])
-
-AC_ARG_WITH([docbook-xsl],
-  [AS_HELP_STRING([--with-docbook-xsl=PATH],
-    [path of the DocBook XSL stylesheets])],
-  [docbookxsl="$withval"],
-  [docbookxsl="/docbook-xsl-missing"])
-AC_SUBST([docbookxsl])
-
-
-AC_DEFUN([NEED_PROG],
-[
-AC_PATH_PROG($1, $2)
-if test -z "$$1"; then
-  AC_MSG_ERROR([$2 is required])
-fi
-])
-
-NEED_PROG(perl, perl)
-
-NEED_PROG([NIX_STORE_PROGRAM], [nix-store])
-
-AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
-if test -n "$NIX_STORE" -a -n "$TMPDIR"
-then
-  # This may be executed from within a build chroot, so pacify
-  # `nix-store' instead of letting it choke while trying to mkdir
-  # /nix/var.
-  NIX_STATE_DIR="$TMPDIR"
-  export NIX_STATE_DIR
-fi
-if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
-  AC_MSG_RESULT([yes])
-else
-  AC_MSG_RESULT([no])
-  AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
-fi
-
-PKG_CHECK_MODULES([NIX], [lix-main lix-expr lix-store])
-
-testPath="$(dirname $(type -p expr))"
-AC_SUBST(testPath)
-
-CXXFLAGS+=" -include lix/config.h -std=gnu++20"
-
-AC_CONFIG_FILES([
-  Makefile
-  doc/Makefile
-  doc/manual/Makefile
-  src/Makefile
-  src/hydra-evaluator/Makefile
-  src/hydra-queue-runner/Makefile
-  src/sql/Makefile
-  src/ttf/Makefile
-  src/lib/Makefile
-  src/root/Makefile
-  src/script/Makefile
-])
-
-# Tests might be filtered out
-AM_CONDITIONAL([CAN_DO_CHECK], [test -f "$srcdir/t/api-test.t"])
-AM_COND_IF(
-  [CAN_DO_CHECK],
-  [
-    jobsPath="$(realpath ./t/jobs)"
-    AC_SUBST(jobsPath)
-    AC_CONFIG_FILES([
-      t/Makefile
-      t/jobs/config.nix
-      t/jobs/declarative/project.json
-    ])
-  ])
-
-AC_CONFIG_COMMANDS([executable-scripts], [])
-
-AC_CONFIG_HEADER([hydra-config.h])
-
-AC_OUTPUT
@@ -1,4 +0,0 @@
-SUBDIRS = manual
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
-
@@ -1,122 +0,0 @@
-* Recreating the schema bindings:
-
-  $ make -C src/sql update-dbix
-
-* Running the test server:
-
-  $ DBIC_TRACE=1 ./script/hydra_server.pl
-
-* Setting the maximum number of concurrent builds per system type:
-
-  $ psql -d hydra <<< "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
-
-* Creating a user:
-
-  $ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
-      --password-prompt
-
-  (Replace "foobar" with the desired password.)
-
-  To make the user an admin:
-
-  $ hydra-create-user root --role admin
-
-  To enable a non-admin user to create projects:
-
-  $ hydra-create-user root --role create-projects
-
-* Changing the priority of a scheduled build:
-
-  update buildschedulinginfo set priority = 200 where id = <ID>;
-
-* Changing the priority of all builds for a jobset:
-
-  update buildschedulinginfo set priority = 20 where id in (select id from builds where finished = 0 and project = 'nixpkgs' and jobset = 'trunk');
-
-
-* Steps to install:
-
-  - Install the Hydra closure.
-
-  - Set HYDRA_DATA to /somewhere.
-
-  - Run hydra_init.pl
-
-  - Start hydra_server
-
-  - Visit http://localhost:3000/
-
-  - Create a user (see above)
-
-  - Create a project, jobset etc.
-
-  - Start hydra_evaluator and hydra_queue_runner
-
-
-* Job selection:
-
-  php-sat:build [system = "i686-linux"]
-  php-sat:build [same system]
-  tarball [same patchelfSrc]
-  --if system i686-linux --arg build {...}
-
-
-* Restart all aborted builds in a given evaluation (e.g. 820909):
-
-  > update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus = 3 and exists (select 1 from jobsetevalmembers where eval = 820909 and build = id));
-
-
-* Restart all builds in a given evaluation that had a build step time out:
-
-  > update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus != 0 and exists (select 1 from jobsetevalmembers where eval = 926992 and build = id) and exists (select 1 from buildsteps where build = id and status = 7));
-
-
-* select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) x join builds y on x.timestamp = y.timestamp and x.project = y.project and x.jobset = y.jobset and x.job = y.job and x.system = y.system;
-
-  select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) natural join builds;
-
-
-* Delete all scheduled builds that are not already building:
-
-  delete from builds where finished = 0 and not exists (select 1 from buildschedulinginfo s where s.id = builds.id and busy != 0);
-
-
-* select x.project, x.jobset, x.job, x.system, x.id, x.timestamp, r.buildstatus, b.id, b.timestamp
-  from (select project, jobset, job, system, max(id) as id from Builds where finished = 1 group by project, jobset, job, system) as a_
-  natural join Builds x
-  natural join BuildResultInfo r
-  left join Builds b on b.id =
-    (select max(id) from builds c
-     natural join buildresultinfo r2
-     where x.project = c.project and x.jobset = c.jobset and x.job = c.job and x.system = c.system
-       and x.id > c.id and r.buildstatus != r2.buildstatus);
-
-* Using PostgreSQL (version 9.2 or newer is required):
-
-  $ HYDRA_DBI="dbi:Pg:dbname=hydra;" hydra-server
-
-
-* Find the builds with the highest number of build steps:
-
-  select id, (select count(*) from buildsteps where build = x.id) as n from builds x order by n desc;
-
-
-* Evaluating the NixOS Hydra jobs:
-
-  $ ./hydra_eval_jobs ~/Dev/nixos-wc/release.nix --arg nixpkgs '{outPath = /home/eelco/Dev/nixpkgs-wc;}' --arg nixosSrc '{outPath = /home/eelco/Dev/nixos-wc; rev = 1234;}' --arg services '{outhPath = /home/eelco/services-wc;}' --argstr system i686-linux --argstr system x86_64-linux --arg officialRelease false
-
-
-* Show all the failing jobs/systems in the nixpkgs:stdenv jobset that
-  succeed in the nixpkgs:trunk jobset:
-
-  select job, system from builds b natural join buildresultinfo where project = 'nixpkgs' and jobset = 'stdenv' and iscurrent = 1 and finished = 1 and buildstatus != 0 and exists (select 1 from builds natural join buildresultinfo where project = 'nixpkgs' and jobset = 'trunk' and job = b.job and system = b.system and iscurrent = 1 and finished = 1 and buildstatus = 0) order by job, system;
-
-
-* Get all Nixpkgs jobs that have never built succesfully:
-
-  select project, jobset, job from builds b1
-    where project = 'nixpkgs' and jobset = 'trunk' and iscurrent = 1
-    group by project, jobset, job
-    having not exists
-      (select 1 from builds b2 where b1.project = b2.project and b1.jobset = b2.jobset and b1.job = b2.job and finished = 1 and buildstatus = 0)
-    order by project, jobset, job;
@@ -1,6 +0,0 @@
-MD_FILES = src/*.md
-
-EXTRA_DIST = $(MD_FILES)
-
-install: $(MD_FILES)
-	mdbook build . -d $(docdir)
doc/manual/meson.build (new file, +33)

@@ -0,0 +1,33 @@
+srcs = files(
+  'src/SUMMARY.md',
+  'src/about.md',
+  'src/api.md',
+  'src/configuration.md',
+  'src/hacking.md',
+  'src/installation.md',
+  'src/introduction.md',
+  'src/jobs.md',
+  'src/monitoring/README.md',
+  'src/notifications.md',
+  'src/plugins/README.md',
+  'src/plugins/RunCommand.md',
+  'src/plugins/declarative-projects.md',
+  'src/projects.md',
+  'src/webhooks.md',
+)
+
+manual = custom_target(
+  'manual',
+  command: [
+    mdbook, 'build', '@SOURCE_ROOT@/doc/manual', '-d', meson.current_build_dir() / 'html'
+  ],
+  depend_files: srcs,
+  output: ['html'],
+  build_by_default: true,
+)
+
+install_subdir(
+  manual.full_path(),
+  install_dir: get_option('datadir') / 'doc/hydra',
+  strip_directory: true,
+)
@@ -12,15 +12,14 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
 and dependencies can be found:
 
 ```console
-$ nix-shell
+$ nix develop
 ```
 
 To build Hydra, you should then do:
 
 ```console
-[nix-shell]$ autoreconfPhase
-[nix-shell]$ configurePhase
-[nix-shell]$ make
+[nix-shell]$ just setup
+[nix-shell]$ just install
 ```
 
 You start a local database, the webserver, and other components with

@@ -30,6 +29,8 @@ foreman:
 $ foreman start
 ```
 
+The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"
+
 You can run just the Hydra web server in your source tree as follows:
 
 ```console

@@ -39,18 +40,13 @@ $ ./src/script/hydra-server
 You can run Hydra's test suite with the following:
 
 ```console
-[nix-shell]$ make check
-[nix-shell]$ # to run as many tests as you have cores:
-[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
+[nix-shell]$ just test
 [nix-shell]$ # or run yath directly:
 [nix-shell]$ yath test
 [nix-shell]$ # to run as many tests as you have cores:
 [nix-shell]$ yath test -j $NIX_BUILD_CORES
 ```
 
-When using `yath` instead of `make check`, ensure you have run `make`
-in the root of the repository at least once.
-
 **Warning**: Currently, the tests can fail
 if run with high parallelism [due to an issue in
 `Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)

@@ -101,3 +97,14 @@ Off NixOS, change `/etc/nix/nix.conf`:
 ```conf
 trusted-users = root YOURUSERNAME
 ```
+
+### Updating schema bindings
+
+```
+just update-dbix
+```
+
+### Find the builds with the highest number of build steps:
+
+select id, (select count(*) from buildsteps where build = x.id) as n from builds x order by n desc;
+
@@ -1,9 +1,12 @@
 # Webhooks
 
-Hydra can be notified by github's webhook to trigger a new evaluation when a
+Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
 jobset has a github repo in its input.
-To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
-click on `Add webhook`.
+
+## GitHub
+
+To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
+and in the `Webhooks` tab click on `Add webhook`.
 
 - In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
 - In `Content type` switch to `application/json`.

@@ -11,3 +14,14 @@ click on `Add webhook`.
 - For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
 
 Then add the hook with `Add webhook`.
+
+## Gitea
+
+To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
+and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
+
+- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
+- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
+- Change the branch filter to match the git branch hydra builds.
+
+Then add the hook with `Add webhook`.
flake.lock (54 lines changed)

@@ -24,11 +24,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1719994518,
-      "narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=",
+      "lastModified": 1730504689,
+      "narHash": "sha256-hgmguH29K2fvs9szpq2r3pz2/8cJd2LPS+b4tfNFCwE=",
       "owner": "hercules-ci",
       "repo": "flake-parts",
-      "rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7",
+      "rev": "506278e768c2a08bec68eb62932193e341f55c90",
       "type": "github"
     },
     "original": {

@@ -48,11 +48,11 @@
       "pre-commit-hooks": "pre-commit-hooks"
     },
     "locked": {
-      "lastModified": 1721091462,
-      "narHash": "sha256-0cmEeoOiB91BviTJHzIyxkY+Gxv3O8ZnnExVAoXEFGI=",
+      "lastModified": 1732112222,
+      "narHash": "sha256-H7GN4++a4vE49SUNojZx+FSk4mmpb2ifJUtJMJHProI=",
       "ref": "refs/heads/main",
-      "rev": "6b4d46e9e0e1dd80e0977684ab20d14bcd1a6bc3",
-      "revCount": 15967,
+      "rev": "66f6dbda32959dd5cf3a9aaba15af72d037ab7ff",
+      "revCount": 16513,
       "type": "git",
       "url": "https://git.lix.systems/lix-project/lix"
     },

@@ -74,11 +74,11 @@
       "treefmt-nix": "treefmt-nix"
     },
     "locked": {
-      "lastModified": 1721195872,
-      "narHash": "sha256-TlvRq634MSl22BWLmpTy2vdtKntbZlsUwdMq8Mp9AWs=",
+      "lastModified": 1732351635,
+      "narHash": "sha256-H94CcQ3yamG5+RMxtxXllR02YIlxQ5WD/8PcolO9yEA=",
       "ref": "refs/heads/main",
-      "rev": "c057494450f2d1420726ddb0bab145a5ff4ddfdd",
-      "revCount": 608,
+      "rev": "dfc286ca3dc49118c30d8d6205d6d6af76c62b7a",
+      "revCount": 617,
       "type": "git",
       "url": "https://git.lix.systems/lix-project/nix-eval-jobs"
     },

@@ -95,11 +95,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1720066371,
-      "narHash": "sha256-uPlLYH2S0ACj0IcgaK9Lsf4spmJoGejR9DotXiXSBZQ=",
+      "lastModified": 1731952509,
+      "narHash": "sha256-p4gB3Rhw8R6Ak4eMl8pqjCPOLCZRqaehZxdZ/mbFClM=",
       "owner": "nix-community",
       "repo": "nix-github-actions",
-      "rev": "622f829f5fe69310a866c8a6cd07e747c44ef820",
+      "rev": "7b5f051df789b6b20d259924d349a9ba3319b226",
       "type": "github"
     },
     "original": {

@@ -111,11 +111,11 @@
   "nix2container": {
     "flake": false,
     "locked": {
-      "lastModified": 1712990762,
-      "narHash": "sha256-hO9W3w7NcnYeX8u8cleHiSpK2YJo7ecarFTUlbybl7k=",
+      "lastModified": 1724996935,
+      "narHash": "sha256-njRK9vvZ1JJsP8oV2OgkBrpJhgQezI03S7gzskCcHos=",
       "owner": "nlewo",
       "repo": "nix2container",
-      "rev": "20aad300c925639d5d6cbe30013c8357ce9f2a2e",
+      "rev": "fa6bb0a1159f55d071ba99331355955ae30b3401",
       "type": "github"
     },
     "original": {

@@ -126,16 +126,16 @@
   },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1720691131,
-      "narHash": "sha256-CWT+KN8aTPyMIx8P303gsVxUnkinIz0a/Cmasz1jyIM=",
+      "lastModified": 1733120037,
+      "narHash": "sha256-En+gSoVJ3iQKPDU1FHrR6zIxSLXKjzKY+pnh9tt+Yts=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "a046c1202e11b62cbede5385ba64908feb7bfac4",
+      "rev": "f9f0d5c5380be0a599b1fb54641fa99af8281539",
       "type": "github"
     },
     "original": {
       "owner": "NixOS",
-      "ref": "nixos-24.05",
+      "ref": "nixos-24.11",
       "repo": "nixpkgs",
       "type": "github"
     }

@@ -159,11 +159,11 @@
   "pre-commit-hooks": {
     "flake": false,
     "locked": {
-      "lastModified": 1712055707,
-      "narHash": "sha256-4XLvuSIDZJGS17xEwSrNuJLL7UjDYKGJSbK1WWX2AK8=",
+      "lastModified": 1726745158,
+      "narHash": "sha256-D5AegvGoEjt4rkKedmxlSEmC+nNLMBPWFxvmYnVLhjk=",
       "owner": "cachix",
       "repo": "git-hooks.nix",
-      "rev": "e35aed5fda3cc79f88ed7f1795021e559582093a",
+      "rev": "4e743a6920eab45e8ba0fbe49dc459f1423a4b74",
       "type": "github"
     },
     "original": {

@@ -187,11 +187,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1721059077,
-      "narHash": "sha256-gCICMMX7VMSKKt99giDDtRLkHJ0cwSgBtDijJAqTlto=",
+      "lastModified": 1732292307,
+      "narHash": "sha256-5WSng844vXt8uytT5djmqBCkopyle6ciFgteuA9bJpw=",
       "owner": "numtide",
       "repo": "treefmt-nix",
-      "rev": "0fb28f237f83295b4dd05e342f333b447c097398",
+      "rev": "705df92694af7093dfbb27109ce16d828a79155f",
       "type": "github"
     },
     "original": {
flake.nix (17 lines changed)

@@ -1,7 +1,7 @@
 {
   description = "A Nix-based continuous build system";
 
-  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05";
+  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
 
   inputs.lix.url = "git+https://git.lix.systems/lix-project/lix";
   inputs.lix.inputs.nixpkgs.follows = "nixpkgs";

@@ -73,6 +73,21 @@
       default = pkgsBySystem.${system}.hydra;
     });
 
+    devShells = forEachSystem (system: let
+      pkgs = pkgsBySystem.${system};
+      lib = pkgs.lib;
+
+      mkDevShell = stdenv: (pkgs.mkShell.override { inherit stdenv; }) {
+        inputsFrom = [ (self.packages.${system}.default.override { inherit stdenv; }) ];
+
+        packages =
+          lib.optional (stdenv.cc.isClang && stdenv.hostPlatform == stdenv.buildPlatform) pkgs.clang-tools;
+      };
+    in {
+      default = mkDevShell pkgs.stdenv;
+      clang = mkDevShell pkgs.clangStdenv;
+    });
+
     nixosModules = import ./nixos-modules {
       overlays = overlayList;
     };
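A short sketch of how the new `devShells` outputs might be used (shell names as defined above):

```console
$ nix develop          # default shell, built with pkgs.stdenv
$ nix develop .#clang  # clangStdenv shell; adds clang-tools when the compiler is clang
```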
@@ -3,4 +3,4 @@
 # wait for hydra-server to listen
 while ! nc -z localhost 63333; do sleep 1; done
 
-HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-evaluator
+HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-evaluator

@@ -28,4 +28,4 @@ use-substitutes = true
 </hydra_notify>
 EOF
 fi
-HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-dev-server --port 63333 --restart --debug
+HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-dev-server --port 63333 --restart --debug

@@ -3,4 +3,4 @@
 # wait for hydra-server to listen
 while ! nc -z localhost 63333; do sleep 1; done
 
-HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-notify
+HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-notify

@@ -3,4 +3,4 @@
 # wait until hydra is listening on port 63333
 while ! nc -z localhost 63333; do sleep 1; done
 
-NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-queue-runner
+NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec $(pwd)/outputs/out/bin/hydra-queue-runner
justfile (new file, +17)

@@ -0,0 +1,17 @@
+setup *OPTIONS:
+    meson setup build --prefix="$PWD/outputs/out" $mesonFlags {{ OPTIONS }}
+
+build *OPTIONS:
+    meson compile -C build {{ OPTIONS }}
+
+install *OPTIONS: (build OPTIONS)
+    meson install -C build
+
+test *OPTIONS:
+    meson test -C build --print-errorlogs {{ OPTIONS }}
+
+update-dbix:
+    cd src/sql && ./update-dbix-harness.sh
+
+perlcritic:
+    perlcritic .
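Each recipe is a thin wrapper around Meson. A minimal sketch of the equivalent raw commands, assuming the defaults above and an empty `$mesonFlags`:

```console
$ meson setup build --prefix="$PWD/outputs/out"
$ meson compile -C build
$ meson install -C build
$ meson test -C build --print-errorlogs
```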
meson.build (new file, +36)

@@ -0,0 +1,36 @@
+project('hydra', 'cpp',
+  version: files('version.txt'),
+  license: 'GPL-3.0',
+  default_options: [
+    'debug=true',
+    'optimization=2',
+    'cpp_std=c++20',
+  ],
+)
+
+lix_expr_dep = dependency('lix-expr', required: true)
+lix_main_dep = dependency('lix-main', required: true)
+lix_store_dep = dependency('lix-store', required: true)
+
+# Lix/Nix need extra flags not provided in its pkg-config files.
+lix_dep = declare_dependency(
+  dependencies: [
+    lix_expr_dep,
+    lix_main_dep,
+    lix_store_dep,
+  ],
+  compile_args: ['-include', 'lix/config.h'],
+)
+
+pqxx_dep = dependency('libpqxx', required: true)
+
+prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
+prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)
+
+mdbook = find_program('mdbook', native: true)
+perl = find_program('perl', native: true)
+
+subdir('doc/manual')
+subdir('nixos-modules')
+subdir('src')
+subdir('t')
@@ -339,6 +339,7 @@ in
   systemd.services.hydra-queue-runner =
     { wantedBy = [ "multi-user.target" ];
       requires = [ "hydra-init.service" ];
+      wants = [ "network-online.target" ];
       after = [ "hydra-init.service" "network.target" "network-online.target" ];
       path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
       restartTriggers = [ hydraConf ];
nixos-modules/meson.build (new file, +4)

@@ -0,0 +1,4 @@
+install_data('hydra.nix',
+  install_dir: get_option('datadir') / 'nix',
+  rename: ['hydra-module.nix'],
+)
package.nix (42 lines changed)

@@ -12,7 +12,8 @@
 , git
 
 , makeWrapper
-, autoreconfHook
+, meson
+, ninja
 , nukeReferences
 , pkg-config
 , mdbook

@@ -36,6 +37,7 @@
 
 , cacert
 , foreman
+, just
 , glibcLocales
 , libressl
 , openldap

@@ -92,6 +94,7 @@ let
       DigestSHA1
       EmailMIME
       EmailSender
+      FileCopyRecursive
       FileLibMagic
       FileSlurper
       FileWhich

@@ -139,20 +142,13 @@ stdenv.mkDerivation (finalAttrs: {
   src = fileset.toSource {
     root = ./.;
     fileset = fileset.unions ([
-      ./version.txt
-      ./configure.ac
-      ./Makefile.am
-      ./src
       ./doc
-      ./nixos-modules/hydra.nix
-      # These are always needed to appease Automake
-      ./t/Makefile.am
-      ./t/jobs/config.nix.in
-      ./t/jobs/declarative/project.json.in
-    ] ++ lib.optionals finalAttrs.doCheck [
+      ./meson.build
+      ./nixos-modules
+      ./src
       ./t
+      ./version.txt
       ./.perlcriticrc
-      ./.yath.rc
     ]);
   };

@@ -160,7 +156,8 @@
 
   nativeBuildInputs = [
     makeWrapper
-    autoreconfHook
+    meson
+    ninja
     nukeReferences
     pkg-config
     mdbook

@@ -194,6 +191,8 @@
     postgresql_13
     pixz
     nix-eval-jobs
+    perlPackages.PLS
+    just
   ];
 
   checkInputs = [

@@ -228,11 +227,17 @@
 
   OPENLDAP_ROOT = openldap;
 
+  mesonBuildType = "release";
+
+  postPatch = ''
+    patchShebangs .
+  '';
+
   shellHook = ''
     pushd $(git rev-parse --show-toplevel) >/dev/null
 
-    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
-    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
+    PATH=$(pwd)/outputs/out/bin:$PATH
+    PERL5LIB=$(pwd)/src/lib:$(pwd)/t/lib:$PERL5LIB
     export HYDRA_HOME="$(pwd)/src/"
     mkdir -p .hydra-data
     export HYDRA_DATA="$(pwd)/.hydra-data"

@@ -241,14 +246,11 @@
     popd >/dev/null
   '';
 
-  NIX_LDFLAGS = [ "-lpthread" ];
-
-  enableParallelBuilding = true;
-
   doCheck = true;
 
+  mesonCheckFlags = [ "--verbose" ];
+
   preCheck = ''
-    patchShebangs .
     export LOGNAME=''${LOGNAME:-foo}
     # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
@@ -1,3 +0,0 @@
-SUBDIRS = hydra-evaluator hydra-queue-runner sql script lib root ttf
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
@@ -1,5 +0,0 @@
-bin_PROGRAMS = hydra-evaluator
-
-hydra_evaluator_SOURCES = hydra-evaluator.cc
-hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
-hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
@@ -1,9 +1,9 @@
 #include "db.hh"
 #include "hydra-config.hh"
-#include "logging.hh"
-#include "pool.hh"
-#include "shared.hh"
-#include "signals.hh"
+#include "lix/libmain/shared.hh"
+#include "lix/libutil/logging.hh"
+#include "lix/libutil/pool.hh"
+#include "lix/libutil/signals.hh"
 
 #include <algorithm>
 #include <thread>

@@ -14,11 +14,12 @@
 #include <sys/wait.h>
 
 #include <boost/format.hpp>
+#include <utility>
 
 using namespace nix;
 using boost::format;
 
-typedef std::pair<std::string, std::string> JobsetName;
+using JobsetName = std::pair<std::string, std::string>;
 
 class JobsetId {
 public:

@@ -28,8 +29,8 @@ class JobsetId {
     int id;
 
 
-    JobsetId(const std::string & project, const std::string & jobset, int id)
-        : project{ project }, jobset{ jobset }, id{ id }
+    JobsetId(std::string project, std::string jobset, int id)
+        : project{std::move( project )}, jobset{std::move( jobset )}, id{ id }
     {
     }

@@ -41,7 +42,7 @@ class JobsetId {
     friend bool operator== (const JobsetId & lhs, const JobsetName & rhs);
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
 
-    std::string display() const {
+    [[nodiscard]] std::string display() const {
         return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };

@@ -88,11 +89,11 @@ struct Evaluator
         JobsetId name;
         std::optional<EvaluationStyle> evaluation_style;
         time_t lastCheckedTime, triggerTime;
-        int checkInterval;
+        time_t checkInterval;
         Pid pid;
     };
 
-    typedef std::map<JobsetId, Jobset> Jobsets;
+    using Jobsets = std::map<JobsetId, Jobset>;
 
     std::optional<JobsetName> evalOne;

@@ -138,13 +139,15 @@ struct Evaluator
 
             if (evalOne && name != *evalOne) continue;
 
-            auto res = state->jobsets.try_emplace(name, Jobset{name});
+            auto res = state->jobsets.try_emplace(name, Jobset{.name=name});
 
             auto & jobset = res.first->second;
             jobset.lastCheckedTime = row["lastCheckedTime"].as<time_t>(0);
             jobset.triggerTime = row["triggerTime"].as<time_t>(notTriggered);
             jobset.checkInterval = row["checkInterval"].as<time_t>();
-            switch (row["jobset_enabled"].as<int>(0)) {
+
+            int eval_style = row["jobset_enabled"].as<int>(0);
+            switch (eval_style) {
                 case 1:
                     jobset.evaluation_style = EvaluationStyle::SCHEDULE;
                     break;

@@ -154,6 +157,9 @@ struct Evaluator
                 case 3:
                     jobset.evaluation_style = EvaluationStyle::ONE_AT_A_TIME;
                     break;
+                default:
+                    // Disabled or unknown. Leave as nullopt.
+                    break;
             }
 
             seen.insert(name);

@@ -175,7 +181,7 @@ struct Evaluator
 
     void startEval(State & state, Jobset & jobset)
     {
-        time_t now = time(0);
+        time_t now = time(nullptr);
 
         printInfo("starting evaluation of jobset ‘%s’ (last checked %d s ago)",
             jobset.name.display(),

@@ -228,7 +234,7 @@ struct Evaluator
             return false;
         }
 
-        if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) {
+        if (jobset.lastCheckedTime + jobset.checkInterval <= time(nullptr)) {
             // Time to schedule a fresh evaluation. If the jobset
             // is a ONE_AT_A_TIME jobset, ensure the previous jobset
             // has no remaining, unfinished work.

@@ -301,7 +307,7 @@ struct Evaluator
 
         /* Put jobsets in order of ascending trigger time, last checked
            time, and name. */
-        std::sort(sorted.begin(), sorted.end(),
+        std::ranges::sort(sorted,
            [](const Jobsets::iterator & a, const Jobsets::iterator & b) {
                return
                    a->second.triggerTime != b->second.triggerTime

@@ -324,7 +330,7 @@ struct Evaluator
 
        while (true) {
 
-            time_t now = time(0);
+            time_t now = time(nullptr);
 
            std::chrono::seconds sleepTime = std::chrono::seconds::max();

@@ -411,7 +417,7 @@ struct Evaluator
        printInfo("evaluation of jobset ‘%s’ %s",
            jobset.name.display(), statusToString(status));
 
-        auto now = time(0);
+        auto now = time(nullptr);
 
        jobset.triggerTime = notTriggered;
        jobset.lastCheckedTime = now;

@@ -507,14 +513,14 @@ int main(int argc, char * * argv)
 
    std::vector<std::string> args;
 
-    parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+    LegacyArgs(argv[0], [&](Strings::iterator & arg, const Strings::iterator & end) {
        if (*arg == "--unlock")
            unlock = true;
        else if (arg->starts_with("-"))
            return false;
        args.push_back(*arg);
        return true;
-    });
+    }).parseCmdline(Strings(argv + 1, argv + argc));
 
 
    if (unlock)
src/hydra-evaluator/meson.build (new file, +9)

@@ -0,0 +1,9 @@
+hydra_evaluator = executable('hydra-evaluator',
+  'hydra-evaluator.cc',
+  dependencies: [
+    libhydra_dep,
+    lix_dep,
+    pqxx_dep,
+  ],
+  install: true,
+)
@@ -1,8 +0,0 @@
-bin_PROGRAMS = hydra-queue-runner
-
-hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
-  builder.cc build-result.cc build-remote.cc \
-  hydra-build-result.hh counter.hh state.hh db.hh \
-  nar-extractor.cc nar-extractor.hh
-hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core
-hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
@ -1,20 +1,22 @@
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
|
#include <ranges>
|
||||||
|
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
|
|
||||||
#include "build-result.hh"
|
#include "lix/libstore/build-result.hh"
|
||||||
#include "current-process.hh"
|
#include "lix/libstore/path.hh"
|
||||||
#include "path.hh"
|
#include "lix/libstore/serve-protocol-impl.hh"
|
||||||
#include "serve-protocol.hh"
|
#include "lix/libstore/serve-protocol.hh"
|
||||||
|
#include "lix/libstore/ssh.hh"
|
||||||
|
#include "lix/libutil/current-process.hh"
|
||||||
|
#include "lix/libutil/finally.hh"
|
||||||
|
#include "lix/libutil/url.hh"
|
||||||
#include "state.hh"
|
#include "state.hh"
|
||||||
#include "serve-protocol.hh"
|
|
||||||
#include "serve-protocol-impl.hh"
|
#include "lix/libstore/temporary-dir.hh"
|
||||||
#include "ssh.hh"
|
|
||||||
#include "finally.hh"
|
|
||||||
#include "url.hh"
|
|
||||||
|
|
||||||
using namespace nix;
|
using namespace nix;
|
||||||
|
|
||||||
|
@ -41,6 +43,7 @@ static Strings extraStoreArgs(std::string & machine)
|
||||||
}
|
}
|
||||||
} catch (BadURL &) {
|
} catch (BadURL &) {
|
||||||
// We just try to continue with `machine->sshName` here for backwards compat.
|
// We just try to continue with `machine->sshName` here for backwards compat.
|
||||||
|
printMsg(lvlWarn, "could not parse machine URL '%s', passing through to SSH", machine);
|
||||||
}
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
|
@ -65,8 +68,8 @@ static void openConnection(::Machine::ptr machine, Path tmpDir, int stderrFD, SS
|
||||||
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
|
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
|
||||||
if (machine->sshPublicHostKey != "") {
|
if (machine->sshPublicHostKey != "") {
|
||||||
Path fileName = tmpDir + "/host-key";
|
Path fileName = tmpDir + "/host-key";
|
||||||
auto p = machine->sshName.find("@");
|
auto p = sshName.find("@");
|
||||||
std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
|
std::string host = p != std::string::npos ? std::string(sshName, p + 1) : sshName;
|
||||||
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
|
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
|
||||||
append(argv, {"-oUserKnownHostsFile=" + fileName});
|
append(argv, {"-oUserKnownHostsFile=" + fileName});
|
||||||
}
|
}
|
||||||
|
@ -133,8 +136,8 @@ static void copyClosureTo(
|
||||||
auto sorted = destStore.topoSortPaths(closure);
|
auto sorted = destStore.topoSortPaths(closure);
|
||||||
|
|
||||||
StorePathSet missing;
|
StorePathSet missing;
|
||||||
for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
|
for (auto & i : std::ranges::reverse_view(sorted))
|
||||||
if (!present.count(*i)) missing.insert(*i);
|
if (!present.count(i)) missing.insert(i);
|
||||||
|
|
||||||
printMsg(lvlDebug, "sending %d missing paths", missing.size());
|
printMsg(lvlDebug, "sending %d missing paths", missing.size());
|
src/hydra-queue-runner/build-remote.cc
@@ -304,12 +307,12 @@ static BuildResult performBuild(
 
     time_t startTime, stopTime;
 
-    startTime = time(0);
+    startTime = time(nullptr);
     {
         MaintainCount<counter> mc(nrStepsBuilding);
         result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
     }
-    stopTime = time(0);
+    stopTime = time(nullptr);
 
     if (!result.startTime) {
         // If the builder gave `startTime = 0`, use our measurements
@@ -338,10 +341,10 @@ static BuildResult performBuild(
             // were known
             assert(outputPath);
             auto outputHash = outputHashes.at(outputName);
-            auto drvOutput = DrvOutput { outputHash, outputName };
+            auto drvOutput = DrvOutput { .drvHash=outputHash, .outputName=outputName };
             result.builtOutputs.insert_or_assign(
                 std::move(outputName),
-                Realisation { drvOutput, *outputPath });
+                Realisation { .id=drvOutput, .outPath=*outputPath });
         }
     }
 
@@ -368,7 +371,7 @@ static std::map<StorePath, ValidPathInfo> queryPathInfos(
         auto references = ServeProto::Serialise<StorePathSet>::read(localStore, conn);
         readLongLong(conn.from); // download size
         auto narSize = readLongLong(conn.from);
-        auto narHash = Hash::parseAny(readString(conn.from), htSHA256);
+        auto narHash = Hash::parseAny(readString(conn.from), HashType::SHA256);
         auto ca = ContentAddress::parseOpt(readString(conn.from));
         readStrings<StringSet>(conn.from); // sigs
         ValidPathInfo info(localStore.parseStorePath(storePathS), narHash);
@@ -397,8 +400,7 @@ static void copyPathFromRemote(
     /* Receive the NAR from the remote and add it to the
        destination store. Meanwhile, extract all the info from the
        NAR that getBuildOutput() needs. */
-    auto source2 = sinkToSource([&](Sink & sink)
-    {
+    auto coro = [&]() -> WireFormatGenerator {
         /* Note: we should only send the command to dump the store
            path to the remote if the NAR is actually going to get read
           by the destination store, which won't happen if this path
@@ -409,11 +411,11 @@ static void copyPathFromRemote(
         conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
         conn.to.flush();
 
-        TeeSource tee(conn.from, sink);
-        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
-    });
+        co_yield extractNarDataFilter(conn.from, localStore.printStorePath(info.path), narMembers);
+    };
+    GeneratorSource source2{coro()};
 
-    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+    destStore.addToStore(info, source2, NoRepair, NoCheckSigs);
 }
 
 static void copyPathsFromRemote(
@@ -624,6 +626,7 @@ void State::buildRemote(ref<Store> destStore,
     /* Throttle CPU-bound work. Opportunistically skip updating the current
      * step, since this requires a DB roundtrip. */
     if (!localWorkThrottler.try_acquire()) {
+        MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
         updateStep(ssWaitingForLocalSlot);
         localWorkThrottler.acquire();
     }
@@ -635,7 +638,7 @@ void State::buildRemote(ref<Store> destStore,
     * copying outputs and we end up building too many things that we
     * haven't been able to allow copy slots for. */
    assert(reservation.unique());
-    reservation = 0;
+    reservation = nullptr;
    wakeDispatcher();
 
    StorePathSet outputs;
@@ -698,7 +701,7 @@ void State::buildRemote(ref<Store> destStore,
            if (info->consecutiveFailures == 0 || info->lastFailure < now - std::chrono::seconds(30)) {
                info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
                info->lastFailure = now;
-                int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
+                int delta = static_cast<int>(retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30));
                printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
                info->disabledUntil = now + std::chrono::seconds(delta);
            }
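Note: the sinkToSource-to-coroutine rewrite in copyPathFromRemote above replaces a push-style callback (data forced through a Sink, with a TeeSource to inspect it in flight) with a pull-style generator that the consumer drains on demand. WireFormatGenerator and GeneratorSource are lix types, used exactly as shown in the hunk; the following standalone sketch uses hypothetical names and plain C++20 to illustrate the underlying pattern, it is not the lix API.

#include <coroutine>
#include <iostream>
#include <optional>
#include <string_view>
#include <utility>

// Minimal pull-style chunk generator, analogous in spirit to
// WireFormatGenerator: the producer suspends at every co_yield and the
// consumer resumes it only when it actually wants the next chunk.
struct ChunkGenerator {
    struct promise_type {
        std::string_view current;
        ChunkGenerator get_return_object() {
            return ChunkGenerator{std::coroutine_handle<promise_type>::from_promise(*this)};
        }
        std::suspend_always initial_suspend() noexcept { return {}; }
        std::suspend_always final_suspend() noexcept { return {}; }
        std::suspend_always yield_value(std::string_view chunk) noexcept {
            current = chunk;
            return {};
        }
        void return_void() noexcept {}
        void unhandled_exception() { throw; }
    };

    explicit ChunkGenerator(std::coroutine_handle<promise_type> h) : handle(h) {}
    ChunkGenerator(ChunkGenerator && other) noexcept : handle(std::exchange(other.handle, {})) {}
    ChunkGenerator(const ChunkGenerator &) = delete;
    ~ChunkGenerator() { if (handle) handle.destroy(); }

    // Pull the next chunk; std::nullopt once the producer is done.
    std::optional<std::string_view> next() {
        handle.resume();
        if (handle.done()) return std::nullopt;
        return handle.promise().current;
    }

private:
    std::coroutine_handle<promise_type> handle;
};

// The producer side: roughly what the `coro` lambda above does, except that
// the real code yields NAR data read from the remote connection.
ChunkGenerator dumpStorePath() {
    co_yield "nar-header";
    co_yield "file-contents";
    co_yield "nar-trailer";
}

int main() {
    auto gen = dumpStorePath();
    while (auto chunk = gen.next())  // roughly what a GeneratorSource-style adapter does
        std::cout << *chunk << '\n';
}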
src/hydra-queue-runner/build-result.cc
@@ -1,6 +1,7 @@
 #include "hydra-build-result.hh"
-#include "store-api.hh"
-#include "fs-accessor.hh"
+#include "lix/libstore/fs-accessor.hh"
+#include "lix/libstore/store-api.hh"
+#include "lix/libutil/strings.hh"
 
 #include <regex>
 
@@ -34,11 +35,8 @@ BuildOutput getBuildOutput(
         auto outputS = store->printStorePath(output);
         if (!narMembers.count(outputS)) {
             printInfo("fetching NAR contents of '%s'...", outputS);
-            auto source = sinkToSource([&](Sink & sink)
-            {
-                sink << store->narFromPath(output);
-            });
-            extractNarData(*source, outputS, narMembers);
+            GeneratorSource source{store->narFromPath(output)};
+            extractNarData(source, outputS, narMembers);
         }
     }
 
src/hydra-queue-runner/builder.cc
@@ -1,9 +1,10 @@
 #include <cmath>
 
-#include "state.hh"
 #include "hydra-build-result.hh"
-#include "finally.hh"
-#include "binary-cache-store.hh"
+#include "lix/libstore/binary-cache-store.hh"
+#include "lix/libutil/error.hh"
+#include "lix/libutil/finally.hh"
+#include "state.hh"
 
 using namespace nix;
 
@@ -35,10 +36,18 @@ void State::builder(MachineReservation::ptr reservation)
         activeSteps_.lock()->erase(activeStep);
     });
 
+    auto conn(dbPool.get());
+
     try {
         auto destStore = getDestStore();
         // Might release the reservation.
-        res = doBuildStep(destStore, reservation, activeStep);
+        res = doBuildStep(destStore, reservation, *conn, activeStep);
+    } catch (pqxx::broken_connection & e) {
+        printMsg(lvlError, "db lost while building ‘%s’ on ‘%s’: %s (retriable)",
+            localStore->printStorePath(activeStep->step->drvPath),
+            reservation ? reservation->machine->sshName : std::string("(no machine)"),
+            e.what());
+        conn.markBad();
     } catch (std::exception & e) {
         printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
             localStore->printStorePath(activeStep->step->drvPath),
@@ -50,7 +59,7 @@ void State::builder(MachineReservation::ptr reservation)
     /* If the machine hasn't been released yet, release and wake up the dispatcher. */
     if (reservation) {
         assert(reservation.unique());
-        reservation = 0;
+        reservation = nullptr;
         wakeDispatcher();
     }
 
@@ -64,7 +73,7 @@ void State::builder(MachineReservation::ptr reservation)
             step_->tries++;
             nrRetries++;
             if (step_->tries > maxNrRetries) maxNrRetries = step_->tries; // yeah yeah, not atomic
-            int delta = retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10);
+            int delta = static_cast<int>(retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10));
            printMsg(lvlInfo, "will retry ‘%s’ after %ss", localStore->printStorePath(step->drvPath), delta);
            step_->after = std::chrono::system_clock::now() + std::chrono::seconds(delta);
        }
@@ -76,6 +85,7 @@ void State::builder(MachineReservation::ptr reservation)
 
 State::StepResult State::doBuildStep(nix::ref<Store> destStore,
     MachineReservation::ptr & reservation,
+    Connection & conn,
     std::shared_ptr<ActiveStep> activeStep)
 {
     auto step(reservation->step);
@@ -106,8 +116,6 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
     buildOptions.maxLogSize = maxLogSize;
     buildOptions.enforceDeterminism = step->isDeterministic;
 
-    auto conn(dbPool.get());
-
     {
         std::set<Build::ptr> dependents;
         std::set<Step::ptr> steps;
@@ -132,7 +140,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         for (auto build2 : dependents) {
             if (build2->drvPath == step->drvPath) {
                 build = build2;
-                pqxx::work txn(*conn);
+                pqxx::work txn(conn);
                 notifyBuildStarted(txn, build->id);
                 txn.commit();
             }
@@ -178,16 +186,16 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
                 unlink(result.logFile.c_str());
             }
         } catch (...) {
-            ignoreException();
+            ignoreExceptionInDestructor();
         }
     });
 
-    time_t stepStartTime = result.startTime = time(0);
+    time_t stepStartTime = result.startTime = time(nullptr);
 
     /* If any of the outputs have previously failed, then don't bother
        building again. */
-    if (checkCachedFailure(step, *conn))
+    if (checkCachedFailure(step, conn))
         result.stepStatus = bsCachedFailure;
     else {
 
@@ -195,13 +203,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
           building. */
        {
            auto mc = startDbUpdate();
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
            stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
            txn.commit();
        }
 
        auto updateStep = [&](StepState stepState) {
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
            updateBuildStep(txn, buildId, stepNr, stepState);
            txn.commit();
        };
@@ -230,7 +238,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
            }
        }
 
-        time_t stepStopTime = time(0);
+        time_t stepStopTime = time(nullptr);
        if (!result.stopTime) result.stopTime = stepStopTime;
 
        /* For standard failures, we don't care about the error
@@ -244,7 +252,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        auto step_(step->state.lock());
        if (!step_->jobsets.empty()) {
            // FIXME: loss of precision.
-            time_t charge = (result.stopTime - result.startTime) / step_->jobsets.size();
+            time_t charge = (result.stopTime - result.startTime) / static_cast<time_t>(step_->jobsets.size());
            for (auto & jobset : step_->jobsets)
                jobset->addStep(result.startTime, charge);
        }
@@ -252,7 +260,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
    /* Finish the step in the database. */
    if (stepNr) {
-        pqxx::work txn(*conn);
+        pqxx::work txn(conn);
        finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
        txn.commit();
    }
@@ -328,7 +336,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        {
            auto mc = startDbUpdate();
 
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
 
            for (auto & b : direct) {
                printInfo("marking build %1% as succeeded", b->id);
@@ -356,7 +364,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        /* Send notification about the builds that have this step as
           the top-level. */
        {
-            pqxx::work txn(*conn);
+            pqxx::work txn(conn);
            for (auto id : buildIDs)
                notifyBuildFinished(txn, id, {});
            txn.commit();
@@ -385,7 +393,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        }
 
    } else
-        failStep(*conn, step, buildId, result, machine, stepFinished);
+        failStep(conn, step, buildId, result, machine, stepFinished);
 
    // FIXME: keep stats about aborted steps?
    nrStepsDone++;
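Note: the builder.cc changes above hoist the database connection out of doBuildStep into the builder thread and add a dedicated handler for pqxx::broken_connection, so a dropped PostgreSQL connection marks the pooled handle bad (conn.markBad(), as in the hunk) and the step is retried rather than failed outright. A minimal sketch of the transaction pattern involved; pqxx::work and pqxx::broken_connection are real libpqxx types, while the connection string and table below are placeholders.

#include <iostream>
#include <pqxx/pqxx>

// One short transaction per update, as in doBuildStep: open a pqxx::work on
// the shared connection, run the statement, commit immediately.
void recordStepState(pqxx::connection & conn, int buildId, int state)
{
    pqxx::work txn(conn);
    txn.exec_params("update BuildSteps set status = $2 where build = $1",
        buildId, state);
    txn.commit();
}

int main()
try {
    pqxx::connection conn("dbname=hydra");  // placeholder connection string
    recordStepState(conn, 1, 0);
} catch (pqxx::broken_connection & e) {
    // With a pooled connection this is the point where hydra marks the
    // handle bad, so the pool discards it instead of reusing it.
    std::cerr << "database connection lost: " << e.what() << '\n';
    return 1;
}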
src/hydra-queue-runner/dispatcher.cc
@@ -46,7 +46,7 @@ void State::dispatcher()
             auto t_after_work = std::chrono::steady_clock::now();
 
             prom.dispatcher_time_spent_running.Increment(
-                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count()));
             dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
 
             /* Sleep until we're woken up (either because a runnable build
@@ -63,7 +63,7 @@ void State::dispatcher()
 
             auto t_after_sleep = std::chrono::steady_clock::now();
             prom.dispatcher_time_spent_waiting.Increment(
-                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count()));
 
         } catch (std::exception & e) {
             printError("dispatcher: %s", e.what());
@@ -190,7 +190,7 @@ system_time State::doDispatch()
             }
         }
 
-        sort(runnableSorted.begin(), runnableSorted.end(),
+        std::ranges::sort(runnableSorted,
             [](const StepInfo & a, const StepInfo & b)
             {
                 return
@@ -240,11 +240,11 @@ system_time State::doDispatch()
              - Then by speed factor.
 
              - Finally by load. */
-        sort(machinesSorted.begin(), machinesSorted.end(),
+        std::ranges::sort(machinesSorted,
             [](const MachineInfo & a, const MachineInfo & b) -> bool
             {
-                float ta = std::round(a.currentJobs / a.machine->speedFactorFloat);
-                float tb = std::round(b.currentJobs / b.machine->speedFactorFloat);
+                float ta = std::round(static_cast<float>(a.currentJobs) / a.machine->speedFactorFloat);
+                float tb = std::round(static_cast<float>(b.currentJobs) / b.machine->speedFactorFloat);
                 return
                     ta != tb ? ta < tb :
                     a.machine->speedFactorFloat != b.machine->speedFactorFloat ? a.machine->speedFactorFloat > b.machine->speedFactorFloat :
@@ -345,7 +345,7 @@ void State::abortUnsupported()
     auto machines2 = *machines.lock();
 
     system_time now = std::chrono::system_clock::now();
-    auto now2 = time(0);
+    auto now2 = time(nullptr);
 
     std::unordered_set<Step::ptr> aborted;
 
@@ -436,7 +436,7 @@ void Jobset::addStep(time_t startTime, time_t duration)
 
 void Jobset::pruneSteps()
 {
-    time_t now = time(0);
+    time_t now = time(nullptr);
     auto steps_(steps.lock());
     while (!steps_->empty()) {
         auto i = steps_->begin();
@@ -464,7 +464,7 @@ State::MachineReservation::~MachineReservation()
     auto prev = machine->state->currentJobs--;
     assert(prev);
     if (prev == 1)
-        machine->state->idleSince = time(0);
+        machine->state->idleSince = time(nullptr);
 
     {
         auto machineTypes_(state.machineTypes.lock());
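Note: both comparator sorts in doDispatch move from the iterator-pair sort() to the C++20 range overload, and the integer operand is now cast explicitly before the float division. A standalone equivalent of the machine ordering, with the fields simplified for illustration:

#include <algorithm>
#include <cmath>
#include <vector>

struct MachineInfo {
    unsigned int currentJobs;
    float speedFactor;
};

int main()
{
    std::vector<MachineInfo> machines{{4, 2.0f}, {1, 1.0f}, {0, 1.0f}};

    // Least-loaded machine first, normalised by speed factor, as above.
    std::ranges::sort(machines, [](const MachineInfo & a, const MachineInfo & b) {
        float ta = std::round(static_cast<float>(a.currentJobs) / a.speedFactor);
        float tb = std::round(static_cast<float>(b.currentJobs) / b.speedFactor);
        return ta < tb;
    });
}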
src/hydra-queue-runner/hydra-build-result.hh
@@ -2,9 +2,9 @@
 
 #include <memory>
 
-#include "hash.hh"
-#include "derivations.hh"
-#include "store-api.hh"
+#include "lix/libstore/derivations.hh"
+#include "lix/libstore/store-api.hh"
+#include "lix/libutil/hash.hh"
 #include "nar-extractor.hh"
 
 struct BuildProduct
@@ -14,7 +14,7 @@ struct BuildProduct
     bool isRegular = false;
     std::optional<nix::Hash> sha256hash;
     std::optional<off_t> fileSize;
-    BuildProduct() { }
+    BuildProduct() = default;
 };
 
 struct BuildMetric
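Note: BuildProduct() = default; is behaviour-preserving; an explicitly defaulted constructor performs the same member-wise default initialisation as the old empty body. This is the usual modernize-use-equals-default cleanup. A compact illustration:

#include <optional>

struct BuildProduct {
    bool isRegular = false;
    std::optional<int> fileSize;
    BuildProduct() = default;  // same semantics as `BuildProduct() { }`
};

int main()
{
    BuildProduct p;
    return p.isRegular ? 1 : 0;  // default member initialisers still apply
}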
src/hydra-queue-runner/hydra-queue-runner.cc
@@ -11,15 +11,16 @@
 
 #include <nlohmann/json.hpp>
 
-#include "signals.hh"
-#include "state.hh"
 #include "hydra-build-result.hh"
-#include "store-api.hh"
+#include "lix/libstore/store-api.hh"
+#include "lix/libutil/signals.hh"
+#include "state.hh"
 
-#include "globals.hh"
 #include "hydra-config.hh"
-#include "s3-binary-cache-store.hh"
-#include "shared.hh"
+#include "lix/libmain/shared.hh"
+#include "lix/libstore/globals.hh"
+#include "lix/libstore/s3-binary-cache-store.hh"
+#include "lix/libutil/args.hh"
 
 using namespace nix;
 using nlohmann::json;
@@ -105,7 +106,7 @@ State::State(std::optional<std::string> metricsAddrOpt)
     : config(std::make_unique<HydraConfig>())
     , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
     , dbPool(config->getIntOption("max_db_connections", 128))
-    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
+    , localWorkThrottler(static_cast<ptrdiff_t>(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2))))
     , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
     , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
     , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
@@ -138,7 +139,7 @@ nix::MaintainCount<counter> State::startDbUpdate()
 {
     if (nrActiveDbUpdates > 6)
         printError("warning: %d concurrent database updates; PostgreSQL may be stalled", nrActiveDbUpdates.load());
-    return MaintainCount<counter>(nrActiveDbUpdates);
+    return {nrActiveDbUpdates};
 }
 
 
@@ -171,9 +172,9 @@ void State::parseMachines(const std::string & contents)
         for (auto & f : mandatoryFeatures)
             supportedFeatures.insert(f);
 
-        using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;
+        using MaxJobs = std::remove_const_t<decltype(nix::Machine::maxJobs)>;
 
-        auto machine = std::make_shared<::Machine>(nix::Machine {
+        auto machine = std::make_shared<::Machine>(::Machine {{
            // `storeUri`, not yet used
            "",
            // `systemTypes`, not yet used
@@ -194,11 +195,11 @@ void State::parseMachines(const std::string & contents)
            tokens[7] != "" && tokens[7] != "-"
                ? base64Decode(tokens[7])
                : "",
-        });
+        }});
 
        machine->sshName = tokens[0];
        machine->systemTypesSet = tokenizeString<StringSet>(tokens[1], ",");
-        machine->speedFactorFloat = atof(tokens[4].c_str());
+        machine->speedFactorFloat = static_cast<float>(atof(tokens[4].c_str()));
 
        /* Re-use the State object of the previous machine with the
          same name. */
@@ -412,7 +413,7 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
 }
 
 
-int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
+unsigned int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
     Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
 {
 restart:
@@ -534,7 +535,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
                 product.type,
                 product.subtype,
                 product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
-                product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
+                product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base::Base16, false)) : std::nullopt,
                 product.path,
                 product.name,
                 product.defaultPath);
@@ -594,7 +595,7 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
     createDirs(dirOf(lockPath));
 
     auto lock = std::make_shared<PathLocks>();
-    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return 0;
+    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return nullptr;
 
     return lock;
 }
@@ -602,10 +603,10 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
 
 void State::dumpStatus(Connection & conn)
 {
-    time_t now = time(0);
+    time_t now = time(nullptr);
     json statusJson = {
         {"status", "up"},
-        {"time", time(0)},
+        {"time", time(nullptr)},
         {"uptime", now - startedAt},
         {"pid", getpid()},
 
@@ -613,6 +614,7 @@ void State::dumpStatus(Connection & conn)
         {"nrActiveSteps", activeSteps_.lock()->size()},
         {"nrStepsBuilding", nrStepsBuilding.load()},
         {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
+        {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
         {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
         {"nrStepsWaiting", nrStepsWaiting.load()},
         {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
@@ -620,7 +622,7 @@ void State::dumpStatus(Connection & conn)
         {"bytesReceived", bytesReceived.load()},
         {"nrBuildsRead", nrBuildsRead.load()},
         {"buildReadTimeMs", buildReadTimeMs.load()},
-        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
+        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / (float) nrBuildsRead},
         {"nrBuildsDone", nrBuildsDone.load()},
         {"nrStepsStarted", nrStepsStarted.load()},
         {"nrStepsDone", nrStepsDone.load()},
@@ -629,7 +631,7 @@ void State::dumpStatus(Connection & conn)
         {"nrQueueWakeups", nrQueueWakeups.load()},
         {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
         {"dispatchTimeMs", dispatchTimeMs.load()},
-        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
+        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / (float) nrDispatcherWakeups},
         {"nrDbConnections", dbPool.count()},
         {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
     };
@@ -649,8 +651,8 @@ void State::dumpStatus(Connection & conn)
     if (nrStepsDone) {
         statusJson["totalStepTime"] = totalStepTime.load();
         statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
-        statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
-        statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
+        statusJson["avgStepTime"] = (float) totalStepTime / (float) nrStepsDone;
+        statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / (float) nrStepsDone;
     }
 
     {
@@ -677,8 +679,8 @@ void State::dumpStatus(Connection & conn)
             if (m->state->nrStepsDone) {
                 machine["totalStepTime"] = s->totalStepTime.load();
                 machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
-                machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
-                machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
+                machine["avgStepTime"] = (float) s->totalStepTime / (float) s->nrStepsDone;
+                machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / (float) s->nrStepsDone;
             }
             statusJson["machines"][m->sshName] = machine;
         }
@@ -706,7 +708,7 @@ void State::dumpStatus(Connection & conn)
             };
             if (i.second.runnable > 0)
                 machineTypeJson["waitTime"] = i.second.waitTime.count() +
-                    i.second.runnable * (time(0) - lastDispatcherCheck);
+                    i.second.runnable * (time(nullptr) - lastDispatcherCheck);
             if (i.second.running == 0)
                 machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
         }
@@ -732,11 +734,11 @@ void State::dumpStatus(Connection & conn)
             {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
             {"narCompressionSavings",
              stats.narWriteBytes
-             ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+             ? 1.0 - (double) stats.narWriteCompressedBytes / (double) stats.narWriteBytes
             : 0.0},
            {"narCompressionSpeed", // MiB/s
             stats.narWriteCompressionTimeMs
-             ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
+             ? (double) stats.narWriteBytes / (double) stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
             : 0.0},
        };
 
@@ -749,20 +751,20 @@ void State::dumpStatus(Connection & conn)
            {"putTimeMs", s3Stats.putTimeMs.load()},
            {"putSpeed",
             s3Stats.putTimeMs
-             ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
+             ? (double) s3Stats.putBytes / (double) s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
             : 0.0},
            {"get", s3Stats.get.load()},
            {"getBytes", s3Stats.getBytes.load()},
            {"getTimeMs", s3Stats.getTimeMs.load()},
            {"getSpeed",
             s3Stats.getTimeMs
-             ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
+             ? (double) s3Stats.getBytes / (double) s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
             : 0.0},
            {"head", s3Stats.head.load()},
            {"costDollarApprox",
-             (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
-             + s3Stats.put / 1000.0 * 0.005 +
-             + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
+             (double) (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+             + (double) s3Stats.put / 1000.0 * 0.005 +
+             + (double) s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
        };
    }
 }
@@ -848,7 +850,7 @@ void State::run(BuildID buildOne)
     /* Can't be bothered to shut down cleanly. Goodbye! */
     auto callback = createInterruptCallback([&]() { std::_Exit(0); });
 
-    startedAt = time(0);
+    startedAt = time(nullptr);
     this->buildOne = buildOne;
 
     auto lock = acquireGlobalLock();
@@ -867,7 +869,7 @@ void State::run(BuildID buildOne)
             << metricsAddr << "/metrics (port " << exposerPort << ")"
             << std::endl;
 
-    Store::Params localParams;
+    StoreConfig::Params localParams;
     localParams["max-connections"] = "16";
     localParams["max-connection-age"] = "600";
     localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
@@ -972,7 +974,7 @@ int main(int argc, char * * argv)
     BuildID buildOne = 0;
     std::optional<std::string> metricsAddrOpt = std::nullopt;
 
-    parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
+    LegacyArgs(argv[0], [&](Strings::iterator & arg, const Strings::iterator & end) {
         if (*arg == "--unlock")
             unlock = true;
         else if (*arg == "--status")
@@ -987,7 +989,7 @@ int main(int argc, char * * argv)
         } else
             return false;
         return true;
-    });
+    }).parseCmdline(Strings(argv + 1, argv + argc));
 
     settings.verboseBuild = true;
 
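Note: the localWorkThrottler initialiser above now casts its capacity to ptrdiff_t, the count type a std::counting_semaphore constructor takes (state.hh already includes <semaphore>, as the next file shows); that the throttler is a counting semaphore is an inference from those two facts. The try_acquire-then-acquire shape used in buildRemote looks like this in isolation, with an illustrative capacity and workload:

#include <iostream>
#include <semaphore>

// Capacity bounds how many threads may do CPU-heavy work at once.
std::counting_semaphore<> localWorkThrottler{4};

void throttledWork()
{
    if (!localWorkThrottler.try_acquire()) {
        // Fast path failed: note that we are waiting, then block for a slot.
        std::cout << "waiting for a local work slot\n";
        localWorkThrottler.acquire();
    }
    // ... CPU-bound work ...
    localWorkThrottler.release();
}

int main() { throttledWork(); }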
src/hydra-queue-runner/meson.build (new file, 22 lines)
@@ -0,0 +1,22 @@
+srcs = files(
+  'builder.cc',
+  'build-remote.cc',
+  'build-result.cc',
+  'dispatcher.cc',
+  'hydra-queue-runner.cc',
+  'nar-extractor.cc',
+  'queue-monitor.cc',
+)
+
+hydra_queue_runner = executable('hydra-queue-runner',
+  'hydra-queue-runner.cc',
+  srcs,
+  dependencies: [
+    libhydra_dep,
+    lix_dep,
+    pqxx_dep,
+    prom_cpp_core_dep,
+    prom_cpp_pull_dep,
+  ],
+  install: true,
+)
src/hydra-queue-runner/nar-extractor.cc
@@ -1,13 +1,43 @@
 #include "nar-extractor.hh"
 
-#include "archive.hh"
+#include "lix/libutil/archive.hh"
 
 #include <unordered_set>
+#include <utility>
 
 using namespace nix;
 
-struct Extractor : ParseSink
+struct Extractor : NARParseVisitor
 {
+    class MyFileHandle : public FileHandle
+    {
+        NarMemberData & memberData;
+        uint64_t expectedSize;
+        std::unique_ptr<HashSink> hashSink;
+
+    public:
+        MyFileHandle(NarMemberData & memberData, uint64_t size) : memberData(memberData), expectedSize(size)
+        {
+            hashSink = std::make_unique<HashSink>(HashType::SHA256);
+        }
+
+        void receiveContents(std::string_view data) override
+        {
+            *memberData.fileSize += data.size();
+            (*hashSink)(data);
+            if (memberData.contents) {
+                memberData.contents->append(data);
+            }
+            assert(memberData.fileSize <= expectedSize);
+            if (memberData.fileSize == expectedSize) {
+                auto [hash, len] = hashSink->finish();
+                assert(memberData.fileSize == len);
+                memberData.sha256 = hash;
+                hashSink.reset();
+            }
+        }
+    };
+
     std::unordered_set<Path> filesToKeep {
         "/nix-support/hydra-build-products",
         "/nix-support/hydra-release-name",
@@ -15,11 +45,10 @@ struct Extractor : ParseSink
     };
 
     NarMemberDatas & members;
-    NarMemberData * curMember = nullptr;
     Path prefix;
 
-    Extractor(NarMemberDatas & members, const Path & prefix)
-        : members(members), prefix(prefix)
+    Extractor(NarMemberDatas & members, Path prefix)
+        : members(members), prefix(std::move(prefix))
     { }
 
     void createDirectory(const Path & path) override
@@ -27,41 +56,15 @@ struct Extractor : ParseSink
         members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
     }
 
-    void createRegularFile(const Path & path) override
+    std::unique_ptr<FileHandle> createRegularFile(const Path & path, uint64_t size, bool executable) override
     {
-        curMember = &members.insert_or_assign(prefix + path, NarMemberData {
+        auto memberData = &members.insert_or_assign(prefix + path, NarMemberData {
             .type = FSAccessor::Type::tRegular,
             .fileSize = 0,
             .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
         }).first->second;
-    }
-
-    std::optional<uint64_t> expectedSize;
-    std::unique_ptr<HashSink> hashSink;
 
-    void preallocateContents(uint64_t size) override
-    {
-        expectedSize = size;
-        hashSink = std::make_unique<HashSink>(htSHA256);
-    }
-
-    void receiveContents(std::string_view data) override
-    {
-        assert(expectedSize);
-        assert(curMember);
-        assert(hashSink);
-        *curMember->fileSize += data.size();
-        (*hashSink)(data);
-        if (curMember->contents) {
-            curMember->contents->append(data);
-        }
-        assert(curMember->fileSize <= expectedSize);
-        if (curMember->fileSize == expectedSize) {
-            auto [hash, len] = hashSink->finish();
-            assert(curMember->fileSize == len);
-            curMember->sha256 = hash;
-            hashSink.reset();
-        }
+        return std::make_unique<MyFileHandle>(*memberData, size);
     }
 
     void createSymlink(const Path & path, const std::string & target) override
@@ -76,7 +79,19 @@ void extractNarData(
     const Path & prefix,
     NarMemberDatas & members)
 {
-    Extractor extractor(members, prefix);
-    parseDump(extractor, source);
-    // Note: this point may not be reached if we're in a coroutine.
+    auto parser = extractNarDataFilter(source, prefix, members);
+    while (parser.next()) {
+        // ignore raw data
+    }
+}
+
+nix::WireFormatGenerator extractNarDataFilter(
+    Source & source,
+    const Path & prefix,
+    NarMemberDatas & members)
+{
+    return [](Source & source, const Path & prefix, NarMemberDatas & members) -> WireFormatGenerator {
+        Extractor extractor(members, prefix);
+        co_yield parseAndCopyDump(extractor, source);
+    }(source, prefix, members);
 }
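Note: the Extractor rewrite above moves all per-file state (expected size, running hash, optional retained contents) out of the visitor and into a handle returned from createRegularFile, matching the NARParseVisitor interface in which the file size is known up front. A reduced sketch of that shape, using stand-in types rather than the lix interface:

#include <cassert>
#include <cstdint>
#include <memory>
#include <string_view>

// Stand-in for the FileHandle side of the visitor interface.
struct FileSink {
    virtual ~FileSink() = default;
    virtual void receiveContents(std::string_view data) = 0;
};

// Per-file state lives in the handle, so the parser can hand out one handle
// per regular file instead of the visitor tracking a "current member".
class CountingHandle : public FileSink {
    uint64_t expectedSize;
    uint64_t seen = 0;

public:
    explicit CountingHandle(uint64_t size) : expectedSize(size) {}

    void receiveContents(std::string_view data) override
    {
        seen += data.size();      // a real handle also feeds a HashSink here
        assert(seen <= expectedSize);
    }

    bool complete() const { return seen == expectedSize; }
};

int main()
{
    auto handle = std::make_unique<CountingHandle>(11);
    handle->receiveContents("hello ");
    handle->receiveContents("world");
    assert(handle->complete());
}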
src/hydra-queue-runner/nar-extractor.hh
@@ -1,9 +1,9 @@
 #pragma once
 
-#include "fs-accessor.hh"
-#include "types.hh"
-#include "serialise.hh"
-#include "hash.hh"
+#include "lix/libstore/fs-accessor.hh"
+#include "lix/libutil/hash.hh"
+#include "lix/libutil/serialise.hh"
+#include "lix/libutil/types.hh"
 
 struct NarMemberData
 {
@@ -13,7 +13,7 @@ struct NarMemberData
     std::optional<nix::Hash> sha256;
 };
 
-typedef std::map<nix::Path, NarMemberData> NarMemberDatas;
+using NarMemberDatas = std::map<nix::Path, NarMemberData>;
 
 /* Read a NAR from a source and get to some info about every file
    inside the NAR. */
@@ -21,3 +21,8 @@ void extractNarData(
     nix::Source & source,
     const nix::Path & prefix,
     NarMemberDatas & members);
+
+nix::WireFormatGenerator extractNarDataFilter(
+    nix::Source & source,
+    const nix::Path & prefix,
+    NarMemberDatas & members);
src/hydra-queue-runner/queue-monitor.cc
@@ -1,10 +1,11 @@
-#include "state.hh"
 #include "hydra-build-result.hh"
-#include "globals.hh"
-#include "thread-pool.hh"
+#include "lix/libstore/globals.hh"
+#include "lix/libutil/thread-pool.hh"
+#include "state.hh"
 
 #include <cstring>
-#include <signal.h>
+#include <utility>
+#include <csignal>
 
 using namespace nix;
 
@@ -52,7 +53,7 @@ void State::queueMonitorLoop(Connection & conn)
             auto t_after_work = std::chrono::steady_clock::now();
 
             prom.queue_monitor_time_spent_running.Increment(
-                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
+                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count()));
 
             /* Sleep until we get notification from the database about an
                event. */
@@ -79,7 +80,7 @@ void State::queueMonitorLoop(Connection & conn)
 
             auto t_after_sleep = std::chrono::steady_clock::now();
             prom.queue_monitor_time_spent_waiting.Increment(
-                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
+                static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count()));
         }
 
     exit(0);
@@ -88,7 +89,7 @@ void State::queueMonitorLoop(Connection & conn)
 
 struct PreviousFailure : public std::exception {
     Step::ptr step;
-    PreviousFailure(Step::ptr step) : step(step) { }
+    PreviousFailure(Step::ptr step) : step(std::move(step)) { }
 };
 
 
@@ -117,7 +118,7 @@ bool State::getQueuedBuilds(Connection & conn,
 
         for (auto const & row : res) {
             auto builds_(builds.lock());
-            BuildID id = row["id"].as<BuildID>();
+            auto id = row["id"].as<BuildID>();
             if (buildOne && id != buildOne) continue;
             if (builds_->count(id)) continue;
 
@@ -137,7 +138,7 @@ bool State::getQueuedBuilds(Connection & conn,
 
             newIDs.push_back(id);
             newBuildsByID[id] = build;
-            newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
+            newBuildsByPath.emplace(build->drvPath, id);
         }
     }
 
@@ -162,7 +163,7 @@ bool State::getQueuedBuilds(Connection & conn,
                 ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
                  build->id,
                  (int) bsAborted,
-                 time(0));
+                 time(nullptr));
                txn.commit();
                build->finishedInDB = true;
                nrBuildsDone++;
@@ -176,7 +177,7 @@ bool State::getQueuedBuilds(Connection & conn,
        /* Create steps for this derivation and its dependencies. */
        try {
            step = createStep(destStore, conn, build, build->drvPath,
-                build, 0, finishedDrvs, newSteps, newRunnable);
+                build, nullptr, finishedDrvs, newSteps, newRunnable);
        } catch (PreviousFailure & ex) {
 
            /* Some step previously failed, so mark the build as
@@ -221,7 +222,7 @@ bool State::getQueuedBuilds(Connection & conn,
                 "where id = $1 and finished = 0",
                 build->id,
                 (int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
-                 time(0));
+                 time(nullptr));
                notifyBuildFinished(txn, build->id, {});
                txn.commit();
                build->finishedInDB = true;
@@ -254,7 +255,7 @@ bool State::getQueuedBuilds(Connection & conn,
            {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
-                time_t now = time(0);
+                time_t now = time(nullptr);
                if (!buildOneDone && build->id == buildOne) buildOneDone = true;
                printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
                markSucceededBuild(txn, build, res, true, now, now);
@@ -355,7 +356,7 @@ void State::processQueueChange(Connection & conn)
        pqxx::work txn(conn);
        auto res = txn.exec("select id, globalPriority from Builds where finished = 0");
        for (auto const & row : res)
-            currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<BuildID>();
+            currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<int>();
    }
 
    {
@@ -410,7 +411,7 @@ std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
     const std::map<DrvOutput, std::optional<StorePath>> & paths)
 {
     Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
-    ThreadPool tp;
+    ThreadPool tp("hydra-getMissingRemotePaths");
 
     for (auto & [output, maybeOutputPath] : paths) {
         if (!maybeOutputPath) {
@@ -438,7 +439,7 @@ Step::ptr State::createStep(ref<Store> destStore,
     Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
     std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
 {
-    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;
+    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return nullptr;
 
     /* Check if the requested step already exists. If not, create a
        new step. In any case, make the step reachable from
@@ -516,7 +517,7 @@ Step::ptr State::createStep(ref<Store> destStore,
     std::map<DrvOutput, std::optional<StorePath>> paths;
     for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
         auto outputHash = outputHashes.at(outputName);
-        paths.insert({{outputHash, outputName}, maybeOutputPath});
+        paths.insert({{.drvHash=outputHash, .outputName=outputName}, maybeOutputPath});
     }
 
     auto missing = getMissingRemotePaths(destStore, paths);
@@ -560,7 +561,7 @@ Step::ptr State::createStep(ref<Store> destStore,
         auto & path = *pathOpt;
 
         try {
-            time_t startTime = time(0);
+            time_t startTime = time(nullptr);
 
             if (localStore->isValidPath(path))
                 printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
@@ -578,7 +579,7 @@ Step::ptr State::createStep(ref<Store> destStore,
                 StorePathSet { path },
                 NoRepair, CheckSigs, NoSubstitute);
 
-            time_t stopTime = time(0);
+            time_t stopTime = time(nullptr);
 
             {
                 auto mc = startDbUpdate();
@@ -602,7 +603,7 @@ Step::ptr State::createStep(ref<Store> destStore,
     // FIXME: check whether all outputs are in the binary cache.
     if (valid) {
         finishedDrvs.insert(drvPath);
-        return 0;
+        return nullptr;
     }
 
     /* No, we need to build. */
@@ -610,7 +611,7 @@ Step::ptr State::createStep(ref<Store> destStore,
 
     /* Create steps for the dependencies. */
     for (auto & i : step->drv->inputDrvs.map) {
-        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
+        auto dep = createStep(destStore, conn, build, i.first, nullptr, step, finishedDrvs, newSteps, newRunnable);
         if (dep) {
             auto step_(step->state.lock());
             step_->deps.insert(dep);
@@ -658,11 +659,11 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
     auto res2 = txn.exec_params
         ("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
          "where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
-         time(0) - Jobset::schedulingWindow * 10,
+         time(nullptr) - Jobset::schedulingWindow * 10,
         jobsetID);
    for (auto const & row : res2) {
-        time_t startTime = row["startTime"].as<time_t>();
-        time_t stopTime = row["stopTime"].as<time_t>();
+        auto startTime = row["startTime"].as<time_t>();
+        auto stopTime = row["stopTime"].as<time_t>();
        jobset->addStep(startTime, stopTime - startTime);
    }
 
@@ -702,7 +703,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
             "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
             localStore->printStorePath(output));
         if (r.empty()) continue;
-        BuildID id = r[0][0].as<BuildID>();
+        auto id = r[0][0].as<BuildID>();
 
         printInfo("reusing build %d", id);
 
@@ -727,7 +728,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
                 product.fileSize = row[2].as<off_t>();
             }
             if (!row[3].is_null())
-                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
+                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashType::SHA256);
             if (!row[4].is_null())
                 product.path = row[4].as<std::string>();
             product.name = row[5].as<std::string>();
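Note: the paths.insert({{.drvHash=..., .outputName=...}, ...}) change above, like the DrvOutput and Realisation literals in build-remote.cc, switches to C++20 designated initialisers, which name the fields instead of relying on member order. An isolated example; the field types are simplified, lix's DrvOutput holds a hash rather than a string:

#include <string>

struct DrvOutput {
    std::string drvHash;     // simplified; the real field is a hash type
    std::string outputName;
};

int main()
{
    // Positional initialisation still compiles if the two fields are ever
    // reordered; the designated form would fail to compile instead, since
    // designators must follow declaration order.
    DrvOutput positional{"sha256:...", "out"};
    DrvOutput designated{.drvHash = "sha256:...", .outputName = "out"};
    return positional.outputName == designated.outputName ? 0 : 1;
}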
@@ -8,6 +8,7 @@
 #include <queue>
 #include <regex>
 #include <semaphore>
+#include <utility>

 #include <prometheus/counter.h>
 #include <prometheus/gauge.h>

@@ -15,27 +16,27 @@
 #include "db.hh"

-#include "parsed-derivations.hh"
-#include "pathlocks.hh"
-#include "pool.hh"
-#include "build-result.hh"
-#include "store-api.hh"
-#include "sync.hh"
+#include "lix/libstore/build-result.hh"
+#include "lix/libstore/machines.hh"
+#include "lix/libstore/parsed-derivations.hh"
+#include "lix/libstore/pathlocks.hh"
+#include "lix/libstore/serve-protocol.hh"
+#include "lix/libstore/store-api.hh"
+#include "lix/libutil/pool.hh"
+#include "lix/libutil/sync.hh"
 #include "nar-extractor.hh"
-#include "serve-protocol.hh"
-#include "machines.hh"


-typedef unsigned int BuildID;
+using BuildID = unsigned int;

-typedef unsigned int JobsetID;
+using JobsetID = unsigned int;

-typedef std::chrono::time_point<std::chrono::system_clock> system_time;
+using system_time = std::chrono::time_point<std::chrono::system_clock>;

-typedef std::atomic<unsigned long> counter;
+using counter = std::atomic<unsigned long>;


-typedef enum {
+enum BuildStatus {
     bsSuccess = 0,
     bsFailed = 1,
     bsDepFailed = 2, // builds only

@@ -49,10 +50,10 @@
     bsNarSizeLimitExceeded = 11,
     bsNotDeterministic = 12,
     bsBusy = 100, // not stored
-} BuildStatus;
+};


-typedef enum {
+enum StepState {
     ssPreparing = 1,
     ssConnecting = 10,
     ssSendingInputs = 20,

@@ -60,7 +61,7 @@
     ssWaitingForLocalSlot = 35,
     ssReceivingOutputs = 40,
     ssPostProcessing = 50,
-} StepState;
+};


 struct RemoteResult

@@ -78,7 +79,7 @@ struct RemoteResult
     unsigned int overhead = 0;
     nix::Path logFile;

-    BuildStatus buildStatus() const
+    [[nodiscard]] BuildStatus buildStatus() const
     {
         return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
     }

@@ -95,10 +96,10 @@ class Jobset
 {
 public:

-    typedef std::shared_ptr<Jobset> ptr;
-    typedef std::weak_ptr<Jobset> wptr;
+    using ptr = std::shared_ptr<Jobset>;
+    using wptr = std::weak_ptr<Jobset>;

-    static const time_t schedulingWindow = 24 * 60 * 60;
+    static const time_t schedulingWindow = static_cast<time_t>(24 * 60 * 60);

 private:

@@ -115,7 +116,7 @@ public:
         return (double) seconds / shares;
     }

-    void setShares(int shares_)
+    void setShares(unsigned int shares_)
     {
         assert(shares_ > 0);
         shares = shares_;

@@ -131,8 +132,8 @@ public:
 struct Build
 {
-    typedef std::shared_ptr<Build> ptr;
-    typedef std::weak_ptr<Build> wptr;
+    using ptr = std::shared_ptr<Build>;
+    using wptr = std::weak_ptr<Build>;

     BuildID id;
     nix::StorePath drvPath;

@@ -163,8 +164,8 @@ struct Build
 struct Step
 {
-    typedef std::shared_ptr<Step> ptr;
-    typedef std::weak_ptr<Step> wptr;
+    using ptr = std::shared_ptr<Step>;
+    using wptr = std::weak_ptr<Step>;

     nix::StorePath drvPath;
     std::unique_ptr<nix::Derivation> drv;

@@ -221,13 +222,8 @@ struct Step
     nix::Sync<State> state;

-    Step(const nix::StorePath & drvPath) : drvPath(drvPath)
+    Step(nix::StorePath drvPath) : drvPath(std::move(drvPath))
     { }
-
-    ~Step()
-    {
-        //printMsg(lvlError, format("destroying step %1%") % drvPath);
-    }
 };

@@ -239,7 +235,7 @@ void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
 struct Machine : nix::Machine
 {
-    typedef std::shared_ptr<Machine> ptr;
+    using ptr = std::shared_ptr<Machine>;

     /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
        we are not yet used to, but once we are, we don't need this. */

@@ -254,7 +250,7 @@ struct Machine : nix::Machine
     float speedFactorFloat = 1.0;

     struct State {
-        typedef std::shared_ptr<State> ptr;
+        using ptr = std::shared_ptr<State>;
         counter currentJobs{0};
         counter nrStepsDone{0};
         counter totalStepTime{0}; // total time for steps, including closure copying

@@ -358,22 +354,22 @@ private:
     bool useSubstitutes = false;

     /* The queued builds. */
-    typedef std::map<BuildID, Build::ptr> Builds;
+    using Builds = std::map<BuildID, Build::ptr>;
     nix::Sync<Builds> builds;

     /* The jobsets. */
-    typedef std::map<std::pair<std::string, std::string>, Jobset::ptr> Jobsets;
+    using Jobsets = std::map<std::pair<std::string, std::string>, Jobset::ptr>;
     nix::Sync<Jobsets> jobsets;

     /* All active or pending build steps (i.e. dependencies of the
        queued builds). Note that these are weak pointers. Steps are
        kept alive by being reachable from Builds or by being in
        progress. */
-    typedef std::map<nix::StorePath, Step::wptr> Steps;
+    using Steps = std::map<nix::StorePath, Step::wptr>;
     nix::Sync<Steps> steps;

     /* Build steps that have no unbuilt dependencies. */
-    typedef std::list<Step::wptr> Runnable;
+    using Runnable = std::list<Step::wptr>;
     nix::Sync<Runnable> runnable;

     /* CV for waking up the dispatcher. */

@@ -385,7 +381,7 @@ private:
     /* The build machines. */
     std::mutex machinesReadyLock;
-    typedef std::map<std::string, Machine::ptr> Machines;
+    using Machines = std::map<std::string, Machine::ptr>;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

     /* Throttler for CPU-bound local work. */

@@ -401,6 +397,7 @@ private:
     counter nrStepsDone{0};
     counter nrStepsBuilding{0};
     counter nrStepsCopyingTo{0};
+    counter nrStepsWaitingForDownloadSlot{0};
     counter nrStepsCopyingFrom{0};
     counter nrStepsWaiting{0};
     counter nrUnsupportedSteps{0};

@@ -431,7 +428,7 @@ private:
     struct MachineReservation
     {
-        typedef std::shared_ptr<MachineReservation> ptr;
+        using ptr = std::shared_ptr<MachineReservation>;
         State & state;
         Step::ptr step;
         Machine::ptr machine;

@@ -534,7 +531,7 @@ private:
     void finishBuildStep(pqxx::work & txn, const RemoteResult & result, BuildID buildId, unsigned int stepNr,
         const std::string & machine);

-    int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
+    unsigned int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
         Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);

     void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

@@ -594,6 +591,7 @@ private:
     enum StepResult { sDone, sRetry, sMaybeCancelled };
     StepResult doBuildStep(nix::ref<nix::Store> destStore,
         MachineReservation::ptr & reservation,
+        Connection & conn,
         std::shared_ptr<ActiveStep> activeStep);

     void buildRemote(nix::ref<nix::Store> destStore,

@@ -622,8 +620,6 @@ private:
     void addRoot(const nix::StorePath & storePath);

-    void runMetricsExporter();
-
 public:

     void showStatus();
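Most of this header diff is a mechanical typedef-to-using rewrite, but the `Step` constructor is a real behavioural tweak: taking `nix::StorePath` by value and moving it lets callers hand in rvalues without any copy, while lvalue callers pay exactly one copy plus a move. A self-contained sketch of the sink-parameter idiom, with `std::string` standing in for `nix::StorePath`:

    #include <string>
    #include <utility>

    struct Step {
        std::string drvPath;

        // Sink parameter: accept by value, then move into the member.
        // Rvalue argument: two moves, no copy. Lvalue: one copy plus a move.
        explicit Step(std::string drvPath) : drvPath(std::move(drvPath)) {}
    };

    int main()
    {
        std::string p = "/nix/store/example.drv";
        Step s1(p);            // p is copied and remains valid
        Step s2(std::move(p)); // p's buffer is stolen; no copy at all
        return s1.drvPath == s2.drvPath ? 0 : 1;
    }

Dropping the empty `~Step()` that only carried a commented-out trace message also re-enables the compiler-generated move operations, which a user-declared destructor would otherwise suppress.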
@@ -242,23 +242,35 @@ sub push : Chained('api') PathPart('push') Args(0) {
     $c->{stash}->{json}->{jobsetsTriggered} = [];

     my $force = exists $c->request->query_params->{force};
-    my @jobsets = split /,/, ($c->request->query_params->{jobsets} // "");
-    foreach my $s (@jobsets) {
+    my @jobsetNames = split /,/, ($c->request->query_params->{jobsets} // "");
+    my @jobsets;
+
+    foreach my $s (@jobsetNames) {
         my ($p, $j) = parseJobsetName($s);
         my $jobset = $c->model('DB::Jobsets')->find($p, $j);
-        next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
-        triggerJobset($self, $c, $jobset, $force);
+        push @jobsets, $jobset if defined $jobset;
     }

     my @repos = split /,/, ($c->request->query_params->{repos} // "");
     foreach my $r (@repos) {
-        triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
+        foreach ($c->model('DB::Jobsets')->search(
             { 'project.enabled' => 1, 'me.enabled' => 1 },
             {
                 join => 'project',
                 where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
                 order_by => 'me.id DESC'
-            });
+            })) {
+            push @jobsets, $_;
+        }
+    }
+
+    foreach my $jobset (@jobsets) {
+        requireRestartPrivileges($c, $jobset->project);
+    }
+
+    foreach my $jobset (@jobsets) {
+        next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
+        triggerJobset($self, $c, $jobset, $force);
     }

     $self->status_ok(

@@ -273,7 +285,7 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
     $c->{stash}->{json}->{jobsetsTriggered} = [];

     my $in = $c->request->{data};
-    my $owner = $in->{repository}->{owner}->{name} or die;
+    my $owner = $in->{repository}->{owner}->{login} or die;
     my $repo = $in->{repository}->{name} or die;
     print STDERR "got push from GitHub repository $owner/$repo\n";

@@ -285,6 +297,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
     $c->response->body("");
 }

+sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
+    my ($self, $c) = @_;
+
+    $c->{stash}->{json}->{jobsetsTriggered} = [];
+
+    my $in = $c->request->{data};
+    my $url = $in->{repository}->{clone_url} or die;
+    $url =~ s/.git$//;
+    print STDERR "got push from Gitea repository $url\n";
+
+    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
+        { 'project.enabled' => 1, 'me.enabled' => 1 },
+        { join => 'project'
+        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
+        });
+    $c->response->body("");
+}

 1;
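The reshaped `push` handler is a check-then-act split: it first resolves every requested jobset, then runs the privilege check across the whole set, and only then triggers anything, so a failed authorization can no longer leave some jobsets already triggered. A rough C++ rendering of the same two-phase shape (the types and helpers here are placeholders, not Hydra's API):

    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Jobset { std::string name; bool mayRestart; };

    void requireRestartPrivileges(const Jobset & j)
    {
        if (!j.mayRestart) throw std::runtime_error("forbidden: " + j.name);
    }

    void triggerJobset(const Jobset & j) { /* enqueue an evaluation */ }

    void push(const std::vector<Jobset> & jobsets)
    {
        // Phase 1: authorize everything up front; any failure aborts here...
        for (const auto & j : jobsets) requireRestartPrivileges(j);
        // Phase 2: ...so nothing is triggered unless every check passed.
        for (const auto & j : jobsets) triggerJobset(j);
    }

The `push_github` fix switches the owner lookup to the `login` field, which is reliably present in GitHub push payloads where `name` may not be.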
@@ -240,7 +240,7 @@ sub serveFile {
     # XSS hole.
     $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

-    $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
+    $c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
                                                    "store", "cat", "--store", getStoreUri(), "$path"]) };

     # Detect MIME type.
@@ -76,7 +76,9 @@ sub view_GET {
     $c->stash->{removed} = $diff->{removed};
     $c->stash->{unfinished} = $diff->{unfinished};
     $c->stash->{aborted} = $diff->{aborted};
-    $c->stash->{failed} = $diff->{failed};
+    $c->stash->{totalAborted} = $diff->{totalAborted};
+    $c->stash->{totalFailed} = $diff->{totalFailed};
+    $c->stash->{totalQueued} = $diff->{totalQueued};

     $c->stash->{full} = ($c->req->params->{full} || "0") eq "1";
@@ -35,6 +35,7 @@ sub noLoginNeeded {

     return $whitelisted ||
         $c->request->path eq "api/push-github" ||
+        $c->request->path eq "api/push-gitea" ||
         $c->request->path eq "google-login" ||
         $c->request->path eq "github-redirect" ||
         $c->request->path eq "github-login" ||

@@ -80,7 +81,7 @@ sub begin :Private {
     $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

     # XSRF protection: require POST requests to have the same origin.
-    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
+    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
         my $referer = $c->req->header('Referer');
         $referer //= $c->req->header('Origin');
         my $base = $c->req->base;
@@ -32,7 +32,12 @@ sub buildDiff {
         removed => [],
         unfinished => [],
         aborted => [],
-        failed => [],
+
+        # These summary counters cut across the categories to determine whether
+        # actions such as "Restart all failed" or "Bump queue" are available.
+        totalAborted => 0,
+        totalFailed => 0,
+        totalQueued => 0,
     };

     my $n = 0;

@@ -80,8 +85,15 @@ sub buildDiff {
         } else {
             push @{$ret->{new}}, $build if !$found;
         }
-        if (defined $build->buildstatus && $build->buildstatus != 0) {
-            push @{$ret->{failed}}, $build;
+
+        if ($build->finished != 0 && $build->buildstatus != 0) {
+            if ($aborted) {
+                ++$ret->{totalAborted};
+            } else {
+                ++$ret->{totalFailed};
+            }
+        } elsif ($build->finished == 0) {
+            ++$ret->{totalQueued};
         }
     }
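`buildDiff` now tallies the cross-cutting totals in the same pass that buckets each build, instead of collecting a `failed` list that callers only consulted for its size. A compact sketch of the same single-pass counting, with field names mirroring the Perl and everything else illustrative:

    #include <vector>

    struct Build { bool finished; int buildstatus; bool aborted; };

    struct Totals { unsigned totalAborted = 0, totalFailed = 0, totalQueued = 0; };

    Totals summarize(const std::vector<Build> & builds)
    {
        Totals t;
        for (const auto & b : builds) {
            if (b.finished && b.buildstatus != 0)
                ++(b.aborted ? t.totalAborted : t.totalFailed); // finished, not successful
            else if (!b.finished)
                ++t.totalQueued;                                // still waiting in the queue
        }
        return t;
    }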
@@ -36,6 +36,7 @@ our @EXPORT = qw(
     jobsetOverview
     jobsetOverview_
     pathIsInsidePrefix
+    readIntoSocket
     readNixFile
     registerRoot
     restartBuilds

@@ -406,6 +407,16 @@ sub pathIsInsidePrefix {
     return $cur;
 }

+sub readIntoSocket{
+    my (%args) = @_;
+    my $sock;
+
+    eval {
+        open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
+    };
+
+    return $sock;
+}
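`readIntoSocket` exists so that `serveFile` (earlier in this compare) can stream a command's output through a pipe handle instead of buffering the whole thing in memory first, which matters when serving large store files. A loose C++ analogue of the same streaming pattern using POSIX `popen` (the command line is an example, not Hydra's exact invocation):

    #include <array>
    #include <cstdio>    // popen/pclose are POSIX functions exposed here
    #include <iostream>

    int main()
    {
        // Read end of a pipe to the child process; output is never fully buffered.
        FILE * pipe = popen("nix store cat --store daemon /nix/store/example", "r");
        if (!pipe) return 1;

        std::array<char, 4096> buf{};
        size_t n;
        // Forward each chunk as it arrives; memory stays bounded by buf's size.
        while ((n = fread(buf.data(), 1, buf.size(), pipe)) > 0)
            std::cout.write(buf.data(), static_cast<std::streamsize>(n));

        return pclose(pipe) == 0 ? 0 : 1;
    }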
@@ -1,22 +0,0 @@
-PERL_MODULES = \
-	$(wildcard *.pm) \
-	$(wildcard Hydra/*.pm) \
-	$(wildcard Hydra/Helper/*.pm) \
-	$(wildcard Hydra/Model/*.pm) \
-	$(wildcard Hydra/View/*.pm) \
-	$(wildcard Hydra/Schema/*.pm) \
-	$(wildcard Hydra/Schema/Result/*.pm) \
-	$(wildcard Hydra/Schema/ResultSet/*.pm) \
-	$(wildcard Hydra/Controller/*.pm) \
-	$(wildcard Hydra/Base/*.pm) \
-	$(wildcard Hydra/Base/Controller/*.pm) \
-	$(wildcard Hydra/Script/*.pm) \
-	$(wildcard Hydra/Component/*.pm) \
-	$(wildcard Hydra/Event/*.pm) \
-	$(wildcard Hydra/Plugin/*.pm)
-
-EXTRA_DIST = \
-	$(PERL_MODULES)
-
-hydradir = $(libexecdir)/hydra/lib
-nobase_hydra_DATA = $(PERL_MODULES)
@@ -2,8 +2,8 @@

 #include <pqxx/pqxx>

-#include "strings.hh"
-#include "environment-variables.hh"
+#include "lix/libutil/environment-variables.hh"
+#include "lix/libutil/strings.hh"


 struct Connection : pqxx::connection
@@ -2,9 +2,9 @@

 #include <map>

-#include "environment-variables.hh"
-#include "file-system.hh"
-#include "strings.hh"
+#include "lix/libutil/environment-variables.hh"
+#include "lix/libutil/file-system.hh"
+#include "lix/libutil/strings.hh"

 struct HydraConfig
 {
src/libhydra/meson.build (new file, 5 lines)
@@ -0,0 +1,5 @@
+libhydra_inc = include_directories('.')
+
+libhydra_dep = declare_dependency(
+  include_directories: [libhydra_inc],
+)
src/meson.build (new file, 16 lines)
@@ -0,0 +1,16 @@
+# Native code
+subdir('libhydra')
+subdir('hydra-evaluator')
+subdir('hydra-queue-runner')
+
+# Data and interpreted
+foreach dir : ['lib', 'root', 'sql', 'ttf']
+  install_subdir(dir,
+    install_dir: get_option('libexecdir') / 'hydra',
+  )
+endforeach
+install_subdir('script',
+  install_dir: get_option('bindir'),
+  install_mode: 'rwxr-xr-x',
+  strip_directory: true,
+)
@@ -1,39 +0,0 @@
-TEMPLATES = $(wildcard *.tt)
-STATIC = \
-	$(wildcard static/images/*) \
-	$(wildcard static/css/*) \
-	static/js/bootbox.min.js \
-	static/js/popper.min.js \
-	static/js/common.js \
-	static/js/jquery/jquery-3.4.1.min.js \
-	static/js/jquery/jquery-ui-1.10.4.min.js
-
-FLOT = flot-0.8.3.zip
-BOOTSTRAP = bootstrap-4.3.1-dist.zip
-FONTAWESOME = fontawesome-free-5.10.2-web.zip
-
-ZIPS = $(FLOT) $(BOOTSTRAP) $(FONTAWESOME)
-
-EXTRA_DIST = $(TEMPLATES) $(STATIC) $(ZIPS)
-
-hydradir = $(libexecdir)/hydra/root
-nobase_hydra_DATA = $(EXTRA_DIST)
-
-all:
-	mkdir -p $(srcdir)/static/js
-	unzip -u -d $(srcdir)/static $(BOOTSTRAP)
-	rm -rf $(srcdir)/static/bootstrap
-	mv $(srcdir)/static/$(basename $(BOOTSTRAP)) $(srcdir)/static/bootstrap
-	unzip -u -d $(srcdir)/static/js $(FLOT)
-	unzip -u -d $(srcdir)/static $(FONTAWESOME)
-	rm -rf $(srcdir)/static/fontawesome
-	mv $(srcdir)/static/$(basename $(FONTAWESOME)) $(srcdir)/static/fontawesome
-
-install-data-local: $(ZIPS)
-	mkdir -p $(hydradir)/static/js
-	cp -prvd $(srcdir)/static/js/* $(hydradir)/static/js
-	mkdir -p $(hydradir)/static/bootstrap
-	cp -prvd $(srcdir)/static/bootstrap/* $(hydradir)/static/bootstrap
-	mkdir -p $(hydradir)/static/fontawesome/{css,webfonts}
-	cp -prvd $(srcdir)/static/fontawesome/css/* $(hydradir)/static/fontawesome/css
-	cp -prvd $(srcdir)/static/fontawesome/webfonts/* $(hydradir)/static/fontawesome/webfonts

(A binary file also changed; its contents are not shown.)
@@ -411,7 +411,7 @@ BLOCK renderInputDiff; %]
     [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
         [% IF bi1.type == "git" %]
             <tr><td>
-            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
+            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 12) _ ' to ' _ bi2.revision.substr(0, 12)) %]</tt>
             </td></tr>
         [% ELSE %]
             <tr><td>

(Two binary files also changed; their contents are not shown.)
@@ -48,16 +48,16 @@ c.uri_for(c.controller('JobsetEval').action_for('view'),
 <a class="nav-link dropdown-toggle" data-toggle="dropdown" href="#">Actions</a>
 <div class="dropdown-menu">
     <a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('create_jobset'), [eval.id]) %]">Create a jobset from this evaluation</a>
-    [% IF unfinished.size > 0 %]
+    [% IF totalQueued > 0 %]
         <a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('cancel'), [eval.id]) %]">Cancel all scheduled builds</a>
     [% END %]
-    [% IF aborted.size > 0 || stillFail.size > 0 || nowFail.size > 0 || failed.size > 0 %]
+    [% IF totalFailed > 0 %]
         <a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('restart_failed'), [eval.id]) %]">Restart all failed builds</a>
     [% END %]
-    [% IF aborted.size > 0 %]
+    [% IF totalAborted > 0 %]
         <a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('restart_aborted'), [eval.id]) %]">Restart all aborted builds</a>
     [% END %]
-    [% IF unfinished.size > 0 %]
+    [% IF totalQueued > 0 %]
         <a class="dropdown-item" href="[% c.uri_for(c.controller('JobsetEval').action_for('bump'), [eval.id]) %]">Bump builds to front of queue</a>
     [% END %]
 </div>
3719
src/root/static/bootstrap/css/bootstrap-grid.css
vendored
Normal file
3719
src/root/static/bootstrap/css/bootstrap-grid.css
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
src/root/static/bootstrap/css/bootstrap-grid.css.map
Normal file
1
src/root/static/bootstrap/css/bootstrap-grid.css.map
Normal file
File diff suppressed because one or more lines are too long
7
src/root/static/bootstrap/css/bootstrap-grid.min.css
vendored
Normal file
7
src/root/static/bootstrap/css/bootstrap-grid.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
1
src/root/static/bootstrap/css/bootstrap-grid.min.css.map
Normal file
1
src/root/static/bootstrap/css/bootstrap-grid.min.css.map
Normal file
File diff suppressed because one or more lines are too long
331
src/root/static/bootstrap/css/bootstrap-reboot.css
vendored
Normal file
331
src/root/static/bootstrap/css/bootstrap-reboot.css
vendored
Normal file
|
@ -0,0 +1,331 @@
|
||||||
|
/*!
|
||||||
|
* Bootstrap Reboot v4.3.1 (https://getbootstrap.com/)
|
||||||
|
* Copyright 2011-2019 The Bootstrap Authors
|
||||||
|
* Copyright 2011-2019 Twitter, Inc.
|
||||||
|
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
|
||||||
|
* Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
|
||||||
|
*/
|
||||||
|
*,
|
||||||
|
*::before,
|
||||||
|
*::after {
|
||||||
|
box-sizing: border-box;
|
||||||
|
}
|
||||||
|
|
||||||
|
html {
|
||||||
|
font-family: sans-serif;
|
||||||
|
line-height: 1.15;
|
||||||
|
-webkit-text-size-adjust: 100%;
|
||||||
|
-webkit-tap-highlight-color: rgba(0, 0, 0, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
article, aside, figcaption, figure, footer, header, hgroup, main, nav, section {
|
||||||
|
display: block;
|
||||||
|
}
|
||||||
|
|
||||||
|
body {
|
||||||
|
margin: 0;
|
||||||
|
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||||
|
font-size: 1rem;
|
||||||
|
font-weight: 400;
|
||||||
|
line-height: 1.5;
|
||||||
|
color: #212529;
|
||||||
|
text-align: left;
|
||||||
|
background-color: #fff;
|
||||||
|
}
|
||||||
|
|
||||||
|
[tabindex="-1"]:focus {
|
||||||
|
outline: 0 !important;
|
||||||
|
}
|
||||||
|
|
||||||
|
hr {
|
||||||
|
box-sizing: content-box;
|
||||||
|
height: 0;
|
||||||
|
overflow: visible;
|
||||||
|
}
|
||||||
|
|
||||||
|
h1, h2, h3, h4, h5, h6 {
|
||||||
|
margin-top: 0;
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
p {
|
||||||
|
margin-top: 0;
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
abbr[title],
|
||||||
|
abbr[data-original-title] {
|
||||||
|
text-decoration: underline;
|
||||||
|
-webkit-text-decoration: underline dotted;
|
||||||
|
text-decoration: underline dotted;
|
||||||
|
cursor: help;
|
||||||
|
border-bottom: 0;
|
||||||
|
-webkit-text-decoration-skip-ink: none;
|
||||||
|
text-decoration-skip-ink: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
address {
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
font-style: normal;
|
||||||
|
line-height: inherit;
|
||||||
|
}
|
||||||
|
|
||||||
|
ol,
|
||||||
|
ul,
|
||||||
|
dl {
|
||||||
|
margin-top: 0;
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
ol ol,
|
||||||
|
ul ul,
|
||||||
|
ol ul,
|
||||||
|
ul ol {
|
||||||
|
margin-bottom: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
dt {
|
||||||
|
font-weight: 700;
|
||||||
|
}
|
||||||
|
|
||||||
|
dd {
|
||||||
|
margin-bottom: .5rem;
|
||||||
|
margin-left: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
blockquote {
|
||||||
|
margin: 0 0 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
b,
|
||||||
|
strong {
|
||||||
|
font-weight: bolder;
|
||||||
|
}
|
||||||
|
|
||||||
|
small {
|
||||||
|
font-size: 80%;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub,
|
||||||
|
sup {
|
||||||
|
position: relative;
|
||||||
|
font-size: 75%;
|
||||||
|
line-height: 0;
|
||||||
|
vertical-align: baseline;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub {
|
||||||
|
bottom: -.25em;
|
||||||
|
}
|
||||||
|
|
||||||
|
sup {
|
||||||
|
top: -.5em;
|
||||||
|
}
|
||||||
|
|
||||||
|
a {
|
||||||
|
color: #007bff;
|
||||||
|
text-decoration: none;
|
||||||
|
background-color: transparent;
|
||||||
|
}
|
||||||
|
|
||||||
|
a:hover {
|
||||||
|
color: #0056b3;
|
||||||
|
text-decoration: underline;
|
||||||
|
}
|
||||||
|
|
||||||
|
a:not([href]):not([tabindex]) {
|
||||||
|
color: inherit;
|
||||||
|
text-decoration: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus {
|
||||||
|
color: inherit;
|
||||||
|
text-decoration: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
a:not([href]):not([tabindex]):focus {
|
||||||
|
outline: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
pre,
|
||||||
|
code,
|
||||||
|
kbd,
|
||||||
|
samp {
|
||||||
|
font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||||
|
font-size: 1em;
|
||||||
|
}
|
||||||
|
|
||||||
|
pre {
|
||||||
|
margin-top: 0;
|
||||||
|
margin-bottom: 1rem;
|
||||||
|
overflow: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
figure {
|
||||||
|
margin: 0 0 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
img {
|
||||||
|
vertical-align: middle;
|
||||||
|
border-style: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
svg {
|
||||||
|
overflow: hidden;
|
||||||
|
vertical-align: middle;
|
||||||
|
}
|
||||||
|
|
||||||
|
table {
|
||||||
|
border-collapse: collapse;
|
||||||
|
}
|
||||||
|
|
||||||
|
caption {
|
||||||
|
padding-top: 0.75rem;
|
||||||
|
padding-bottom: 0.75rem;
|
||||||
|
color: #6c757d;
|
||||||
|
text-align: left;
|
||||||
|
caption-side: bottom;
|
||||||
|
}
|
||||||
|
|
||||||
|
th {
|
||||||
|
text-align: inherit;
|
||||||
|
}
|
||||||
|
|
||||||
|
label {
|
||||||
|
display: inline-block;
|
||||||
|
margin-bottom: 0.5rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
button {
|
||||||
|
border-radius: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
button:focus {
|
||||||
|
outline: 1px dotted;
|
||||||
|
outline: 5px auto -webkit-focus-ring-color;
|
||||||
|
}
|
||||||
|
|
||||||
|
input,
|
||||||
|
button,
|
||||||
|
select,
|
||||||
|
optgroup,
|
||||||
|
textarea {
|
||||||
|
margin: 0;
|
||||||
|
font-family: inherit;
|
||||||
|
font-size: inherit;
|
||||||
|
line-height: inherit;
|
||||||
|
}
|
||||||
|
|
||||||
|
button,
|
||||||
|
input {
|
||||||
|
overflow: visible;
|
||||||
|
}
|
||||||
|
|
||||||
|
button,
|
||||||
|
select {
|
||||||
|
text-transform: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
word-wrap: normal;
|
||||||
|
}
|
||||||
|
|
||||||
|
button,
|
||||||
|
[type="button"],
|
||||||
|
[type="reset"],
|
||||||
|
[type="submit"] {
|
||||||
|
-webkit-appearance: button;
|
||||||
|
}
|
||||||
|
|
||||||
|
button:not(:disabled),
|
||||||
|
[type="button"]:not(:disabled),
|
||||||
|
[type="reset"]:not(:disabled),
|
||||||
|
[type="submit"]:not(:disabled) {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
button::-moz-focus-inner,
|
||||||
|
[type="button"]::-moz-focus-inner,
|
||||||
|
[type="reset"]::-moz-focus-inner,
|
||||||
|
[type="submit"]::-moz-focus-inner {
|
||||||
|
padding: 0;
|
||||||
|
border-style: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
input[type="radio"],
|
||||||
|
input[type="checkbox"] {
|
||||||
|
box-sizing: border-box;
|
||||||
|
padding: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
input[type="date"],
|
||||||
|
input[type="time"],
|
||||||
|
input[type="datetime-local"],
|
||||||
|
input[type="month"] {
|
||||||
|
-webkit-appearance: listbox;
|
||||||
|
}
|
||||||
|
|
||||||
|
textarea {
|
||||||
|
overflow: auto;
|
||||||
|
resize: vertical;
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldset {
|
||||||
|
min-width: 0;
|
||||||
|
padding: 0;
|
||||||
|
margin: 0;
|
||||||
|
border: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
legend {
|
||||||
|
display: block;
|
||||||
|
width: 100%;
|
||||||
|
max-width: 100%;
|
||||||
|
padding: 0;
|
||||||
|
margin-bottom: .5rem;
|
||||||
|
font-size: 1.5rem;
|
||||||
|
line-height: inherit;
|
||||||
|
color: inherit;
|
||||||
|
white-space: normal;
|
||||||
|
}
|
||||||
|
|
||||||
|
progress {
|
||||||
|
vertical-align: baseline;
|
||||||
|
}
|
||||||
|
|
||||||
|
[type="number"]::-webkit-inner-spin-button,
|
||||||
|
[type="number"]::-webkit-outer-spin-button {
|
||||||
|
height: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
[type="search"] {
|
||||||
|
outline-offset: -2px;
|
||||||
|
-webkit-appearance: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
[type="search"]::-webkit-search-decoration {
|
||||||
|
-webkit-appearance: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
::-webkit-file-upload-button {
|
||||||
|
font: inherit;
|
||||||
|
-webkit-appearance: button;
|
||||||
|
}
|
||||||
|
|
||||||
|
output {
|
||||||
|
display: inline-block;
|
||||||
|
}
|
||||||
|
|
||||||
|
summary {
|
||||||
|
display: list-item;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
template {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
[hidden] {
|
||||||
|
display: none !important;
|
||||||
|
}
|
||||||
|
/*# sourceMappingURL=bootstrap-reboot.css.map */
|
1
src/root/static/bootstrap/css/bootstrap-reboot.css.map
Normal file
1
src/root/static/bootstrap/css/bootstrap-reboot.css.map
Normal file
File diff suppressed because one or more lines are too long
8
src/root/static/bootstrap/css/bootstrap-reboot.min.css
vendored
Normal file
8
src/root/static/bootstrap/css/bootstrap-reboot.min.css
vendored
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
/*!
|
||||||
|
* Bootstrap Reboot v4.3.1 (https://getbootstrap.com/)
|
||||||
|
* Copyright 2011-2019 The Bootstrap Authors
|
||||||
|
* Copyright 2011-2019 Twitter, Inc.
|
||||||
|
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
|
||||||
|
* Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
|
||||||
|
*/*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus,a:not([href]):not([tabindex]):hover{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto 
-webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=date],input[type=datetime-local],input[type=month],input[type=time]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}
|
||||||
|
/*# sourceMappingURL=bootstrap-reboot.min.css.map */
|
File diff suppressed because one or more lines are too long
10038
src/root/static/bootstrap/css/bootstrap.css
vendored
Normal file
10038
src/root/static/bootstrap/css/bootstrap.css
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
src/root/static/bootstrap/css/bootstrap.css.map
Normal file
1
src/root/static/bootstrap/css/bootstrap.css.map
Normal file
File diff suppressed because one or more lines are too long
7
src/root/static/bootstrap/css/bootstrap.min.css
vendored
Normal file
7
src/root/static/bootstrap/css/bootstrap.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
1
src/root/static/bootstrap/css/bootstrap.min.css.map
Normal file
1
src/root/static/bootstrap/css/bootstrap.min.css.map
Normal file
File diff suppressed because one or more lines are too long
7013
src/root/static/bootstrap/js/bootstrap.bundle.js
vendored
Normal file
7013
src/root/static/bootstrap/js/bootstrap.bundle.js
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
src/root/static/bootstrap/js/bootstrap.bundle.js.map
Normal file
1
src/root/static/bootstrap/js/bootstrap.bundle.js.map
Normal file
File diff suppressed because one or more lines are too long
7
src/root/static/bootstrap/js/bootstrap.bundle.min.js
vendored
Normal file
7
src/root/static/bootstrap/js/bootstrap.bundle.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
1
src/root/static/bootstrap/js/bootstrap.bundle.min.js.map
Normal file
1
src/root/static/bootstrap/js/bootstrap.bundle.min.js.map
Normal file
File diff suppressed because one or more lines are too long
4435
src/root/static/bootstrap/js/bootstrap.js
vendored
Normal file
4435
src/root/static/bootstrap/js/bootstrap.js
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
src/root/static/bootstrap/js/bootstrap.js.map
Normal file
1
src/root/static/bootstrap/js/bootstrap.js.map
Normal file
File diff suppressed because one or more lines are too long
7
src/root/static/bootstrap/js/bootstrap.min.js
vendored
Normal file
7
src/root/static/bootstrap/js/bootstrap.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
1
src/root/static/bootstrap/js/bootstrap.min.js.map
Normal file
1
src/root/static/bootstrap/js/bootstrap.min.js.map
Normal file
File diff suppressed because one or more lines are too long
34
src/root/static/fontawesome/LICENSE.txt
Normal file
34
src/root/static/fontawesome/LICENSE.txt
Normal file
|
@ -0,0 +1,34 @@
|
||||||
|
Font Awesome Free License
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
Font Awesome Free is free, open source, and GPL friendly. You can use it for
|
||||||
|
commercial projects, open source projects, or really almost whatever you want.
|
||||||
|
Full Font Awesome Free license: https://fontawesome.com/license/free.
|
||||||
|
|
||||||
|
# Icons: CC BY 4.0 License (https://creativecommons.org/licenses/by/4.0/)
|
||||||
|
In the Font Awesome Free download, the CC BY 4.0 license applies to all icons
|
||||||
|
packaged as SVG and JS file types.
|
||||||
|
|
||||||
|
# Fonts: SIL OFL 1.1 License (https://scripts.sil.org/OFL)
|
||||||
|
In the Font Awesome Free download, the SIL OFL license applies to all icons
|
||||||
|
packaged as web and desktop font files.
|
||||||
|
|
||||||
|
# Code: MIT License (https://opensource.org/licenses/MIT)
|
||||||
|
In the Font Awesome Free download, the MIT license applies to all non-font and
|
||||||
|
non-icon files.
|
||||||
|
|
||||||
|
# Attribution
|
||||||
|
Attribution is required by MIT, SIL OFL, and CC BY licenses. Downloaded Font
|
||||||
|
Awesome Free files already contain embedded comments with sufficient
|
||||||
|
attribution, so you shouldn't need to do anything additional when using these
|
||||||
|
files normally.
|
||||||
|
|
||||||
|
We've kept attribution comments terse, so we ask that you do not actively work
|
||||||
|
to remove them from files, especially code. They're a great way for folks to
|
||||||
|
learn about Font Awesome.
|
||||||
|
|
||||||
|
# Brand Icons
|
||||||
|
All brand icons are trademarks of their respective owners. The use of these
|
||||||
|
trademarks does not indicate endorsement of the trademark holder by Font
|
||||||
|
Awesome, nor vice versa. **Please do not use brand logos for any purpose except
|
||||||
|
to represent the company, product, or service to which they refer.**
|
4396
src/root/static/fontawesome/css/all.css
vendored
Normal file
4396
src/root/static/fontawesome/css/all.css
vendored
Normal file
File diff suppressed because it is too large
Load diff
5
src/root/static/fontawesome/css/all.min.css
vendored
Normal file
5
src/root/static/fontawesome/css/all.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
14
src/root/static/fontawesome/css/brands.css
vendored
Normal file
14
src/root/static/fontawesome/css/brands.css
vendored
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
@font-face {
|
||||||
|
font-family: 'Font Awesome 5 Brands';
|
||||||
|
font-style: normal;
|
||||||
|
font-weight: normal;
|
||||||
|
font-display: auto;
|
||||||
|
src: url("../webfonts/fa-brands-400.eot");
|
||||||
|
src: url("../webfonts/fa-brands-400.eot?#iefix") format("embedded-opentype"), url("../webfonts/fa-brands-400.woff2") format("woff2"), url("../webfonts/fa-brands-400.woff") format("woff"), url("../webfonts/fa-brands-400.ttf") format("truetype"), url("../webfonts/fa-brands-400.svg#fontawesome") format("svg"); }
|
||||||
|
|
||||||
|
.fab {
|
||||||
|
font-family: 'Font Awesome 5 Brands'; }
|
5
src/root/static/fontawesome/css/brands.min.css
vendored
Normal file
5
src/root/static/fontawesome/css/brands.min.css
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:normal;font-display:auto;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}
|
4363
src/root/static/fontawesome/css/fontawesome.css
vendored
Normal file
4363
src/root/static/fontawesome/css/fontawesome.css
vendored
Normal file
File diff suppressed because it is too large
Load diff
5
src/root/static/fontawesome/css/fontawesome.min.css
vendored
Normal file
5
src/root/static/fontawesome/css/fontawesome.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
15
src/root/static/fontawesome/css/regular.css
vendored
Normal file
15
src/root/static/fontawesome/css/regular.css
vendored
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
@font-face {
|
||||||
|
font-family: 'Font Awesome 5 Free';
|
||||||
|
font-style: normal;
|
||||||
|
font-weight: 400;
|
||||||
|
font-display: auto;
|
||||||
|
src: url("../webfonts/fa-regular-400.eot");
|
||||||
|
src: url("../webfonts/fa-regular-400.eot?#iefix") format("embedded-opentype"), url("../webfonts/fa-regular-400.woff2") format("woff2"), url("../webfonts/fa-regular-400.woff") format("woff"), url("../webfonts/fa-regular-400.ttf") format("truetype"), url("../webfonts/fa-regular-400.svg#fontawesome") format("svg"); }
|
||||||
|
|
||||||
|
.far {
|
||||||
|
font-family: 'Font Awesome 5 Free';
|
||||||
|
font-weight: 400; }
|
5
src/root/static/fontawesome/css/regular.min.css
vendored
Normal file
5
src/root/static/fontawesome/css/regular.min.css
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;font-display:auto;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.far{font-family:"Font Awesome 5 Free";font-weight:400}
|
16
src/root/static/fontawesome/css/solid.css
vendored
Normal file
16
src/root/static/fontawesome/css/solid.css
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
@font-face {
|
||||||
|
font-family: 'Font Awesome 5 Free';
|
||||||
|
font-style: normal;
|
||||||
|
font-weight: 900;
|
||||||
|
font-display: auto;
|
||||||
|
src: url("../webfonts/fa-solid-900.eot");
|
||||||
|
src: url("../webfonts/fa-solid-900.eot?#iefix") format("embedded-opentype"), url("../webfonts/fa-solid-900.woff2") format("woff2"), url("../webfonts/fa-solid-900.woff") format("woff"), url("../webfonts/fa-solid-900.ttf") format("truetype"), url("../webfonts/fa-solid-900.svg#fontawesome") format("svg"); }
|
||||||
|
|
||||||
|
.fa,
|
||||||
|
.fas {
|
||||||
|
font-family: 'Font Awesome 5 Free';
|
||||||
|
font-weight: 900; }
|
5
src/root/static/fontawesome/css/solid.min.css
vendored
Normal file
5
src/root/static/fontawesome/css/solid.min.css
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;font-display:auto;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.fas{font-family:"Font Awesome 5 Free";font-weight:900}
|
371
src/root/static/fontawesome/css/svg-with-js.css
vendored
Normal file
371
src/root/static/fontawesome/css/svg-with-js.css
vendored
Normal file
|
@ -0,0 +1,371 @@
|
||||||
|
/*!
|
||||||
|
* Font Awesome Free 5.10.2 by @fontawesome - https://fontawesome.com
|
||||||
|
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||||
|
*/
|
||||||
|
svg:not(:root).svg-inline--fa {
|
||||||
|
overflow: visible; }
|
||||||
|
|
||||||
|
.svg-inline--fa {
|
||||||
|
display: inline-block;
|
||||||
|
font-size: inherit;
|
||||||
|
height: 1em;
|
||||||
|
overflow: visible;
|
||||||
|
vertical-align: -.125em; }
|
||||||
|
.svg-inline--fa.fa-lg {
|
||||||
|
vertical-align: -.225em; }
|
||||||
|
.svg-inline--fa.fa-w-1 {
|
||||||
|
width: 0.0625em; }
|
||||||
|
.svg-inline--fa.fa-w-2 {
|
||||||
|
width: 0.125em; }
|
||||||
|
.svg-inline--fa.fa-w-3 {
|
||||||
|
width: 0.1875em; }
|
||||||
|
.svg-inline--fa.fa-w-4 {
|
||||||
|
width: 0.25em; }
|
||||||
|
.svg-inline--fa.fa-w-5 {
|
||||||
|
width: 0.3125em; }
|
||||||
|
.svg-inline--fa.fa-w-6 {
|
||||||
|
width: 0.375em; }
|
||||||
|
.svg-inline--fa.fa-w-7 {
|
||||||
|
width: 0.4375em; }
|
||||||
|
.svg-inline--fa.fa-w-8 {
|
||||||
|
width: 0.5em; }
|
||||||
|
.svg-inline--fa.fa-w-9 {
|
||||||
|
width: 0.5625em; }
|
||||||
|
.svg-inline--fa.fa-w-10 {
|
||||||
|
width: 0.625em; }
|
||||||
|
.svg-inline--fa.fa-w-11 {
|
||||||
|
width: 0.6875em; }
|
||||||
|
.svg-inline--fa.fa-w-12 {
|
||||||
|
width: 0.75em; }
|
||||||
|
.svg-inline--fa.fa-w-13 {
|
||||||
|
width: 0.8125em; }
|
||||||
|
.svg-inline--fa.fa-w-14 {
|
||||||
|
width: 0.875em; }
|
||||||
|
.svg-inline--fa.fa-w-15 {
|
||||||
|
width: 0.9375em; }
|
||||||
|
.svg-inline--fa.fa-w-16 {
|
||||||
|
width: 1em; }
|
||||||
|
.svg-inline--fa.fa-w-17 {
|
||||||
|
width: 1.0625em; }
|
||||||
|
.svg-inline--fa.fa-w-18 {
|
||||||
|
width: 1.125em; }
|
||||||
|
.svg-inline--fa.fa-w-19 {
|
||||||
|
width: 1.1875em; }
|
||||||
|
.svg-inline--fa.fa-w-20 {
|
||||||
|
width: 1.25em; }
|
||||||
|
.svg-inline--fa.fa-pull-left {
|
||||||
|
margin-right: .3em;
|
||||||
|
width: auto; }
|
||||||
|
.svg-inline--fa.fa-pull-right {
|
||||||
|
margin-left: .3em;
|
||||||
|
width: auto; }
|
||||||
|
.svg-inline--fa.fa-border {
|
||||||
|
height: 1.5em; }
|
||||||
|
.svg-inline--fa.fa-li {
|
||||||
|
width: 2em; }
|
||||||
|
.svg-inline--fa.fa-fw {
|
||||||
|
width: 1.25em; }
|
||||||
|
|
||||||
|
.fa-layers svg.svg-inline--fa {
|
||||||
|
bottom: 0;
|
||||||
|
left: 0;
|
||||||
|
margin: auto;
|
||||||
|
position: absolute;
|
||||||
|
right: 0;
|
||||||
|
top: 0; }
|
||||||
|
|
||||||
|
.fa-layers {
|
||||||
|
display: inline-block;
|
||||||
|
height: 1em;
|
||||||
|
position: relative;
|
||||||
|
text-align: center;
|
||||||
|
vertical-align: -.125em;
|
||||||
|
width: 1em; }
|
||||||
|
.fa-layers svg.svg-inline--fa {
|
||||||
|
-webkit-transform-origin: center center;
|
||||||
|
transform-origin: center center; }
|
||||||
|
|
||||||
|
.fa-layers-text, .fa-layers-counter {
|
||||||
|
display: inline-block;
|
||||||
|
position: absolute;
|
||||||
|
text-align: center; }
|
||||||
|
|
||||||
|
.fa-layers-text {
|
||||||
|
left: 50%;
|
||||||
|
top: 50%;
|
||||||
|
-webkit-transform: translate(-50%, -50%);
|
||||||
|
transform: translate(-50%, -50%);
|
||||||
|
-webkit-transform-origin: center center;
|
||||||
|
transform-origin: center center; }
|
||||||
|
|
||||||
|
.fa-layers-counter {
|
||||||
|
background-color: #ff253a;
|
||||||
|
border-radius: 1em;
|
||||||
|
-webkit-box-sizing: border-box;
|
||||||
|
box-sizing: border-box;
|
||||||
|
color: #fff;
|
||||||
|
height: 1.5em;
|
||||||
|
line-height: 1;
|
||||||
|
max-width: 5em;
|
||||||
|
  min-width: 1.5em;
  overflow: hidden;
  padding: .25em;
  right: 0;
  text-overflow: ellipsis;
  top: 0;
  -webkit-transform: scale(0.25);
  transform: scale(0.25);
  -webkit-transform-origin: top right;
  transform-origin: top right; }

.fa-layers-bottom-right {
  bottom: 0;
  right: 0;
  top: auto;
  -webkit-transform: scale(0.25);
  transform: scale(0.25);
  -webkit-transform-origin: bottom right;
  transform-origin: bottom right; }

.fa-layers-bottom-left {
  bottom: 0;
  left: 0;
  right: auto;
  top: auto;
  -webkit-transform: scale(0.25);
  transform: scale(0.25);
  -webkit-transform-origin: bottom left;
  transform-origin: bottom left; }

.fa-layers-top-right {
  right: 0;
  top: 0;
  -webkit-transform: scale(0.25);
  transform: scale(0.25);
  -webkit-transform-origin: top right;
  transform-origin: top right; }

.fa-layers-top-left {
  left: 0;
  right: auto;
  top: 0;
  -webkit-transform: scale(0.25);
  transform: scale(0.25);
  -webkit-transform-origin: top left;
  transform-origin: top left; }

.fa-lg {
  font-size: 1.33333em;
  line-height: 0.75em;
  vertical-align: -.0667em; }

.fa-xs {
  font-size: .75em; }

.fa-sm {
  font-size: .875em; }

.fa-1x {
  font-size: 1em; }

.fa-2x {
  font-size: 2em; }

.fa-3x {
  font-size: 3em; }

.fa-4x {
  font-size: 4em; }

.fa-5x {
  font-size: 5em; }

.fa-6x {
  font-size: 6em; }

.fa-7x {
  font-size: 7em; }

.fa-8x {
  font-size: 8em; }

.fa-9x {
  font-size: 9em; }

.fa-10x {
  font-size: 10em; }

.fa-fw {
  text-align: center;
  width: 1.25em; }

.fa-ul {
  list-style-type: none;
  margin-left: 2.5em;
  padding-left: 0; }
  .fa-ul > li {
    position: relative; }

.fa-li {
  left: -2em;
  position: absolute;
  text-align: center;
  width: 2em;
  line-height: inherit; }

.fa-border {
  border: solid 0.08em #eee;
  border-radius: .1em;
  padding: .2em .25em .15em; }

.fa-pull-left {
  float: left; }

.fa-pull-right {
  float: right; }

.fa.fa-pull-left,
.fas.fa-pull-left,
.far.fa-pull-left,
.fal.fa-pull-left,
.fab.fa-pull-left {
  margin-right: .3em; }

.fa.fa-pull-right,
.fas.fa-pull-right,
.far.fa-pull-right,
.fal.fa-pull-right,
.fab.fa-pull-right {
  margin-left: .3em; }

.fa-spin {
  -webkit-animation: fa-spin 2s infinite linear;
  animation: fa-spin 2s infinite linear; }

.fa-pulse {
  -webkit-animation: fa-spin 1s infinite steps(8);
  animation: fa-spin 1s infinite steps(8); }

@-webkit-keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg); }
  100% {
    -webkit-transform: rotate(360deg);
    transform: rotate(360deg); } }

@keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg); }
  100% {
    -webkit-transform: rotate(360deg);
    transform: rotate(360deg); } }
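/* Editorial aside, not part of the vendored file: .fa-pulse above shows that
   the fa-spin keyframes can be reused by any rule with a different timing.
   A minimal sketch under that assumption; the .fa-spin-slow class name is
   hypothetical, chosen only for illustration: */
.fa-spin-slow {
  -webkit-animation: fa-spin 4s infinite linear;
  animation: fa-spin 4s infinite linear; }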

.fa-rotate-90 {
  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";
  -webkit-transform: rotate(90deg);
  transform: rotate(90deg); }

.fa-rotate-180 {
  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";
  -webkit-transform: rotate(180deg);
  transform: rotate(180deg); }

.fa-rotate-270 {
  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";
  -webkit-transform: rotate(270deg);
  transform: rotate(270deg); }

.fa-flip-horizontal {
  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";
  -webkit-transform: scale(-1, 1);
  transform: scale(-1, 1); }

.fa-flip-vertical {
  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";
  -webkit-transform: scale(1, -1);
  transform: scale(1, -1); }

.fa-flip-both, .fa-flip-horizontal.fa-flip-vertical {
  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";
  -webkit-transform: scale(-1, -1);
  transform: scale(-1, -1); }

:root .fa-rotate-90,
:root .fa-rotate-180,
:root .fa-rotate-270,
:root .fa-flip-horizontal,
:root .fa-flip-vertical,
:root .fa-flip-both {
  -webkit-filter: none;
  filter: none; }
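/* Editorial note: the -ms-filter declarations above are legacy IE8 fallbacks.
   The :root-prefixed rules reset filter to none for every modern browser,
   because IE8 never matches the :root selector and therefore keeps its
   DXImageTransform filter while everything else uses transform instead. */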

.fa-stack {
  display: inline-block;
  height: 2em;
  position: relative;
  width: 2.5em; }

.fa-stack-1x,
.fa-stack-2x {
  bottom: 0;
  left: 0;
  margin: auto;
  position: absolute;
  right: 0;
  top: 0; }

.svg-inline--fa.fa-stack-1x {
  height: 1em;
  width: 1.25em; }

.svg-inline--fa.fa-stack-2x {
  height: 2em;
  width: 2.5em; }
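/* Editorial note: the combination above (position: absolute, all four insets
   set to 0, margin: auto) centers both fixed-size stack layers inside the
   2.5em x 2em .fa-stack container, so the 1x icon sits on top of the 2x one. */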

.fa-inverse {
  color: #fff; }

.sr-only {
  border: 0;
  clip: rect(0, 0, 0, 0);
  height: 1px;
  margin: -1px;
  overflow: hidden;
  padding: 0;
  position: absolute;
  width: 1px; }

.sr-only-focusable:active, .sr-only-focusable:focus {
  clip: auto;
  height: auto;
  margin: 0;
  overflow: visible;
  position: static;
  width: auto; }
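/* Editorial note: .sr-only is the standard visually-hidden pattern: a 1px
   clipped absolutely-positioned box that stays in the accessibility tree for
   screen readers, while .sr-only-focusable reverses each property so the
   element becomes visible again whenever it receives keyboard focus. */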

.svg-inline--fa .fa-primary {
  fill: var(--fa-primary-color, currentColor);
  opacity: 1;
  opacity: var(--fa-primary-opacity, 1); }

.svg-inline--fa .fa-secondary {
  fill: var(--fa-secondary-color, currentColor);
  opacity: 0.4;
  opacity: var(--fa-secondary-opacity, 0.4); }

.svg-inline--fa.fa-swap-opacity .fa-primary {
  opacity: 0.4;
  opacity: var(--fa-secondary-opacity, 0.4); }

.svg-inline--fa.fa-swap-opacity .fa-secondary {
  opacity: 1;
  opacity: var(--fa-primary-opacity, 1); }

.svg-inline--fa mask .fa-primary,
.svg-inline--fa mask .fa-secondary {
  fill: black; }

.fad.fa-inverse {
  color: #fff; }
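/* Editorial note: the duplicated opacity declarations above are a custom
   property fallback. Browsers without var() support drop the second
   declaration as invalid and keep the literal value; browsers that understand
   var() let the second declaration win, honoring --fa-primary-opacity and
   --fa-secondary-opacity when set. */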
5  src/root/static/fontawesome/css/svg-with-js.min.css  vendored  Normal file
File diff suppressed because one or more lines are too long
2166  src/root/static/fontawesome/css/v4-shims.css  vendored  Normal file
File diff suppressed because it is too large
5  src/root/static/fontawesome/css/v4-shims.min.css  vendored  Normal file
File diff suppressed because one or more lines are too long
BIN  src/root/static/fontawesome/webfonts/fa-brands-400.eot  Normal file
Binary file not shown.
3449  src/root/static/fontawesome/webfonts/fa-brands-400.svg  Normal file  (675 KiB)
File diff suppressed because it is too large
BIN  src/root/static/fontawesome/webfonts/fa-brands-400.ttf  Normal file
Binary file not shown.
BIN  src/root/static/fontawesome/webfonts/fa-brands-400.woff  Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.