Merge remote-tracking branch 'upstream/master' into split-buildRemote

Commit 831a2d9bd5 by John Ericson, 2023-11-30 11:27:40 -05:00
30 changed files with 380 additions and 273 deletions

View file

@@ -4,7 +4,7 @@ on:
   push:
 jobs:
   tests:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v3
       with:

View file

@@ -10,8 +10,6 @@ AC_PROG_LN_S
 AC_PROG_LIBTOOL
 AC_PROG_CXX
-CXXFLAGS+=" -std=c++17"
 AC_PATH_PROG([XSLTPROC], [xsltproc])
 AC_ARG_WITH([docbook-xsl],

View file

@@ -131,8 +131,8 @@ use LDAP to manage roles and users.
 This is configured by defining the `<ldap>` block in the configuration file.
 In this block it's possible to configure the authentication plugin in the
 `<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
-The documentation for the available settings can be found [here]
-(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
+The documentation for the available settings can be found
+[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
 Note that the bind password (if needed) should be supplied as an included file to
 prevent it from leaking to the Nix store.
@@ -179,6 +179,7 @@ Example configuration:
       <role_search_options>
         deref = always
       </role_search_options>
+    </store>
   </config>
   <role_mapping>
     # Make all users in the hydra_admin group Hydra admins

View file

@ -1,5 +1,21 @@
{ {
"nodes": { "nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"lowdown-src": { "lowdown-src": {
"flake": false, "flake": false,
"locked": { "locked": {
@ -18,37 +34,40 @@
}, },
"nix": { "nix": {
"inputs": { "inputs": {
"flake-compat": "flake-compat",
"lowdown-src": "lowdown-src", "lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs", "nixpkgs": [
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression" "nixpkgs-regression": "nixpkgs-regression"
}, },
"locked": { "locked": {
"lastModified": 1661606874, "lastModified": 1690219894,
"narHash": "sha256-9+rpYzI+SmxJn+EbYxjGv68Ucp22bdFUSy/4LkHkkDQ=", "narHash": "sha256-QMYAkdtU+g9HlZKtoJ+AI6TbWzzovKGnPZJHfZdclc8=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nix", "repo": "nix",
"rev": "11e45768b34fdafdcf019ddbd337afa16127ff0f", "rev": "a212300a1d9f9c7b0daf19c00c87fc50480f54f4",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "2.11.0", "ref": "2.17.0",
"repo": "nix", "repo": "nix",
"type": "github" "type": "github"
} }
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1657693803, "lastModified": 1687379288,
"narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=", "narHash": "sha256-cSuwfiqYfeVyqzCRkU9AvLTysmEuSal8nh6CYr+xWog=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "365e1b3a859281cf11b94f87231adeabbdd878a2", "rev": "ef0bc3976340dab9a4e087a0bcff661a8b2e87f3",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "nixos-22.05-small", "ref": "nixos-23.05",
"repo": "nixpkgs", "repo": "nixpkgs",
"type": "github" "type": "github"
} }
@ -72,10 +91,7 @@
"root": { "root": {
"inputs": { "inputs": {
"nix": "nix", "nix": "nix",
"nixpkgs": [ "nixpkgs": "nixpkgs"
"nix",
"nixpkgs"
]
} }
} }
}, },

View file

@@ -1,18 +1,21 @@
 {
   description = "A Nix-based continuous build system";

-  inputs.nixpkgs.follows = "nix/nixpkgs";
-  inputs.nix.url = "github:NixOS/nix/2.11.0";
+  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
+  inputs.nix.url = "github:NixOS/nix/2.17.0";
+  inputs.nix.inputs.nixpkgs.follows = "nixpkgs";

   outputs = { self, nixpkgs, nix }:
     let
       version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";

-      pkgs = import nixpkgs {
-        system = "x86_64-linux";
+      systems = [ "x86_64-linux" "aarch64-linux" ];
+      forEachSystem = nixpkgs.lib.genAttrs systems;
+
+      pkgsBySystem = forEachSystem (system: import nixpkgs {
+        inherit system;
         overlays = [ self.overlays.default nix.overlays.default ];
-      };
+      });

       # NixOS configuration used for VM tests.
       hydraServer =
@@ -254,9 +257,10 @@
       hydraJobs = {
-        build.x86_64-linux = packages.x86_64-linux.hydra;
+        build = forEachSystem (system: packages.${system}.hydra);

-        manual =
+        manual = forEachSystem (system:
+          let pkgs = pkgsBySystem.${system}; in
           pkgs.runCommand "hydra-manual-${version}" { }
             ''
               mkdir -p $out/share
@@ -264,11 +268,12 @@
               mkdir $out/nix-support
               echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
-            '';
+            '');

-        tests.install.x86_64-linux =
-          with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
+        tests.install = forEachSystem (system:
+          with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
           simpleTest {
+            name = "hydra-install";
             nodes.machine = hydraServer;
             testScript =
               ''
@@ -276,14 +281,16 @@
                 machine.wait_for_job("hydra-server")
                 machine.wait_for_job("hydra-evaluator")
                 machine.wait_for_job("hydra-queue-runner")
-                machine.wait_for_open_port("3000")
+                machine.wait_for_open_port(3000)
                 machine.succeed("curl --fail http://localhost:3000/")
               '';
-          };
+          });

-        tests.notifications.x86_64-linux =
-          with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
+        tests.notifications = forEachSystem (system:
+          let pkgs = pkgsBySystem.${system}; in
+          with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
           simpleTest {
+            name = "hydra-notifications";
             nodes.machine = { pkgs, ... }: {
               imports = [ hydraServer ];
               services.hydra-dev.extraConfig = ''
@@ -311,7 +318,7 @@
                 # Wait until InfluxDB can receive web requests
                 machine.wait_for_job("influxdb")
-                machine.wait_for_open_port("8086")
+                machine.wait_for_open_port(8086)

                 # Create an InfluxDB database where hydra will write to
                 machine.succeed(
@@ -321,7 +328,7 @@
                 # Wait until hydra-server can receive HTTP requests
                 machine.wait_for_job("hydra-server")
-                machine.wait_for_open_port("3000")
+                machine.wait_for_open_port(3000)

                 # Setup the project and jobset
                 machine.succeed(
@@ -336,11 +343,13 @@
                     + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
                 )
               '';
-          };
+          });

-        tests.gitea.x86_64-linux =
-          with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
+        tests.gitea = forEachSystem (system:
+          let pkgs = pkgsBySystem.${system}; in
+          with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
           makeTest {
+            name = "hydra-gitea";
             nodes.machine = { pkgs, ... }: {
               imports = [ hydraServer ];
               services.hydra-dev.extraConfig = ''
@@ -352,7 +361,7 @@
               distributedBuilds = true;
               buildMachines = [{
                 hostName = "localhost";
-                systems = [ "x86_64-linux" ];
+                systems = [ system ];
               }];
               binaryCaches = [ ];
             };
@@ -467,7 +476,7 @@
             smallDrv = pkgs.writeText "jobset.nix" ''
               { trivial = builtins.derivation {
                   name = "trivial";
-                  system = "x86_64-linux";
+                  system = "${system}";
                   builder = "/bin/sh";
                   allowSubstitutes = false;
                   preferLocalBuild = true;
@@ -531,31 +540,37 @@
                 machine.shutdown()
               '';
-          };
+          });

-        tests.validate-openapi = pkgs.runCommand "validate-openapi"
+        tests.validate-openapi = forEachSystem (system:
+          let pkgs = pkgsBySystem.${system}; in
+          pkgs.runCommand "validate-openapi"
           { buildInputs = [ pkgs.openapi-generator-cli ]; }
           ''
             openapi-generator-cli validate -i ${./hydra-api.yaml}
             touch $out
-          '';
+          '');

         container = nixosConfigurations.container.config.system.build.toplevel;
       };

-      checks.x86_64-linux.build = hydraJobs.build.x86_64-linux;
-      checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux;
-      checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi;
+      checks = forEachSystem (system: {
+        build = hydraJobs.build.${system};
+        install = hydraJobs.tests.install.${system};
+        validate-openapi = hydraJobs.tests.validate-openapi.${system};
+      });

-      packages.x86_64-linux.hydra = pkgs.hydra;
-      packages.x86_64-linux.default = pkgs.hydra;
+      packages = forEachSystem (system: {
+        hydra = pkgsBySystem.${system}.hydra;
+        default = pkgsBySystem.${system}.hydra;
+      });

       nixosModules.hydra = {
         imports = [ ./hydra-module.nix ];
         nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
       };

-      nixosModules.hydraTest = {
+      nixosModules.hydraTest = { pkgs, ... }: {
         imports = [ self.nixosModules.hydra ];
         services.hydra-dev.enable = true;
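
The per-system rework above hinges on nixpkgs.lib.genAttrs, which builds an attribute set from a list of names and a function, e.g. genAttrs [ "x86_64-linux" ] f == { x86_64-linux = f "x86_64-linux"; }. For readers coming from the C++ side of this codebase, a rough sketch of the same shape (the helper below is purely illustrative, not a Nix or Hydra API):

    // genAttrs-like helper: build a map by applying a function to each name.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    std::map<std::string, std::string> genAttrs(
        const std::vector<std::string> & names,
        const std::function<std::string(const std::string &)> & f)
    {
        std::map<std::string, std::string> out;
        for (auto & name : names)
            out[name] = f(name);   // one entry per name, like the Nix attrset
        return out;
    }

    int main()
    {
        // Mirrors `forEachSystem = nixpkgs.lib.genAttrs systems`: every key
        // in `systems` maps to a per-system value.
        auto jobs = genAttrs({"x86_64-linux", "aarch64-linux"},
            [](const std::string & system) { return "hydra-for-" + system; });
        for (auto & [k, v] : jobs)
            std::cout << k << " -> " << v << "\n";
    }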

View file

@@ -533,13 +533,13 @@ paths:
             schema:
               $ref: '#/components/schemas/Error'

-  /eval/{build-id}:
+  /eval/{eval-id}:
     get:
-      summary: Retrieves evaluations identified by build id
+      summary: Retrieves evaluations identified by eval id
       parameters:
-      - name: build-id
+      - name: eval-id
         in: path
-        description: build identifier
+        description: eval identifier
         required: true
         schema:
           type: integer
@@ -551,6 +551,24 @@ paths:
               schema:
                 $ref: '#/components/schemas/JobsetEval'

+  /eval/{eval-id}/builds:
+    get:
+      summary: Retrieves all builds belonging to an evaluation identified by eval id
+      parameters:
+      - name: eval-id
+        in: path
+        description: eval identifier
+        required: true
+        schema:
+          type: integer
+      responses:
+        '200':
+          description: builds
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/JobsetEvalBuilds'
+
 components:
   schemas:
@@ -796,6 +814,13 @@ components:
         additionalProperties:
           $ref: '#/components/schemas/JobsetEvalInput'

+    JobsetEvalBuilds:
+      type: array
+      items:
+        type: object
+        additionalProperties:
+          $ref: '#/components/schemas/Build'
+
     JobsetOverview:
       type: array
       items:
@@ -870,7 +895,7 @@ components:
       size:
         description: Size of the produced file
         type: integer
       defaultpath:
-        description: This is a Git/Mercurial commit hash or a Subversion revision number
+        description: if path is a directory, the default file relative to path to be served
         type: string
       'type':
         description: Types of build product (user defined)
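
A quick way to exercise the new /eval/{eval-id}/builds endpoint once it is deployed is a plain HTTP GET with an Accept: application/json header. A minimal libcurl sketch, assuming a hypothetical Hydra at hydra.example.org and eval id 123 (compile with -lcurl):

    #include <curl/curl.h>
    #include <iostream>
    #include <string>

    // Append the response body into a std::string.
    static size_t collect(char * data, size_t size, size_t nmemb, void * userp)
    {
        static_cast<std::string *>(userp)->append(data, size * nmemb);
        return size * nmemb;
    }

    int main()
    {
        CURL * curl = curl_easy_init();
        if (!curl) return 1;

        std::string body;
        // Hypothetical instance and eval id; adjust to your deployment.
        curl_easy_setopt(curl, CURLOPT_URL, "https://hydra.example.org/eval/123/builds");
        struct curl_slist * headers = curl_slist_append(nullptr, "Accept: application/json");
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, collect);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);

        CURLcode res = curl_easy_perform(curl);
        if (res == CURLE_OK)
            std::cout << body << "\n";  // JSON array of build objects

        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);
        return res == CURLE_OK ? 0 : 1;
    }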

View file

@@ -340,7 +340,7 @@ in
     systemd.services.hydra-queue-runner =
       { wantedBy = [ "multi-user.target" ];
         requires = [ "hydra-init.service" ];
-        after = [ "hydra-init.service" "network.target" ];
+        after = [ "hydra-init.service" "network.target" "network-online.target" ];
         path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
         restartTriggers = [ hydraConf ];
         environment = env // {

View file

@@ -25,7 +25,8 @@
 #include <nlohmann/json.hpp>

-void check_pid_status_nonblocking(pid_t check_pid) {
+void check_pid_status_nonblocking(pid_t check_pid)
+{
     // Only check 'initialized' and known PID's
     if (check_pid <= 0) { return; }
@@ -100,7 +101,7 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std:
         else if (v.type() == nAttrs) {
             auto a = v.attrs->find(state.symbols.create(subAttribute));
             if (a != v.attrs->end())
-                res.push_back(std::string(state.forceString(*a->value)));
+                res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
         }
     };
@@ -129,7 +130,7 @@ static void worker(
             LockFlags {
                 .updateLockFile = false,
                 .useRegistries = false,
-                .allowMutable = false,
+                .allowUnlocked = false,
             });
         callFlake(state, lockedFlake, *vFlake);
@@ -197,26 +198,30 @@ static void worker(
             /* If this is an aggregate, then get its constituents. */
             auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
-            if (a && state.forceBool(*a->value, a->pos)) {
+            if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
                 auto a = v->attrs->get(state.symbols.create("constituents"));
                 if (!a)
                     throw EvalError("derivation must have a constituents attribute");

+                NixStringContext context;
+                state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
+                for (auto & c : context)
+                    std::visit(overloaded {
+                        [&](const NixStringContextElem::Built & b) {
+                            job["constituents"].push_back(state.store->printStorePath(b.drvPath));
+                        },
+                        [&](const NixStringContextElem::Opaque & o) {
+                        },
+                        [&](const NixStringContextElem::DrvDeep & d) {
+                        },
+                    }, c.raw());
-                PathSet context;
-                state.coerceToString(a->pos, *a->value, context, true, false);
-                for (auto & i : context)
-                    if (i.at(0) == '!') {
-                        size_t index = i.find("!", 1);
-                        job["constituents"].push_back(std::string(i, index + 1));
-                    }

-                state.forceList(*a->value, a->pos);
+                state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
                 for (unsigned int n = 0; n < a->value->listSize(); ++n) {
                     auto v = a->value->listElems()[n];
                     state.forceValue(*v, noPos);
                     if (v->type() == nString)
-                        job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
+                        job["namedConstituents"].push_back(v->str());
                 }
             }
@@ -245,7 +250,7 @@ static void worker(
                 StringSet ss;
                 for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
                     std::string name(state.symbols[i->name]);
-                    if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
+                    if (name.find(' ') != std::string::npos) {
                         printError("skipping job with illegal name '%s'", name);
                         continue;
                     }
@@ -416,7 +421,11 @@ int main(int argc, char * * argv)
                     if (response.find("attrs") != response.end()) {
                         for (auto & i : response["attrs"]) {
-                            auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
+                            std::string path = i;
+                            if (path.find(".") != std::string::npos){
+                                path = "\"" + path + "\"";
+                            }
+                            auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path;
                             newAttrs.insert(s);
                         }
                     }
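
The constituents rewrite above uses the common overloaded helper with std::visit to dispatch on the alternatives of the string-context variant. A self-contained sketch of that idiom, with stand-in types rather than the real NixStringContextElem:

    #include <iostream>
    #include <string>
    #include <variant>

    // The overloaded trick: inherit operator() from every lambda passed in.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;  // deduction guide

    struct Built  { std::string drvPath; };  // stand-in for NixStringContextElem::Built
    struct Opaque { std::string path; };     // stand-in for NixStringContextElem::Opaque

    using ContextElem = std::variant<Built, Opaque>;

    int main()
    {
        ContextElem c = Built{"/nix/store/...-trivial.drv"};
        std::visit(overloaded {
            // Only derivation references become constituents; other kinds
            // of context are deliberately ignored, as in the diff above.
            [](const Built & b)  { std::cout << "constituent: " << b.drvPath << "\n"; },
            [](const Opaque &)   { /* ignored */ },
        }, c);
    }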

View file

@@ -366,6 +366,9 @@ struct Evaluator
                     printInfo("received jobset event");
                 }

+            } catch (pqxx::broken_connection & e) {
+                printError("Database connection broken: %s", e.what());
+                std::_Exit(1);
             } catch (std::exception & e) {
                 printError("exception in database monitor thread: %s", e.what());
                 sleep(30);
@@ -473,6 +476,9 @@ struct Evaluator
         while (true) {
             try {
                 loop();
+            } catch (pqxx::broken_connection & e) {
+                printError("Database connection broken: %s", e.what());
+                std::_Exit(1);
             } catch (std::exception & e) {
                 printError("exception in main loop: %s", e.what());
                 sleep(30);
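
The new handler works because catch clauses are tried in order: the more specific pqxx::broken_connection must precede the generic std::exception handler that sleeps and retries, otherwise a dead connection would be retried forever. Exiting instead lets the service supervisor restart the process with a fresh connection. A minimal sketch of that ordering, using a stand-in exception type:

    #include <iostream>
    #include <stdexcept>

    // Stand-in for pqxx::broken_connection (which derives from std::runtime_error).
    struct broken_connection : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    int main()
    {
        try {
            throw broken_connection("server closed the connection unexpectedly");
        } catch (broken_connection & e) {
            // Fatal: bail out and let the supervisor (e.g. systemd) restart us.
            std::cerr << "Database connection broken: " << e.what() << "\n";
            return 1;
        } catch (std::exception & e) {
            // Generic errors take the retry path instead.
            std::cerr << "exception in main loop: " << e.what() << "\n";
        }
    }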

View file

@@ -6,10 +6,12 @@
 #include <fcntl.h>

 #include "build-result.hh"
+#include "path.hh"
 #include "serve-protocol.hh"
 #include "state.hh"
 #include "util.hh"
 #include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
 #include "finally.hh"
 #include "url.hh"
@@ -112,18 +114,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
     StorePathSet closure;
     destStore.computeFSClosure(paths, closure);

+    WorkerProto::WriteConn wconn { .to = to };
+    WorkerProto::ReadConn rconn { .from = from };
     /* Send the "query valid paths" command with the "lock" option
        enabled. This prevents a race where the remote host
        garbage-collect paths that are already there. Optionally, ask
        the remote host to substitute missing paths. */
     // FIXME: substitute output pollutes our build log
-    to << cmdQueryValidPaths << 1 << useSubstitutes;
-    worker_proto::write(destStore, to, closure);
+    to << ServeProto::Command::QueryValidPaths << 1 << useSubstitutes;
+    WorkerProto::write(destStore, wconn, closure);
     to.flush();

     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
+    auto present = WorkerProto::Serialise<StorePathSet>::read(destStore, rconn);

     if (present.size() == closure.size()) return;
@@ -138,7 +142,7 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
     std::unique_lock<std::timed_mutex> sendLock(sendMutex,
         std::chrono::seconds(600));

-    to << cmdImportPaths;
+    to << ServeProto::Command::ImportPaths;
     destStore.exportPaths(missing, to);
     to.flush();
@@ -179,7 +183,7 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
 std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
 {
     std::string base(drvPath.to_string());
-    auto logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
+    auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);

     createDirs(dirOf(logFile));
@@ -231,7 +235,7 @@ BasicDerivation sendInputs(
            a no-op for regular stores, but for the binary cache store,
            this will copy the inputs to the binary cache from the local
            store. */
-        if (localStore != destStore) {
+        if (&localStore != &destStore) {
             copyClosure(localStore, destStore,
                 step.drv->inputSrcs,
                 NoRepair, NoCheckSigs, NoSubstitute);
@@ -276,14 +280,15 @@ BuildResult performBuild(
     BuildResult result;

-    conn.to << cmdBuildDerivation << localStore.printStorePath(drvPath);
+    conn.to << ServeProto::Command::BuildDerivation << localStore.printStorePath(drvPath);
     writeDerivation(conn.to, localStore, drv);
     conn.to << options.maxSilentTime << options.buildTimeout;
     if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2)
         conn.to << options.maxLogSize;
     if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) {
-        conn.to << options.repeats // == build-repeat
-            << options.enforceDeterminism;
+        conn.to
+            << options.repeats // == build-repeat
+            << options.enforceDeterminism;
     }
     conn.to.flush();
@@ -310,7 +315,7 @@ BuildResult performBuild(
         }
     }
     if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 6) {
-        result.builtOutputs = worker_proto::read(localStore, conn.from, Phantom<DrvOutputs> {});
+        WorkerProto::Serialise<DrvOutputs>::read(localStore, conn);
     }

     return result;
@@ -326,18 +331,18 @@ std::map<StorePath, ValidPathInfo> queryPathInfos(
     /* Get info about each output path. */
     std::map<StorePath, ValidPathInfo> infos;
-    conn.to << cmdQueryPathInfos;
-    worker_proto::write(localStore, conn.to, outputs);
+    conn.to << ServeProto::Command::QueryPathInfos;
+    WorkerProto::write(localStore, conn, outputs);
     conn.to.flush();
     while (true) {
         auto storePathS = readString(conn.from);
         if (storePathS == "") break;
         auto deriver = readString(conn.from); // deriver
-        auto references = worker_proto::read(localStore, conn.from, Phantom<StorePathSet> {});
+        auto references = WorkerProto::Serialise<StorePathSet>::read(localStore, conn);
         readLongLong(conn.from); // download size
         auto narSize = readLongLong(conn.from);
         auto narHash = Hash::parseAny(readString(conn.from), htSHA256);
-        auto ca = parseContentAddressOpt(readString(conn.from));
+        auto ca = ContentAddress::parseOpt(readString(conn.from));
         readStrings<StringSet>(conn.from); // sigs
         ValidPathInfo info(localStore.parseStorePath(storePathS), narHash);
         assert(outputs.count(info.path));
@@ -374,7 +379,7 @@ void copyPathFromRemote(
            lambda function only gets executed if someone tries to read
            from source2, we will send the command from here rather
            than outside the lambda. */
-        conn.to << cmdDumpStorePath << localStore.printStorePath(info.path);
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
         conn.to.flush();

         TeeSource tee(conn.from, sink);
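
Most of this file's churn is mechanical: the free worker_proto::read/write functions with Phantom type tags become WorkerProto::Serialise<T>::read / WorkerProto::write over small connection structs. A generic sketch of that trait shape, illustrative only and not Nix's actual implementation:

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    // Thin views over the underlying streams, like the ReadConn/WriteConn
    // structs introduced in the diff.
    struct ReadConn  { std::istream & from; };
    struct WriteConn { std::ostream & to; };

    // The element type selects the codec at compile time.
    template<typename T> struct Serialise;

    template<> struct Serialise<std::set<std::string>> {
        static std::set<std::string> read(ReadConn conn) {
            size_t n; conn.from >> n;
            std::set<std::string> out;
            for (size_t i = 0; i < n; ++i) {
                std::string s; conn.from >> s; out.insert(s);
            }
            return out;
        }
        static void write(WriteConn conn, const std::set<std::string> & s) {
            conn.to << s.size();
            for (auto & e : s) conn.to << ' ' << e;
        }
    };

    int main()
    {
        std::stringstream wire;
        Serialise<std::set<std::string>>::write(WriteConn{wire}, {"a", "b"});
        auto back = Serialise<std::set<std::string>>::read(ReadConn{wire});
        std::cout << back.size() << " paths round-tripped\n";
    }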

View file

@@ -325,7 +325,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
                 pqxx::work txn(*conn);

                 for (auto & b : direct) {
-                    printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
+                    printInfo("marking build %1% as succeeded", b->id);
                     markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
                         result.startTime, result.stopTime);
                 }
@@ -453,7 +453,7 @@ void State::failStep(
         /* Mark all builds that depend on this derivation as failed. */
         for (auto & build : indirect) {
             if (build->finishedInDB) continue;
-            printMsg(lvlError, format("marking build %1% as failed") % build->id);
+            printError("marking build %1% as failed", build->id);
             txn.exec_params0
                 ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
                  build->id,
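
printInfo and printError now take the format string and arguments directly instead of a hand-built boost::format chained with %. A toy stand-in showing how such a variadic wrapper can feed arguments to boost::format one by one (not the real Nix logging API):

    #include <boost/format.hpp>
    #include <iostream>
    #include <string>

    // Base case: no arguments left, render the format object.
    inline std::string fmt(boost::format & f) { return f.str(); }

    // Feed arguments one by one, exactly what the old `%` chaining did by hand.
    template<typename T, typename... Args>
    std::string fmt(boost::format & f, const T & x, const Args &... rest)
    {
        f % x;
        return fmt(f, rest...);
    }

    template<typename... Args>
    void printError(const std::string & s, const Args &... args)
    {
        boost::format f(s);
        std::cerr << fmt(f, args...) << std::endl;
    }

    int main()
    {
        printError("marking build %1% as failed", 42);
    }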

View file

@@ -52,7 +52,7 @@ void State::dispatcher()
             {
                 auto dispatcherWakeup_(dispatcherWakeup.lock());
                 if (!*dispatcherWakeup_) {
-                    printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
+                    debug("dispatcher sleeping for %1%s",
                         std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
                     dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
                 }
@@ -60,7 +60,7 @@ void State::dispatcher()
             }

         } catch (std::exception & e) {
-            printMsg(lvlError, format("dispatcher: %1%") % e.what());
+            printError("dispatcher: %s", e.what());
             sleep(1);
         }
@@ -80,8 +80,8 @@ system_time State::doDispatch()
             jobset.second->pruneSteps();
             auto s2 = jobset.second->shareUsed();
             if (s1 != s2)
-                printMsg(lvlDebug, format("pruned scheduling window of %1%:%2% from %3% to %4%")
-                    % jobset.first.first % jobset.first.second % s1 % s2);
+                debug("pruned scheduling window of %1%:%2% from %3% to %4%",
+                    jobset.first.first, jobset.first.second, s1, s2);
         }
     }

View file

@@ -8,6 +8,8 @@

 #include <prometheus/exposer.h>

+#include <nlohmann/json.hpp>
+
 #include "state.hh"
 #include "hydra-build-result.hh"
 #include "store-api.hh"
@@ -15,20 +17,11 @@
 #include "globals.hh"
 #include "hydra-config.hh"
-#include "json.hh"
 #include "s3-binary-cache-store.hh"
 #include "shared.hh"

 using namespace nix;
+using nlohmann::json;

-namespace nix {
-
-template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
-template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
-template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
-
-}

 std::string getEnvOrDie(const std::string & key)
@@ -168,9 +161,9 @@ void State::parseMachines(const std::string & contents)
            same name. */
         auto i = oldMachines.find(machine->sshName);
         if (i == oldMachines.end())
-            printMsg(lvlChatty, format("adding new machine %1%") % machine->sshName);
+            printMsg(lvlChatty, "adding new machine %1%", machine->sshName);
         else
-            printMsg(lvlChatty, format("updating machine %1%") % machine->sshName);
+            printMsg(lvlChatty, "updating machine %1%", machine->sshName);
         machine->state = i == oldMachines.end()
             ? std::make_shared<Machine::State>()
             : i->second->state;
@@ -180,7 +173,7 @@ void State::parseMachines(const std::string & contents)
     for (auto & m : oldMachines)
         if (newMachines.find(m.first) == newMachines.end()) {
             if (m.second->enabled)
-                printMsg(lvlInfo, format("removing machine %1%") % m.first);
+                printInfo("removing machine %1%", m.first);
             /* Add a disabled Machine object to make sure stats are
                maintained. */
             auto machine = std::make_shared<Machine>(*(m.second));
@@ -542,181 +535,168 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()

 void State::dumpStatus(Connection & conn)
 {
-    std::ostringstream out;
+    time_t now = time(0);
+    json statusJson = {
+        {"status", "up"},
+        {"time", time(0)},
+        {"uptime", now - startedAt},
+        {"pid", getpid()},
+
+        {"nrQueuedBuilds", builds.lock()->size()},
+        {"nrActiveSteps", activeSteps_.lock()->size()},
+        {"nrStepsBuilding", nrStepsBuilding.load()},
+        {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
+        {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
+        {"nrStepsWaiting", nrStepsWaiting.load()},
+        {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
+        {"bytesSent", bytesSent.load()},
+        {"bytesReceived", bytesReceived.load()},
+        {"nrBuildsRead", nrBuildsRead.load()},
+        {"buildReadTimeMs", buildReadTimeMs.load()},
+        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
+        {"nrBuildsDone", nrBuildsDone.load()},
+        {"nrStepsStarted", nrStepsStarted.load()},
+        {"nrStepsDone", nrStepsDone.load()},
+        {"nrRetries", nrRetries.load()},
+        {"maxNrRetries", maxNrRetries.load()},
+        {"nrQueueWakeups", nrQueueWakeups.load()},
+        {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
+        {"dispatchTimeMs", dispatchTimeMs.load()},
+        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
+        {"nrDbConnections", dbPool.count()},
+        {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
+    };
     {
-        JSONObject root(out);
-        time_t now = time(0);
-        root.attr("status", "up");
-        root.attr("time", time(0));
-        root.attr("uptime", now - startedAt);
-        root.attr("pid", getpid());
-        {
-            auto builds_(builds.lock());
-            root.attr("nrQueuedBuilds", builds_->size());
-        }
         {
             auto steps_(steps.lock());
             for (auto i = steps_->begin(); i != steps_->end(); )
                 if (i->second.lock()) ++i; else i = steps_->erase(i);
-            root.attr("nrUnfinishedSteps", steps_->size());
+            statusJson["nrUnfinishedSteps"] = steps_->size();
         }
         {
             auto runnable_(runnable.lock());
             for (auto i = runnable_->begin(); i != runnable_->end(); )
                 if (i->lock()) ++i; else i = runnable_->erase(i);
-            root.attr("nrRunnableSteps", runnable_->size());
+            statusJson["nrRunnableSteps"] = runnable_->size();
         }
-        root.attr("nrActiveSteps", activeSteps_.lock()->size());
-        root.attr("nrStepsBuilding", nrStepsBuilding);
-        root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
-        root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
-        root.attr("nrStepsWaiting", nrStepsWaiting);
-        root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
-        root.attr("bytesSent", bytesSent);
-        root.attr("bytesReceived", bytesReceived);
-        root.attr("nrBuildsRead", nrBuildsRead);
-        root.attr("buildReadTimeMs", buildReadTimeMs);
-        root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
-        root.attr("nrBuildsDone", nrBuildsDone);
-        root.attr("nrStepsStarted", nrStepsStarted);
-        root.attr("nrStepsDone", nrStepsDone);
-        root.attr("nrRetries", nrRetries);
-        root.attr("maxNrRetries", maxNrRetries);
         if (nrStepsDone) {
-            root.attr("totalStepTime", totalStepTime);
-            root.attr("totalStepBuildTime", totalStepBuildTime);
-            root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
-            root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
+            statusJson["totalStepTime"] = totalStepTime.load();
+            statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
+            statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
+            statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
         }
-        root.attr("nrQueueWakeups", nrQueueWakeups);
-        root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
-        root.attr("dispatchTimeMs", dispatchTimeMs);
-        root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
-        root.attr("nrDbConnections", dbPool.count());
-        root.attr("nrActiveDbUpdates", nrActiveDbUpdates);

         {
-            auto nested = root.object("machines");
             auto machines_(machines.lock());
             for (auto & i : *machines_) {
                 auto & m(i.second);
                 auto & s(m->state);
-                auto nested2 = nested.object(m->sshName);
-                nested2.attr("enabled", m->enabled);
-                {
-                    auto list = nested2.list("systemTypes");
-                    for (auto & s : m->systemTypes)
-                        list.elem(s);
-                }
-                {
-                    auto list = nested2.list("supportedFeatures");
-                    for (auto & s : m->supportedFeatures)
-                        list.elem(s);
-                }
-                {
-                    auto list = nested2.list("mandatoryFeatures");
-                    for (auto & s : m->mandatoryFeatures)
-                        list.elem(s);
-                }
-                nested2.attr("currentJobs", s->currentJobs);
-                if (s->currentJobs == 0)
-                    nested2.attr("idleSince", s->idleSince);
-                nested2.attr("nrStepsDone", s->nrStepsDone);
-                if (m->state->nrStepsDone) {
-                    nested2.attr("totalStepTime", s->totalStepTime);
-                    nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
-                    nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
-                    nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
-                }
                 auto info(m->state->connectInfo.lock());
-                nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
-                nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
-                nested2.attr("consecutiveFailures", info->consecutiveFailures);

+                json machine = {
+                    {"enabled", m->enabled},
+                    {"systemTypes", m->systemTypes},
+                    {"supportedFeatures", m->supportedFeatures},
+                    {"mandatoryFeatures", m->mandatoryFeatures},
+                    {"nrStepsDone", s->nrStepsDone.load()},
+                    {"currentJobs", s->currentJobs.load()},
+                    {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
+                    {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
+                    {"consecutiveFailures", info->consecutiveFailures},
+                };
+
+                if (s->currentJobs == 0)
+                    machine["idleSince"] = s->idleSince.load();
+                if (m->state->nrStepsDone) {
+                    machine["totalStepTime"] = s->totalStepTime.load();
+                    machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
+                    machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
+                    machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
+                }
+
+                statusJson["machines"][m->sshName] = machine;
             }
         }

         {
-            auto nested = root.object("jobsets");
+            auto jobsets_json = json::object();
             auto jobsets_(jobsets.lock());
             for (auto & jobset : *jobsets_) {
-                auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
-                nested2.attr("shareUsed", jobset.second->shareUsed());
-                nested2.attr("seconds", jobset.second->getSeconds());
+                jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
+                    {"shareUsed", jobset.second->shareUsed()},
+                    {"seconds", jobset.second->getSeconds()},
+                };
             }
+            statusJson["jobsets"] = jobsets_json;
         }

         {
-            auto nested = root.object("machineTypes");
+            auto machineTypesJson = json::object();
             auto machineTypes_(machineTypes.lock());
             for (auto & i : *machineTypes_) {
-                auto nested2 = nested.object(i.first);
-                nested2.attr("runnable", i.second.runnable);
-                nested2.attr("running", i.second.running);
+                auto machineTypeJson = machineTypesJson[i.first] = {
+                    {"runnable", i.second.runnable},
+                    {"running", i.second.running},
+                };
                 if (i.second.runnable > 0)
-                    nested2.attr("waitTime", i.second.waitTime.count() +
-                        i.second.runnable * (time(0) - lastDispatcherCheck));
+                    machineTypeJson["waitTime"] = i.second.waitTime.count() +
+                        i.second.runnable * (time(0) - lastDispatcherCheck);
                 if (i.second.running == 0)
-                    nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
+                    machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
             }
+            statusJson["machineTypes"] = machineTypesJson;
         }

         auto store = getDestStore();

-        auto nested = root.object("store");

         auto & stats = store->getStats();
-        nested.attr("narInfoRead", stats.narInfoRead);
-        nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
-        nested.attr("narInfoMissing", stats.narInfoMissing);
-        nested.attr("narInfoWrite", stats.narInfoWrite);
-        nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
-        nested.attr("narRead", stats.narRead);
-        nested.attr("narReadBytes", stats.narReadBytes);
-        nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
-        nested.attr("narWrite", stats.narWrite);
-        nested.attr("narWriteAverted", stats.narWriteAverted);
-        nested.attr("narWriteBytes", stats.narWriteBytes);
-        nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
-        nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
-        nested.attr("narCompressionSavings",
-            stats.narWriteBytes
-            ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
-            : 0.0);
-        nested.attr("narCompressionSpeed", // MiB/s
+        statusJson["store"] = {
+            {"narInfoRead", stats.narInfoRead.load()},
+            {"narInfoReadAverted", stats.narInfoReadAverted.load()},
+            {"narInfoMissing", stats.narInfoMissing.load()},
+            {"narInfoWrite", stats.narInfoWrite.load()},
+            {"narInfoCacheSize", stats.pathInfoCacheSize.load()},
+            {"narRead", stats.narRead.load()},
+            {"narReadBytes", stats.narReadBytes.load()},
+            {"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
+            {"narWrite", stats.narWrite.load()},
+            {"narWriteAverted", stats.narWriteAverted.load()},
+            {"narWriteBytes", stats.narWriteBytes.load()},
+            {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
+            {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
+            {"narCompressionSavings",
+                stats.narWriteBytes
+                ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+                : 0.0},
+            {"narCompressionSpeed", // MiB/s
             stats.narWriteCompressionTimeMs
             ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
-            : 0.0);
+            : 0.0},
+        };

         auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
         if (s3Store) {
-            auto nested2 = nested.object("s3");
             auto & s3Stats = s3Store->getS3Stats();
-            nested2.attr("put", s3Stats.put);
-            nested2.attr("putBytes", s3Stats.putBytes);
-            nested2.attr("putTimeMs", s3Stats.putTimeMs);
-            nested2.attr("putSpeed",
-                s3Stats.putTimeMs
-                ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
-                : 0.0);
-            nested2.attr("get", s3Stats.get);
-            nested2.attr("getBytes", s3Stats.getBytes);
-            nested2.attr("getTimeMs", s3Stats.getTimeMs);
-            nested2.attr("getSpeed",
-                s3Stats.getTimeMs
-                ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
-                : 0.0);
-            nested2.attr("head", s3Stats.head);
-            nested2.attr("costDollarApprox",
-                (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
-                + s3Stats.put / 1000.0 * 0.005 +
-                + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
+            auto jsonS3 = statusJson["s3"] = {
+                {"put", s3Stats.put.load()},
+                {"putBytes", s3Stats.putBytes.load()},
+                {"putTimeMs", s3Stats.putTimeMs.load()},
+                {"putSpeed",
+                    s3Stats.putTimeMs
+                    ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
+                    : 0.0},
+                {"get", s3Stats.get.load()},
+                {"getBytes", s3Stats.getBytes.load()},
+                {"getTimeMs", s3Stats.getTimeMs.load()},
+                {"getSpeed",
+                    s3Stats.getTimeMs
+                    ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
+                    : 0.0},
+                {"head", s3Stats.head.load()},
+                {"costDollarApprox",
+                    (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+                    + s3Stats.put / 1000.0 * 0.005 +
+                    + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
+            };
         }
     }
@@ -725,7 +705,7 @@ void State::dumpStatus(Connection & conn)
         pqxx::work txn(conn);
         // FIXME: use PostgreSQL 9.5 upsert.
         txn.exec("delete from SystemStatus where what = 'queue-runner'");
-        txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
+        txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
         txn.exec("notify status_dumped");
         txn.commit();
     }
@@ -950,7 +930,6 @@ int main(int argc, char * * argv)
         });

     settings.verboseBuild = true;
-    settings.lockCPU = false;

     State state{metricsAddrOpt};

     if (status)
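
The dumpStatus rewrite drops the streaming JSONObject builder (and its toJSON specializations for atomics) in favor of building an nlohmann::json value and serializing it once with dump(). The explicit .load() calls appear because nlohmann::json cannot consume a std::atomic<> directly (atomics are not copyable), so each counter is read into a plain value first. A minimal sketch with the same library:

    #include <nlohmann/json.hpp>
    #include <atomic>
    #include <iostream>

    using nlohmann::json;

    int main()
    {
        std::atomic<long> nrStepsDone{7};

        json statusJson = {
            {"status", "up"},
            {"nrStepsDone", nrStepsDone.load()},  // explicit load of the atomic
        };
        // Nested objects can be filled in incrementally afterwards.
        statusJson["machines"]["localhost"] = {{"enabled", true}};

        std::cout << statusJson.dump(2) << std::endl;  // serialize once, at the end
    }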

View file

@@ -13,7 +13,7 @@ void State::queueMonitor()
     try {
         queueMonitorLoop();
     } catch (std::exception & e) {
-        printMsg(lvlError, format("queue monitor: %1%") % e.what());
+        printError("queue monitor: %s", e.what());
         sleep(10); // probably a DB problem, so don't retry right away
     }
 }
@@ -142,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn,
     createBuild = [&](Build::ptr build) {
         prom.queue_build_loads.Increment();
-        printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
+        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
         nrAdded++;
         newBuildsByID.erase(build->id);

         if (!localStore->isValidPath(build->drvPath)) {
             /* Derivation has been GC'ed prematurely. */
-            printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
+            printError("aborting GC'ed build %1%", build->id);
             if (!build->finishedInDB) {
                 auto mc = startDbUpdate();
                 pqxx::work txn(conn);
@@ -302,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,
     /* Add the new runnable build steps to runnable and wake up
        the builder threads. */
-    printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
+    printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
     for (auto & r : newRunnable)
         makeRunnable(r);
@@ -358,13 +358,13 @@ void State::processQueueChange(Connection & conn)
     for (auto i = builds_->begin(); i != builds_->end(); ) {
         auto b = currentIds.find(i->first);
         if (b == currentIds.end()) {
-            printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
+            printInfo("discarding cancelled build %1%", i->first);
             i = builds_->erase(i);
             // FIXME: ideally we would interrupt active build steps here.
             continue;
         }
         if (i->second->globalPriority < b->second) {
-            printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
+            printInfo("priority of build %1% increased", i->first);
             i->second->globalPriority = b->second;
             i->second->propagatePriorities();
         }
@@ -654,7 +654,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
         if (r.empty()) continue;
         BuildID id = r[0][0].as<BuildID>();
-        printMsg(lvlInfo, format("reusing build %d") % id);
+        printInfo("reusing build %d", id);

         BuildOutput res;
         res.failed = r[0][1].as<int>() == bsFailedWithOutput;

View file

@@ -21,6 +21,7 @@
 #include "store-api.hh"
 #include "sync.hh"
 #include "nar-extractor.hh"
+#include "worker-protocol.hh"

 typedef unsigned int BuildID;
@@ -308,6 +309,16 @@ struct Machine

         // Backpointer to the machine
         ptr machine;
+
+        operator nix::WorkerProto::ReadConn ()
+        {
+            return { .from = from };
+        }
+
+        operator nix::WorkerProto::WriteConn ()
+        {
+            return { .to = to };
+        }
     };
 };
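
The two conversion operators let a Machine::Connection be passed directly to protocol functions that expect a WorkerProto::ReadConn or WriteConn, selecting the right view implicitly. A simplified, self-contained illustration of the trick (the types below are stand-ins for the Nix/Hydra ones):

    #include <iostream>
    #include <sstream>
    #include <string>

    struct ReadConn  { std::istream & from; };
    struct WriteConn { std::ostream & to; };

    struct Connection {
        std::stringstream from, to;

        // Implicit views onto the two streams.
        operator ReadConn ()  { return { .from = from }; }
        operator WriteConn () { return { .to = to }; }
    };

    void send(WriteConn conn) { conn.to << "ping"; }
    std::string recv(ReadConn conn) { std::string s; conn.from >> s; return s; }

    int main()
    {
        Connection conn;
        send(conn);                            // converts to WriteConn implicitly
        conn.from << "pong";
        std::cout << recv(conn) << std::endl;  // converts to ReadConn implicitly
    }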

View file

@@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
     } elsif ($type eq "git") {
         my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
         die if ! -d $clonePath;
-        $diff .= `(cd $clonePath; git log $rev1..$rev2)`;
-        $diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
+        $diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
+        $diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
     }

     $c->stash->{'plain'} = { data => (scalar $diff) || " " };

View file

@@ -238,9 +238,17 @@ sub serveFile {
         "store", "cat", "--store", getStoreUri(), "$path"]) };

     # Detect MIME type.
-    state $magic = File::LibMagic->new(follow_symlinks => 1);
-    my $info = $magic->info_from_filename($path);
-    my $type = $info->{mime_with_encoding};
+    my $type = "text/plain";
+    if ($path =~ /.*\.(\S{1,})$/xms) {
+        my $ext = $1;
+        my $mimeTypes = MIME::Types->new(only_complete => 1);
+        my $t = $mimeTypes->mimeTypeOf($ext);
+        $type = ref $t ? $t->type : $t if $t;
+    } else {
+        state $magic = File::LibMagic->new(follow_symlinks => 1);
+        my $info = $magic->info_from_filename($path);
+        $type = $info->{mime_with_encoding};
+    }
     $c->response->content_type($type);
     $c->forward('Hydra::View::Plain');
 }

View file

@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
           , "jobset.enabled" => 1
           },
           { order_by => ["project", "jobset", "job"]
-          , join => ["project", "jobset"]
+          , join => {"jobset" => "project"}
           })];
 }

View file

@@ -216,7 +216,7 @@ sub json_hint {

 sub _authenticator() {
     my $authenticator = Crypt::Passphrase->new(
-        encoder    => 'Argon2',
+        encoder    => { module => 'Argon2', output_size => 16 },
         validators => [
             (sub {
                 my ($password, $hash) = @_;

View file

@@ -82,7 +82,7 @@
     function onGoogleSignIn(googleUser) {
       requestJSON({
         url: "[% c.uri_for('/google-login') %]",
-        data: "id_token=" + googleUser.getAuthResponse().id_token,
+        data: "id_token=" + googleUser.credential,
         type: 'POST',
         success: function(data) {
           window.location.reload();
@@ -91,9 +91,6 @@
       return false;
     };

-    $("#google-signin").click(function() {
-      $(".g-signin2:first-child > div").click();
-    });
   </script>
 [% END %]

View file

@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
       [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
         [% IF bi1.type == "git" %]
           <tr><td>
-            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
+            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
          </td></tr>
        [% ELSE %]
          <tr><td>

View file

@@ -133,8 +133,10 @@
   [% ELSE %]
     [% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %]
       [% IF c.config.enable_google_login %]
-        <div style="display: none" class="g-signin2" data-onsuccess="onGoogleSignIn" data-theme="dark"></div>
-        <a class="dropdown-item" href="#" id="google-signin">Sign in with Google</a>
+        <script src="https://accounts.google.com/gsi/client" async defer></script>
+        <div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-callback="onGoogleSignIn">
+        </div>
+        <div class="g_id_signin" data-type="standard"></div>
         <div class="dropdown-divider"></div>
       [% END %]
       [% IF c.config.github_client_id %]

src/sql/upgrade-83.sql (new file, 3 lines)
View file

@@ -0,0 +1,3 @@
+-- This index was introduced in a migration but was never recorded in
+-- hydra.sql (the source of truth), which is why `if exists` is required.
+drop index if exists IndexBuildOutputsOnPath;

View file

@@ -0,0 +1,30 @@
+use strict;
+use warnings;
+use Setup;
+my $ctx = test_context();
+use HTTP::Request::Common;
+use Test2::V0;
+use Catalyst::Test ();
+Catalyst::Test->import('Hydra');
+require Hydra::Schema;
+require Hydra::Model::DB;
+my $db = $ctx->db();
+my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@invalid.org', password => '!' });
+$user->setPassword('foobar');
+my $builds = $ctx->makeAndEvaluateJobset(
+    expression => "basic.nix",
+    build => 1
+);
+my $login = request(POST '/login', Referer => 'http://localhost', Content => {
+    username => 'alice',
+    password => 'foobar',
+});
+is($login->code, 302);
+my $cookie = $login->header("set-cookie");
+my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie);
+ok($my_jobs->is_success);
+my $content = $my_jobs->content();
+ok($content =~ /empty_dir/);
+ok(!($content =~ /fails/));
+ok(!($content =~ /succeed_with_failed/));
+done_testing;

View file

@@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub {
     ok($runlog->did_succeed(), "The process did succeed.");
     is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
     is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
     is($runlog->exit_code, 0, "This command should have succeeded.");

     subtest "Validate the run log file exists" => sub {

View file

@@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub {
     ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
     is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
     is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
     is($runlog->exit_code, undef, "This command should not have executed.");
     is($runlog->error_number, 2, "This command failed to exec.");
 };

View file

@@ -55,7 +55,7 @@ subtest "Starting a process" => sub {
     ok($runlog->is_running(), "The process is running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
     is($runlog->end_time, undef, "The end time is undefined.");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, 0, "The exit code is 0.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, 85, "The exit code is 85.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, 15, "Signal 15 was sent.");
@@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, 9, "The signal is 9.");
@@ -134,8 +134,8 @@ subtest "The process failed to start" => sub {
     ok(!$runlog->is_running(), "The process is running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, 2, "The error number is saved");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, undef, "The signal is undefined.");

View file

@@ -25,11 +25,11 @@ subtest "requeue" => sub {
     $task->requeue();
     is($task->attempts, 2, "We should have stored a second retry");
-    is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff step");
+    is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff step");

     $task->requeue();
     is($task->attempts, 3, "We should have stored a third retry");
-    is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step");
+    is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step");
 };

 done_testing;

View file

@@ -101,7 +101,7 @@ subtest "save_task" => sub {
     is($retry->pluginname, "FooPluginName", "Plugin name should match");
     is($retry->payload, "1", "Payload should match");
     is($retry->attempts, 1, "We've had one attempt");
-    is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away");
+    is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away");
 };

 done_testing;

View file

@@ -4,6 +4,8 @@ with import ./config.nix;
   mkDerivation {
     name = "empty-dir";
     builder = ./empty-dir-builder.sh;
+    meta.maintainers = [ "alice@invalid.org" ];
+    meta.outPath = "${placeholder "out"}";
   };

   fails =