Merge remote-tracking branch 'upstream/master' into split-buildRemote

John Ericson 2023-11-30 11:27:40 -05:00
commit 831a2d9bd5
30 changed files with 380 additions and 273 deletions

@@ -4,7 +4,7 @@ on:
push:
jobs:
tests:
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:

@@ -10,8 +10,6 @@ AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX
CXXFLAGS+=" -std=c++17"
AC_PATH_PROG([XSLTPROC], [xsltproc])
AC_ARG_WITH([docbook-xsl],

@@ -131,8 +131,8 @@ use LDAP to manage roles and users.
This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found [here]
(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
@@ -179,6 +179,7 @@ Example configuration:
<role_search_options>
deref = always
</role_search_options>
</store>
</config>
<role_mapping>
# Make all users in the hydra_admin group Hydra admins

@@ -1,5 +1,21 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"lowdown-src": {
"flake": false,
"locked": {
@@ -18,37 +34,40 @@
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs",
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1661606874,
"narHash": "sha256-9+rpYzI+SmxJn+EbYxjGv68Ucp22bdFUSy/4LkHkkDQ=",
"lastModified": 1690219894,
"narHash": "sha256-QMYAkdtU+g9HlZKtoJ+AI6TbWzzovKGnPZJHfZdclc8=",
"owner": "NixOS",
"repo": "nix",
"rev": "11e45768b34fdafdcf019ddbd337afa16127ff0f",
"rev": "a212300a1d9f9c7b0daf19c00c87fc50480f54f4",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "2.11.0",
"ref": "2.17.0",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1657693803,
"narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=",
"lastModified": 1687379288,
"narHash": "sha256-cSuwfiqYfeVyqzCRkU9AvLTysmEuSal8nh6CYr+xWog=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "365e1b3a859281cf11b94f87231adeabbdd878a2",
"rev": "ef0bc3976340dab9a4e087a0bcff661a8b2e87f3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.05-small",
"ref": "nixos-23.05",
"repo": "nixpkgs",
"type": "github"
}
@@ -72,10 +91,7 @@
"root": {
"inputs": {
"nix": "nix",
"nixpkgs": [
"nix",
"nixpkgs"
]
"nixpkgs": "nixpkgs"
}
}
},

@@ -1,18 +1,21 @@
{
description = "A Nix-based continuous build system";
inputs.nixpkgs.follows = "nix/nixpkgs";
inputs.nix.url = "github:NixOS/nix/2.11.0";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
inputs.nix.url = "github:NixOS/nix/2.17.0";
inputs.nix.inputs.nixpkgs.follows = "nixpkgs";
outputs = { self, nixpkgs, nix }:
let
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";
pkgs = import nixpkgs {
system = "x86_64-linux";
systems = [ "x86_64-linux" "aarch64-linux" ];
forEachSystem = nixpkgs.lib.genAttrs systems;
pkgsBySystem = forEachSystem (system: import nixpkgs {
inherit system;
overlays = [ self.overlays.default nix.overlays.default ];
};
});
# NixOS configuration used for VM tests.
hydraServer =
@@ -254,9 +257,10 @@
hydraJobs = {
build.x86_64-linux = packages.x86_64-linux.hydra;
build = forEachSystem (system: packages.${system}.hydra);
manual =
manual = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "hydra-manual-${version}" { }
''
mkdir -p $out/share
@@ -264,11 +268,12 @@
mkdir $out/nix-support
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
'';
'');
tests.install.x86_64-linux =
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
tests.install = forEachSystem (system:
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-install";
nodes.machine = hydraServer;
testScript =
''
@@ -276,14 +281,16 @@
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port("3000")
machine.wait_for_open_port(3000)
machine.succeed("curl --fail http://localhost:3000/")
'';
};
});
tests.notifications.x86_64-linux =
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
tests.notifications = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-notifications";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
@@ -311,7 +318,7 @@
# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port("8086")
machine.wait_for_open_port(8086)
# Create an InfluxDB database where hydra will write to
machine.succeed(
@@ -321,7 +328,7 @@
# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port("3000")
machine.wait_for_open_port(3000)
# Setup the project and jobset
machine.succeed(
@@ -336,11 +343,13 @@
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
)
'';
};
});
tests.gitea.x86_64-linux =
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
tests.gitea = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
name = "hydra-gitea";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
@@ -352,7 +361,7 @@
distributedBuilds = true;
buildMachines = [{
hostName = "localhost";
systems = [ "x86_64-linux" ];
systems = [ system ];
}];
binaryCaches = [ ];
};
@@ -467,7 +476,7 @@
smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "x86_64-linux";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
@@ -531,31 +540,37 @@
machine.shutdown()
'';
};
});
tests.validate-openapi = pkgs.runCommand "validate-openapi"
tests.validate-openapi = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'';
'');
container = nixosConfigurations.container.config.system.build.toplevel;
};
checks.x86_64-linux.build = hydraJobs.build.x86_64-linux;
checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux;
checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi;
checks = forEachSystem (system: {
build = hydraJobs.build.${system};
install = hydraJobs.tests.install.${system};
validate-openapi = hydraJobs.tests.validate-openapi.${system};
});
packages.x86_64-linux.hydra = pkgs.hydra;
packages.x86_64-linux.default = pkgs.hydra;
packages = forEachSystem (system: {
hydra = pkgsBySystem.${system}.hydra;
default = pkgsBySystem.${system}.hydra;
});
nixosModules.hydra = {
imports = [ ./hydra-module.nix ];
nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
};
nixosModules.hydraTest = {
nixosModules.hydraTest = { pkgs, ... }: {
imports = [ self.nixosModules.hydra ];
services.hydra-dev.enable = true;

@@ -533,13 +533,13 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/eval/{build-id}:
/eval/{eval-id}:
get:
summary: Retrieves evaluations identified by build id
summary: Retrieves evaluations identified by eval id
parameters:
- name: build-id
- name: eval-id
in: path
description: build identifier
description: eval identifier
required: true
schema:
type: integer
@@ -551,6 +551,24 @@ paths:
schema:
$ref: '#/components/schemas/JobsetEval'
/eval/{eval-id}/builds:
get:
summary: Retrieves all builds belonging to an evaluation identified by eval id
parameters:
- name: eval-id
in: path
description: eval identifier
required: true
schema:
type: integer
responses:
'200':
description: builds
content:
application/json:
schema:
$ref: '#/components/schemas/JobsetEvalBuilds'
components:
schemas:
@@ -796,6 +814,13 @@ components:
additionalProperties:
$ref: '#/components/schemas/JobsetEvalInput'
JobsetEvalBuilds:
type: array
items:
type: object
additionalProperties:
$ref: '#/components/schemas/Build'
JobsetOverview:
type: array
items:
@@ -870,7 +895,7 @@ components:
description: Size of the produced file
type: integer
defaultpath:
description: This is a Git/Mercurial commit hash or a Subversion revision number
description: if path is a directory, the default file relative to path to be served
type: string
'type':
description: Types of build product (user defined)
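
For context, the new /eval/{eval-id}/builds endpoint added above returns JSON matching the JobsetEvalBuilds schema. A rough client-side sketch (not part of this commit) — the eval id 1 and the localhost:3000 base URL are assumptions borrowed from the VM tests elsewhere in this diff:

    // fetch-eval-builds.cc: minimal libcurl GET against the new endpoint.
    // Build with: g++ fetch-eval-builds.cc -lcurl
    #include <curl/curl.h>
    #include <iostream>
    #include <string>

    static size_t collect(char * data, size_t size, size_t nmemb, void * userp)
    {
        static_cast<std::string *>(userp)->append(data, size * nmemb);
        return size * nmemb;
    }

    int main()
    {
        CURL * curl = curl_easy_init();
        if (!curl) return 1;
        std::string body;
        struct curl_slist * headers = curl_slist_append(nullptr, "Accept: application/json");
        curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:3000/eval/1/builds");
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, collect);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);
        CURLcode res = curl_easy_perform(curl);
        if (res == CURLE_OK)
            std::cout << body << std::endl; // JSON validating against JobsetEvalBuilds
        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);
        return res == CURLE_OK ? 0 : 1;
    }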

@@ -340,7 +340,7 @@ in
systemd.services.hydra-queue-runner =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" "network.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
restartTriggers = [ hydraConf ];
environment = env // {

@@ -25,7 +25,8 @@
#include <nlohmann/json.hpp>
void check_pid_status_nonblocking(pid_t check_pid) {
void check_pid_status_nonblocking(pid_t check_pid)
{
// Only check 'initialized' and known PID's
if (check_pid <= 0) { return; }
@@ -100,7 +101,7 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std:
else if (v.type() == nAttrs) {
auto a = v.attrs->find(state.symbols.create(subAttribute));
if (a != v.attrs->end())
res.push_back(std::string(state.forceString(*a->value)));
res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
}
};
@@ -129,7 +130,7 @@ static void worker(
LockFlags {
.updateLockFile = false,
.useRegistries = false,
.allowMutable = false,
.allowUnlocked = false,
});
callFlake(state, lockedFlake, *vFlake);
@@ -197,26 +198,30 @@ static void worker(
/* If this is an aggregate, then get its constituents. */
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
if (a && state.forceBool(*a->value, a->pos)) {
if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
auto a = v->attrs->get(state.symbols.create("constituents"));
if (!a)
throw EvalError("derivation must have a constituents attribute");
NixStringContext context;
state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
for (auto & c : context)
std::visit(overloaded {
[&](const NixStringContextElem::Built & b) {
job["constituents"].push_back(state.store->printStorePath(b.drvPath));
},
[&](const NixStringContextElem::Opaque & o) {
},
[&](const NixStringContextElem::DrvDeep & d) {
},
}, c.raw());
PathSet context;
state.coerceToString(a->pos, *a->value, context, true, false);
for (auto & i : context)
if (i.at(0) == '!') {
size_t index = i.find("!", 1);
job["constituents"].push_back(std::string(i, index + 1));
}
state.forceList(*a->value, a->pos);
state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
for (unsigned int n = 0; n < a->value->listSize(); ++n) {
auto v = a->value->listElems()[n];
state.forceValue(*v, noPos);
if (v->type() == nString)
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
job["namedConstituents"].push_back(v->str());
}
}
@@ -245,7 +250,7 @@ static void worker(
StringSet ss;
for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
std::string name(state.symbols[i->name]);
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
if (name.find(' ') != std::string::npos) {
printError("skipping job with illegal name '%s'", name);
continue;
}
@@ -416,7 +421,11 @@ int main(int argc, char * * argv)
if (response.find("attrs") != response.end()) {
for (auto & i : response["attrs"]) {
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
std::string path = i;
if (path.find(".") != std::string::npos){
path = "\"" + path + "\"";
}
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path;
newAttrs.insert(s);
}
}
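
The last hunk above quotes attribute-path components that contain a dot before re-joining them, so a name such as foo.bar round-trips as a single component instead of being split in two on the next parse. A standalone sketch of that rule (the helper name is hypothetical, not from this diff):

    #include <iostream>
    #include <string>

    // Mirrors the quoting added in main(): wrap a component in double quotes
    // when it contains '.', otherwise append it verbatim.
    static std::string quoteAttrComponent(const std::string & component)
    {
        if (component.find('.') != std::string::npos)
            return "\"" + component + "\"";
        return component;
    }

    int main()
    {
        std::cout << quoteAttrComponent("hello") << "\n";   // hello
        std::cout << quoteAttrComponent("foo.bar") << "\n"; // "foo.bar"
    }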

@@ -366,6 +366,9 @@ struct Evaluator
printInfo("received jobset event");
}
} catch (pqxx::broken_connection & e) {
printError("Database connection broken: %s", e.what());
std::_Exit(1);
} catch (std::exception & e) {
printError("exception in database monitor thread: %s", e.what());
sleep(30);
@@ -473,6 +476,9 @@ struct Evaluator
while (true) {
try {
loop();
} catch (pqxx::broken_connection & e) {
printError("Database connection broken: %s", e.what());
std::_Exit(1);
} catch (std::exception & e) {
printError("exception in main loop: %s", e.what());
sleep(30);
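
Both hunks above add the same fail-fast rule: a pqxx::broken_connection is not recoverable from inside the loop, so the evaluator exits immediately and lets its supervisor (e.g. systemd) restart it with a fresh connection, while other exceptions keep the existing log, back off, retry behaviour. A condensed sketch of the pattern, assuming libpqxx:

    #include <pqxx/pqxx>
    #include <cstdio>
    #include <cstdlib>
    #include <exception>
    #include <unistd.h>

    void supervise(void (*loop)())
    {
        while (true) {
            try {
                loop();
            } catch (pqxx::broken_connection & e) {
                // Unrecoverable here: exit so the service manager restarts us.
                std::fprintf(stderr, "Database connection broken: %s\n", e.what());
                std::_Exit(1);
            } catch (std::exception & e) {
                // Transient failure: log, back off, retry.
                std::fprintf(stderr, "exception in main loop: %s\n", e.what());
                sleep(30);
            }
        }
    }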

@@ -6,10 +6,12 @@
#include <fcntl.h>
#include "build-result.hh"
#include "path.hh"
#include "serve-protocol.hh"
#include "state.hh"
#include "util.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "finally.hh"
#include "url.hh"
@@ -112,18 +114,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
StorePathSet closure;
destStore.computeFSClosure(paths, closure);
WorkerProto::WriteConn wconn { .to = to };
WorkerProto::ReadConn rconn { .from = from };
/* Send the "query valid paths" command with the "lock" option
enabled. This prevents a race where the remote host
garbage-collects paths that are already there. Optionally, ask
the remote host to substitute missing paths. */
// FIXME: substitute output pollutes our build log
to << cmdQueryValidPaths << 1 << useSubstitutes;
worker_proto::write(destStore, to, closure);
to << ServeProto::Command::QueryValidPaths << 1 << useSubstitutes;
WorkerProto::write(destStore, wconn, closure);
to.flush();
/* Get back the set of paths that are already valid on the remote
host. */
auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
auto present = WorkerProto::Serialise<StorePathSet>::read(destStore, rconn);
if (present.size() == closure.size()) return;
@@ -138,7 +142,7 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
std::unique_lock<std::timed_mutex> sendLock(sendMutex,
std::chrono::seconds(600));
to << cmdImportPaths;
to << ServeProto::Command::ImportPaths;
destStore.exportPaths(missing, to);
to.flush();
@@ -179,7 +183,7 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
{
std::string base(drvPath.to_string());
auto logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
createDirs(dirOf(logFile));
@@ -231,7 +235,7 @@ BasicDerivation sendInputs(
a no-op for regular stores, but for the binary cache store,
this will copy the inputs to the binary cache from the local
store. */
if (localStore != destStore) {
if (&localStore != &destStore) {
copyClosure(localStore, destStore,
step.drv->inputSrcs,
NoRepair, NoCheckSigs, NoSubstitute);
@@ -276,14 +280,15 @@ BuildResult performBuild(
BuildResult result;
conn.to << cmdBuildDerivation << localStore.printStorePath(drvPath);
conn.to << ServeProto::Command::BuildDerivation << localStore.printStorePath(drvPath);
writeDerivation(conn.to, localStore, drv);
conn.to << options.maxSilentTime << options.buildTimeout;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2)
conn.to << options.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) {
conn.to << options.repeats // == build-repeat
<< options.enforceDeterminism;
conn.to
<< options.repeats // == build-repeat
<< options.enforceDeterminism;
}
conn.to.flush();
@@ -310,7 +315,7 @@ BuildResult performBuild(
}
}
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 6) {
result.builtOutputs = worker_proto::read(localStore, conn.from, Phantom<DrvOutputs> {});
WorkerProto::Serialise<DrvOutputs>::read(localStore, conn);
}
return result;
@@ -326,18 +331,18 @@ std::map<StorePath, ValidPathInfo> queryPathInfos(
/* Get info about each output path. */
std::map<StorePath, ValidPathInfo> infos;
conn.to << cmdQueryPathInfos;
worker_proto::write(localStore, conn.to, outputs);
conn.to << ServeProto::Command::QueryPathInfos;
WorkerProto::write(localStore, conn, outputs);
conn.to.flush();
while (true) {
auto storePathS = readString(conn.from);
if (storePathS == "") break;
auto deriver = readString(conn.from); // deriver
auto references = worker_proto::read(localStore, conn.from, Phantom<StorePathSet> {});
auto references = WorkerProto::Serialise<StorePathSet>::read(localStore, conn);
readLongLong(conn.from); // download size
auto narSize = readLongLong(conn.from);
auto narHash = Hash::parseAny(readString(conn.from), htSHA256);
auto ca = parseContentAddressOpt(readString(conn.from));
auto ca = ContentAddress::parseOpt(readString(conn.from));
readStrings<StringSet>(conn.from); // sigs
ValidPathInfo info(localStore.parseStorePath(storePathS), narHash);
assert(outputs.count(info.path));
@@ -374,7 +379,7 @@ void copyPathFromRemote(
lambda function only gets executed if someone tries to read
from source2, we will send the command from here rather
than outside the lambda. */
conn.to << cmdDumpStorePath << localStore.printStorePath(info.path);
conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
conn.to.flush();
TeeSource tee(conn.from, sink);
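
The changes in this file track Nix 2.17's protocol refactoring: the integer cmd* constants become typed ServeProto::Command values, and the free worker_proto::read/write functions with their Phantom tags become WorkerProto::Serialise<T> plus explicit connection wrappers. A condensed sketch of the new call shape (assuming the Nix 2.17 headers this commit pins; the function itself is illustrative, not from the diff):

    #include "serialise.hh"
    #include "serve-protocol.hh"
    #include "store-api.hh"
    #include "worker-protocol.hh"
    #include "worker-protocol-impl.hh"

    using namespace nix;

    // Ask the remote which paths of `closure` it already has (lock enabled,
    // substitution disabled), mirroring copyClosureTo() above.
    StorePathSet queryPresent(Store & store, FdSink & to, FdSource & from,
        const StorePathSet & closure)
    {
        WorkerProto::WriteConn wconn { .to = to };
        WorkerProto::ReadConn rconn { .from = from };
        to << ServeProto::Command::QueryValidPaths << 1 << 0;
        WorkerProto::write(store, wconn, closure);
        to.flush();
        return WorkerProto::Serialise<StorePathSet>::read(store, rconn);
    }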

@@ -325,7 +325,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
pqxx::work txn(*conn);
for (auto & b : direct) {
printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
printInfo("marking build %1% as succeeded", b->id);
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
result.startTime, result.stopTime);
}
@@ -453,7 +453,7 @@ void State::failStep(
/* Mark all builds that depend on this derivation as failed. */
for (auto & build : indirect) {
if (build->finishedInDB) continue;
printMsg(lvlError, format("marking build %1% as failed") % build->id);
printError("marking build %1% as failed", build->id);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
build->id,

@@ -52,7 +52,7 @@ void State::dispatcher()
{
auto dispatcherWakeup_(dispatcherWakeup.lock());
if (!*dispatcherWakeup_) {
printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
debug("dispatcher sleeping for %1%s",
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
}
@@ -60,7 +60,7 @@
}
} catch (std::exception & e) {
printMsg(lvlError, format("dispatcher: %1%") % e.what());
printError("dispatcher: %s", e.what());
sleep(1);
}
@@ -80,8 +80,8 @@ system_time State::doDispatch()
jobset.second->pruneSteps();
auto s2 = jobset.second->shareUsed();
if (s1 != s2)
printMsg(lvlDebug, format("pruned scheduling window of %1%:%2% from %3% to %4%")
% jobset.first.first % jobset.first.second % s1 % s2);
debug("pruned scheduling window of %1%:%2% from %3% to %4%",
jobset.first.first, jobset.first.second, s1, s2);
}
}
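
This file, like builder.cc above and queue-monitor.cc below, migrates logging from the boost::format pipeline to Nix's printf-style variadic helpers; the %1% positional placeholders are kept. A side-by-side sketch of the idiom (illustrative, assuming Nix's logging.hh):

    #include "logging.hh"

    using namespace nix;

    void report(int buildId)
    {
        // Old style, removed by this commit:
        //   printMsg(lvlError, format("marking build %1% as failed") % buildId);
        // New style: same placeholders, arguments passed variadically.
        printError("marking build %1% as failed", buildId);
        debug("dispatcher sleeping for %1%s", 30);
    }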

@@ -8,6 +8,8 @@
#include <prometheus/exposer.h>
#include <nlohmann/json.hpp>
#include "state.hh"
#include "hydra-build-result.hh"
#include "store-api.hh"
@@ -15,20 +17,11 @@
#include "globals.hh"
#include "hydra-config.hh"
#include "json.hh"
#include "s3-binary-cache-store.hh"
#include "shared.hh"
using namespace nix;
namespace nix {
template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
}
using nlohmann::json;
std::string getEnvOrDie(const std::string & key)
@@ -168,9 +161,9 @@ void State::parseMachines(const std::string & contents)
same name. */
auto i = oldMachines.find(machine->sshName);
if (i == oldMachines.end())
printMsg(lvlChatty, format("adding new machine %1%") % machine->sshName);
printMsg(lvlChatty, "adding new machine %1%", machine->sshName);
else
printMsg(lvlChatty, format("updating machine %1%") % machine->sshName);
printMsg(lvlChatty, "updating machine %1%", machine->sshName);
machine->state = i == oldMachines.end()
? std::make_shared<Machine::State>()
: i->second->state;
@@ -180,7 +173,7 @@ void State::parseMachines(const std::string & contents)
for (auto & m : oldMachines)
if (newMachines.find(m.first) == newMachines.end()) {
if (m.second->enabled)
printMsg(lvlInfo, format("removing machine %1%") % m.first);
printInfo("removing machine %1%", m.first);
/* Add a disabled Machine object to make sure stats are
maintained. */
auto machine = std::make_shared<Machine>(*(m.second));
@@ -542,181 +535,168 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
void State::dumpStatus(Connection & conn)
{
std::ostringstream out;
time_t now = time(0);
json statusJson = {
{"status", "up"},
{"time", time(0)},
{"uptime", now - startedAt},
{"pid", getpid()},
{"nrQueuedBuilds", builds.lock()->size()},
{"nrActiveSteps", activeSteps_.lock()->size()},
{"nrStepsBuilding", nrStepsBuilding.load()},
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
{"nrStepsWaiting", nrStepsWaiting.load()},
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
{"bytesSent", bytesSent.load()},
{"bytesReceived", bytesReceived.load()},
{"nrBuildsRead", nrBuildsRead.load()},
{"buildReadTimeMs", buildReadTimeMs.load()},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
{"nrBuildsDone", nrBuildsDone.load()},
{"nrStepsStarted", nrStepsStarted.load()},
{"nrStepsDone", nrStepsDone.load()},
{"nrRetries", nrRetries.load()},
{"maxNrRetries", maxNrRetries.load()},
{"nrQueueWakeups", nrQueueWakeups.load()},
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
{"dispatchTimeMs", dispatchTimeMs.load()},
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
{"nrDbConnections", dbPool.count()},
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
};
{
JSONObject root(out);
time_t now = time(0);
root.attr("status", "up");
root.attr("time", time(0));
root.attr("uptime", now - startedAt);
root.attr("pid", getpid());
{
auto builds_(builds.lock());
root.attr("nrQueuedBuilds", builds_->size());
}
{
auto steps_(steps.lock());
for (auto i = steps_->begin(); i != steps_->end(); )
if (i->second.lock()) ++i; else i = steps_->erase(i);
root.attr("nrUnfinishedSteps", steps_->size());
statusJson["nrUnfinishedSteps"] = steps_->size();
}
{
auto runnable_(runnable.lock());
for (auto i = runnable_->begin(); i != runnable_->end(); )
if (i->lock()) ++i; else i = runnable_->erase(i);
root.attr("nrRunnableSteps", runnable_->size());
statusJson["nrRunnableSteps"] = runnable_->size();
}
root.attr("nrActiveSteps", activeSteps_.lock()->size());
root.attr("nrStepsBuilding", nrStepsBuilding);
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
root.attr("nrStepsWaiting", nrStepsWaiting);
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
root.attr("bytesSent", bytesSent);
root.attr("bytesReceived", bytesReceived);
root.attr("nrBuildsRead", nrBuildsRead);
root.attr("buildReadTimeMs", buildReadTimeMs);
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
root.attr("nrBuildsDone", nrBuildsDone);
root.attr("nrStepsStarted", nrStepsStarted);
root.attr("nrStepsDone", nrStepsDone);
root.attr("nrRetries", nrRetries);
root.attr("maxNrRetries", maxNrRetries);
if (nrStepsDone) {
root.attr("totalStepTime", totalStepTime);
root.attr("totalStepBuildTime", totalStepBuildTime);
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
statusJson["totalStepTime"] = totalStepTime.load();
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
}
root.attr("nrQueueWakeups", nrQueueWakeups);
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
root.attr("dispatchTimeMs", dispatchTimeMs);
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
root.attr("nrDbConnections", dbPool.count());
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
{
auto nested = root.object("machines");
auto machines_(machines.lock());
for (auto & i : *machines_) {
auto & m(i.second);
auto & s(m->state);
auto nested2 = nested.object(m->sshName);
nested2.attr("enabled", m->enabled);
{
auto list = nested2.list("systemTypes");
for (auto & s : m->systemTypes)
list.elem(s);
}
{
auto list = nested2.list("supportedFeatures");
for (auto & s : m->supportedFeatures)
list.elem(s);
}
{
auto list = nested2.list("mandatoryFeatures");
for (auto & s : m->mandatoryFeatures)
list.elem(s);
}
nested2.attr("currentJobs", s->currentJobs);
if (s->currentJobs == 0)
nested2.attr("idleSince", s->idleSince);
nested2.attr("nrStepsDone", s->nrStepsDone);
if (m->state->nrStepsDone) {
nested2.attr("totalStepTime", s->totalStepTime);
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
}
auto info(m->state->connectInfo.lock());
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
nested2.attr("consecutiveFailures", info->consecutiveFailures);
json machine = {
{"enabled", m->enabled},
{"systemTypes", m->systemTypes},
{"supportedFeatures", m->supportedFeatures},
{"mandatoryFeatures", m->mandatoryFeatures},
{"nrStepsDone", s->nrStepsDone.load()},
{"currentJobs", s->currentJobs.load()},
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
{"consecutiveFailures", info->consecutiveFailures},
};
if (s->currentJobs == 0)
machine["idleSince"] = s->idleSince.load();
if (m->state->nrStepsDone) {
machine["totalStepTime"] = s->totalStepTime.load();
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
}
statusJson["machines"][m->sshName] = machine;
}
}
{
auto nested = root.object("jobsets");
auto jobsets_json = json::object();
auto jobsets_(jobsets.lock());
for (auto & jobset : *jobsets_) {
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
nested2.attr("shareUsed", jobset.second->shareUsed());
nested2.attr("seconds", jobset.second->getSeconds());
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
{"shareUsed", jobset.second->shareUsed()},
{"seconds", jobset.second->getSeconds()},
};
}
statusJson["jobsets"] = jobsets_json;
}
{
auto nested = root.object("machineTypes");
auto machineTypesJson = json::object();
auto machineTypes_(machineTypes.lock());
for (auto & i : *machineTypes_) {
auto nested2 = nested.object(i.first);
nested2.attr("runnable", i.second.runnable);
nested2.attr("running", i.second.running);
auto machineTypeJson = machineTypesJson[i.first] = {
{"runnable", i.second.runnable},
{"running", i.second.running},
};
if (i.second.runnable > 0)
nested2.attr("waitTime", i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck));
machineTypeJson["waitTime"] = i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck);
if (i.second.running == 0)
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
}
statusJson["machineTypes"] = machineTypesJson;
}
auto store = getDestStore();
auto nested = root.object("store");
auto & stats = store->getStats();
nested.attr("narInfoRead", stats.narInfoRead);
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
nested.attr("narInfoMissing", stats.narInfoMissing);
nested.attr("narInfoWrite", stats.narInfoWrite);
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
nested.attr("narRead", stats.narRead);
nested.attr("narReadBytes", stats.narReadBytes);
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
nested.attr("narWrite", stats.narWrite);
nested.attr("narWriteAverted", stats.narWriteAverted);
nested.attr("narWriteBytes", stats.narWriteBytes);
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
nested.attr("narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0);
nested.attr("narCompressionSpeed", // MiB/s
statusJson["store"] = {
{"narInfoRead", stats.narInfoRead.load()},
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
{"narInfoMissing", stats.narInfoMissing.load()},
{"narInfoWrite", stats.narInfoWrite.load()},
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
{"narRead", stats.narRead.load()},
{"narReadBytes", stats.narReadBytes.load()},
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
{"narWrite", stats.narWrite.load()},
{"narWriteAverted", stats.narWriteAverted.load()},
{"narWriteBytes", stats.narWriteBytes.load()},
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
{"narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0},
{"narCompressionSpeed", // MiB/s
stats.narWriteCompressionTimeMs
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
: 0.0},
};
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) {
auto nested2 = nested.object("s3");
auto & s3Stats = s3Store->getS3Stats();
nested2.attr("put", s3Stats.put);
nested2.attr("putBytes", s3Stats.putBytes);
nested2.attr("putTimeMs", s3Stats.putTimeMs);
nested2.attr("putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("get", s3Stats.get);
nested2.attr("getBytes", s3Stats.getBytes);
nested2.attr("getTimeMs", s3Stats.getTimeMs);
nested2.attr("getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("head", s3Stats.head);
nested2.attr("costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
auto jsonS3 = statusJson["s3"] = {
{"put", s3Stats.put.load()},
{"putBytes", s3Stats.putBytes.load()},
{"putTimeMs", s3Stats.putTimeMs.load()},
{"putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"get", s3Stats.get.load()},
{"getBytes", s3Stats.getBytes.load()},
{"getTimeMs", s3Stats.getTimeMs.load()},
{"getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"head", s3Stats.head.load()},
{"costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
};
}
}
@@ -725,7 +705,7 @@ void State::dumpStatus(Connection & conn)
pqxx::work txn(conn);
// FIXME: use PostgreSQL 9.5 upsert.
txn.exec("delete from SystemStatus where what = 'queue-runner'");
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
txn.exec("notify status_dumped");
txn.commit();
}
@@ -950,7 +930,6 @@ int main(int argc, char * * argv)
});
settings.verboseBuild = true;
settings.lockCPU = false;
State state{metricsAddrOpt};
if (status)
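
The dumpStatus() rewrite above replaces the removed streaming JSONObject writer (and the json.hh include) with nlohmann::json: the status document is assembled as a value and serialized once with dump() when it is written to the SystemStatus table. A minimal self-contained sketch of the pattern:

    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        using nlohmann::json;
        // Build the document as a value...
        json statusJson = {
            {"status", "up"},
            {"nrStepsBuilding", 3},
        };
        statusJson["machines"]["builder1"] = {
            {"enabled", true},
            {"currentJobs", 2},
        };
        // ...then serialize once, instead of incremental stream writes.
        std::cout << statusJson.dump() << std::endl;
    }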

@@ -13,7 +13,7 @@ void State::queueMonitor()
try {
queueMonitorLoop();
} catch (std::exception & e) {
printMsg(lvlError, format("queue monitor: %1%") % e.what());
printError("queue monitor: %s", e.what());
sleep(10); // probably a DB problem, so don't retry right away
}
}
@@ -142,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn,
createBuild = [&](Build::ptr build) {
prom.queue_build_loads.Increment();
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
nrAdded++;
newBuildsByID.erase(build->id);
if (!localStore->isValidPath(build->drvPath)) {
/* Derivation has been GC'ed prematurely. */
printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
printError("aborting GC'ed build %1%", build->id);
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
@@ -302,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,
/* Add the new runnable build steps to runnable and wake up
the builder threads. */
printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
for (auto & r : newRunnable)
makeRunnable(r);
@@ -358,13 +358,13 @@ void State::processQueueChange(Connection & conn)
for (auto i = builds_->begin(); i != builds_->end(); ) {
auto b = currentIds.find(i->first);
if (b == currentIds.end()) {
printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
printInfo("discarding cancelled build %1%", i->first);
i = builds_->erase(i);
// FIXME: ideally we would interrupt active build steps here.
continue;
}
if (i->second->globalPriority < b->second) {
printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
printInfo("priority of build %1% increased", i->first);
i->second->globalPriority = b->second;
i->second->propagatePriorities();
}
@@ -654,7 +654,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();
printMsg(lvlInfo, format("reusing build %d") % id);
printInfo("reusing build %d", id);
BuildOutput res;
res.failed = r[0][1].as<int>() == bsFailedWithOutput;

@@ -21,6 +21,7 @@
#include "store-api.hh"
#include "sync.hh"
#include "nar-extractor.hh"
#include "worker-protocol.hh"
typedef unsigned int BuildID;
@@ -308,6 +309,16 @@ struct Machine
// Backpointer to the machine
ptr machine;
operator nix::WorkerProto::ReadConn ()
{
return { .from = from };
}
operator nix::WorkerProto::WriteConn ()
{
return { .to = to };
}
};
};
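
The two conversion operators added above let a Machine::Connection stand in for the typed protocol connections, so call sites (e.g. queryPathInfos() in build-remote.cc) can pass conn directly instead of wrapping conn.from/conn.to by hand. A sketch, assuming Hydra's state.hh and the Nix 2.17 headers:

    #include "state.hh"
    #include "worker-protocol.hh"
    #include "worker-protocol-impl.hh"

    nix::StorePathSet readPathSet(nix::Store & store, Machine::Connection & conn)
    {
        // conn converts implicitly to WorkerProto::ReadConn here.
        return nix::WorkerProto::Serialise<nix::StorePathSet>::read(store, conn);
    }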

@@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
} elsif ($type eq "git") {
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
die if ! -d $clonePath;
$diff .= `(cd $clonePath; git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
}
$c->stash->{'plain'} = { data => (scalar $diff) || " " };

@@ -238,9 +238,17 @@ sub serveFile {
"store", "cat", "--store", getStoreUri(), "$path"]) };
# Detect MIME type.
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
my $type = $info->{mime_with_encoding};
my $type = "text/plain";
if ($path =~ /.*\.(\S{1,})$/xms) {
my $ext = $1;
my $mimeTypes = MIME::Types->new(only_complete => 1);
my $t = $mimeTypes->mimeTypeOf($ext);
$type = ref $t ? $t->type : $t if $t;
} else {
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
$type = $info->{mime_with_encoding};
}
$c->response->content_type($type);
$c->forward('Hydra::View::Plain');
}

@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
, "jobset.enabled" => 1
},
{ order_by => ["project", "jobset", "job"]
, join => ["project", "jobset"]
, join => {"jobset" => "project"}
})];
}

@@ -216,7 +216,7 @@ sub json_hint {
sub _authenticator() {
my $authenticator = Crypt::Passphrase->new(
encoder => 'Argon2',
encoder => { module => 'Argon2', output_size => 16 },
validators => [
(sub {
my ($password, $hash) = @_;

@@ -82,7 +82,7 @@
function onGoogleSignIn(googleUser) {
requestJSON({
url: "[% c.uri_for('/google-login') %]",
data: "id_token=" + googleUser.getAuthResponse().id_token,
data: "id_token=" + googleUser.credential,
type: 'POST',
success: function(data) {
window.location.reload();
@@ -91,9 +91,6 @@
return false;
};
$("#google-signin").click(function() {
$(".g-signin2:first-child > div").click();
});
</script>
[% END %]

@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
[% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
[% IF bi1.type == "git" %]
<tr><td>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
</td></tr>
[% ELSE %]
<tr><td>

@@ -133,8 +133,10 @@
[% ELSE %]
[% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %]
[% IF c.config.enable_google_login %]
<div style="display: none" class="g-signin2" data-onsuccess="onGoogleSignIn" data-theme="dark"></div>
<a class="dropdown-item" href="#" id="google-signin">Sign in with Google</a>
<script src="https://accounts.google.com/gsi/client" async defer></script>
<div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-callback="onGoogleSignIn">
</div>
<div class="g_id_signin" data-type="standard"></div>
<div class="dropdown-divider"></div>
[% END %]
[% IF c.config.github_client_id %]

src/sql/upgrade-83.sql Normal file

@@ -0,0 +1,3 @@
-- This index was introduced in a migration but was never recorded in
-- hydra.sql (the source of truth), which is why `if exists` is required.
drop index if exists IndexBuildOutputsOnPath;

@@ -0,0 +1,30 @@
use strict;
use warnings;
use Setup;
my $ctx = test_context();
use HTTP::Request::Common;
use Test2::V0;
use Catalyst::Test ();
Catalyst::Test->import('Hydra');
require Hydra::Schema;
require Hydra::Model::DB;
my $db = $ctx->db();
my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@invalid.org', password => '!' });
$user->setPassword('foobar');
my $builds = $ctx->makeAndEvaluateJobset(
expression => "basic.nix",
build => 1
);
my $login = request(POST '/login', Referer => 'http://localhost', Content => {
username => 'alice',
password => 'foobar',
});
is($login->code, 302);
my $cookie = $login->header("set-cookie");
my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie);
ok($my_jobs->is_success);
my $content = $my_jobs->content();
ok($content =~ /empty_dir/);
ok(!($content =~ /fails/));
ok(!($content =~ /succeed_with_failed/));
done_testing;

@@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub {
ok($runlog->did_succeed(), "The process did succeed.");
is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
is($runlog->exit_code, 0, "This command should have succeeded.");
subtest "Validate the run log file exists" => sub {

@@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub {
ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
is($runlog->exit_code, undef, "This command should not have executed.");
is($runlog->error_number, 2, "This command failed to exec.");
};

@@ -55,7 +55,7 @@ subtest "Starting a process" => sub {
ok($runlog->is_running(), "The process is running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, undef, "The end time is undefined.");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, undef, "The signal is undefined.");
@@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, 0, "The exit code is 0.");
is($runlog->signal, undef, "The signal is undefined.");
@@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, 85, "The exit code is 85.");
is($runlog->signal, undef, "The signal is undefined.");
@@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, 15, "Signal 15 was sent.");
@@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, 9, "The signal is 9.");
@@ -134,8 +134,8 @@ subtest "The process failed to start" => sub {
ok(!$runlog->is_running(), "The process is running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, 2, "The error number is saved");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, undef, "The signal is undefined.");

@@ -25,11 +25,11 @@ subtest "requeue" => sub {
$task->requeue();
is($task->attempts, 2, "We should have stored a second retry");
is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff step");
is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff step");
$task->requeue();
is($task->attempts, 3, "We should have stored a third retry");
is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step");
is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step");
};
done_testing;

@@ -101,7 +101,7 @@ subtest "save_task" => sub {
is($retry->pluginname, "FooPluginName", "Plugin name should match");
is($retry->payload, "1", "Payload should match");
is($retry->attempts, 1, "We've had one attempt");
is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away");
is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away");
};
done_testing;

@@ -4,6 +4,8 @@ with import ./config.nix;
mkDerivation {
name = "empty-dir";
builder = ./empty-dir-builder.sh;
meta.maintainers = [ "alice@invalid.org" ];
meta.outPath = "${placeholder "out"}";
};
fails =