From a0c8440a5c6eee911479c220706f74fd17e0c55f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Jun 2023 13:14:49 +0200 Subject: [PATCH 1/3] Update to Nix 2.16 and NixOS 23.05 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flake lock file updates: • Updated input 'nix': 'github:nixos/nix/4acc684ef7b3117c6d6ac12837398a0008a53d85' (2023-02-22) → 'github:NixOS/nix/84050709ea18f3285a85d729f40c8f8eddf5008e' (2023-06-06) • Added input 'nix/flake-compat': 'github:edolstra/flake-compat/35bb57c0c8d8b62bbfd284272c928ceb64ddbde9' (2023-01-17) • Updated input 'nixpkgs': follows 'nix/nixpkgs' → 'github:NixOS/nixpkgs/ef0bc3976340dab9a4e087a0bcff661a8b2e87f3' (2023-06-21) --- flake.lock | 46 +++++++++++++++++++++++++++++++--------------- flake.nix | 5 +++-- 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/flake.lock b/flake.lock index 08fd86ad..ee85f6fa 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,21 @@ { "nodes": { + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "lowdown-src": { "flake": false, "locked": { @@ -18,37 +34,40 @@ }, "nix": { "inputs": { + "flake-compat": "flake-compat", "lowdown-src": "lowdown-src", - "nixpkgs": "nixpkgs", + "nixpkgs": [ + "nixpkgs" + ], "nixpkgs-regression": "nixpkgs-regression" }, "locked": { - "lastModified": 1677045134, - "narHash": "sha256-jUc2ccTR8f6MGY2pUKgujm+lxSPNGm/ZAP+toX+nMNc=", - "owner": "nixos", + "lastModified": 1686048923, + "narHash": "sha256-/XCWa2osNFIpPC5MkxlX6qTZf/DaTLwS3LWN0SRFiuU=", + "owner": "NixOS", "repo": "nix", - "rev": "4acc684ef7b3117c6d6ac12837398a0008a53d85", + "rev": "84050709ea18f3285a85d729f40c8f8eddf5008e", "type": "github" }, "original": { - "owner": "nixos", - "ref": "2.13.3", + "owner": "NixOS", + "ref": "2.16.1", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1670461440, - "narHash": "sha256-jy1LB8HOMKGJEGXgzFRLDU1CBGL0/LlkolgnqIsF0D8=", + "lastModified": 1687379288, + "narHash": "sha256-cSuwfiqYfeVyqzCRkU9AvLTysmEuSal8nh6CYr+xWog=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "04a75b2eecc0acf6239acf9dd04485ff8d14f425", + "rev": "ef0bc3976340dab9a4e087a0bcff661a8b2e87f3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11-small", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } @@ -72,10 +91,7 @@ "root": { "inputs": { "nix": "nix", - "nixpkgs": [ - "nix", - "nixpkgs" - ] + "nixpkgs": "nixpkgs" } } }, diff --git a/flake.nix b/flake.nix index beeb90c1..6bbec9b0 100644 --- a/flake.nix +++ b/flake.nix @@ -1,8 +1,9 @@ { description = "A Nix-based continuous build system"; - inputs.nixpkgs.follows = "nix/nixpkgs"; - inputs.nix.url = "github:nixos/nix/2.13.3"; + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05"; + inputs.nix.url = "github:NixOS/nix/2.16.1"; + inputs.nix.inputs.nixpkgs.follows = "nixpkgs"; outputs = { self, nixpkgs, nix }: let From 9f69bb5c2c132e9ac7b8155972096b425155c6e1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Jun 2023 15:06:34 +0200 Subject: [PATCH 2/3] Fix compilation against Nix 2.16 --- configure.ac | 2 -- src/hydra-eval-jobs/hydra-eval-jobs.cc | 31 ++++++++++++-------- src/hydra-queue-runner/build-remote.cc | 12 ++++---- 
src/hydra-queue-runner/builder.cc | 4 +-- src/hydra-queue-runner/dispatcher.cc | 8 ++--- src/hydra-queue-runner/hydra-queue-runner.cc | 7 ++--- src/hydra-queue-runner/queue-monitor.cc | 14 ++++----- 7 files changed, 40 insertions(+), 38 deletions(-) diff --git a/configure.ac b/configure.ac index 0c823696..eec647c3 100644 --- a/configure.ac +++ b/configure.ac @@ -10,8 +10,6 @@ AC_PROG_LN_S AC_PROG_LIBTOOL AC_PROG_CXX -CXXFLAGS+=" -std=c++17" - AC_PATH_PROG([XSLTPROC], [xsltproc]) AC_ARG_WITH([docbook-xsl], diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index af839bba..79523944 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -25,7 +25,8 @@ #include -void check_pid_status_nonblocking(pid_t check_pid) { +void check_pid_status_nonblocking(pid_t check_pid) +{ // Only check 'initialized' and known PID's if (check_pid <= 0) { return; } @@ -100,7 +101,7 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std: else if (v.type() == nAttrs) { auto a = v.attrs->find(state.symbols.create(subAttribute)); if (a != v.attrs->end()) - res.push_back(std::string(state.forceString(*a->value))); + res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes"))); } }; @@ -197,26 +198,30 @@ static void worker( /* If this is an aggregate, then get its constituents. */ auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, a->pos)) { + if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) { auto a = v->attrs->get(state.symbols.create("constituents")); if (!a) throw EvalError("derivation must have a ‘constituents’ attribute"); + NixStringContext context; + state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false); + for (auto & c : context) + std::visit(overloaded { + [&](const NixStringContextElem::Built & b) { + job["constituents"].push_back(state.store->printStorePath(b.drvPath)); + }, + [&](const NixStringContextElem::Opaque & o) { + }, + [&](const NixStringContextElem::DrvDeep & d) { + }, + }, c.raw()); - PathSet context; - state.coerceToString(a->pos, *a->value, context, true, false); - for (auto & i : context) - if (i.at(0) == '!') { - size_t index = i.find("!", 1); - job["constituents"].push_back(std::string(i, index + 1)); - } - - state.forceList(*a->value, a->pos); + state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute"); for (unsigned int n = 0; n < a->value->listSize(); ++n) { auto v = a->value->listElems()[n]; state.forceValue(*v, noPos); if (v->type() == nString) - job["namedConstituents"].push_back(state.forceStringNoCtx(*v)); + job["namedConstituents"].push_back(v->str()); } } diff --git a/src/hydra-queue-runner/build-remote.cc b/src/hydra-queue-runner/build-remote.cc index 21a6c331..6baff7df 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -116,12 +116,12 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore, the remote host to substitute missing paths. */ // FIXME: substitute output pollutes our build log to << cmdQueryValidPaths << 1 << useSubstitutes; - worker_proto::write(destStore, to, closure); + workerProtoWrite(destStore, to, closure); to.flush(); /* Get back the set of paths that are already valid on the remote host. 
*/ - auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {}); + auto present = WorkerProto<StorePathSet>::read(destStore, from); if (present.size() == closure.size()) return; @@ -367,7 +367,7 @@ void State::buildRemote(ref<Store> destStore, } } if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) { - worker_proto::read(*localStore, from, Phantom<DrvOutputs> {}); + WorkerProto<DrvOutputs>::read(*localStore, from); } switch ((BuildResult::Status) res) { case BuildResult::Built: @@ -444,17 +444,17 @@ void State::buildRemote(ref<Store> destStore, std::map<StorePath, ValidPathInfo> infos; size_t totalNarSize = 0; to << cmdQueryPathInfos; - worker_proto::write(*localStore, to, outputs); + workerProtoWrite(*localStore, to, outputs); to.flush(); while (true) { auto storePathS = readString(from); if (storePathS == "") break; auto deriver = readString(from); // deriver - auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {}); + auto references = WorkerProto<StorePathSet>::read(*localStore, from); readLongLong(from); // download size auto narSize = readLongLong(from); auto narHash = Hash::parseAny(readString(from), htSHA256); - auto ca = parseContentAddressOpt(readString(from)); + auto ca = ContentAddress::parseOpt(readString(from)); readStrings<StringSet>(from); // sigs ValidPathInfo info(localStore->parseStorePath(storePathS), narHash); assert(outputs.count(info.path)); diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index 37022522..89aec323 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -323,7 +323,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore, pqxx::work txn(*conn); for (auto & b : direct) { - printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id); + printInfo("marking build %1% as succeeded", b->id); markSucceededBuild(txn, b, res, buildId != b->id || result.isCached, result.startTime, result.stopTime); } @@ -451,7 +451,7 @@ void State::failStep( /* Mark all builds that depend on this derivation as failed.
*/ for (auto & build : indirect) { if (build->finishedInDB) continue; - printMsg(lvlError, format("marking build %1% as failed") % build->id); + printError("marking build %1% as failed", build->id); txn.exec_params0 ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0", build->id, diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index d2bb3c90..1e40fa69 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -52,7 +52,7 @@ void State::dispatcher() { auto dispatcherWakeup_(dispatcherWakeup.lock()); if (!*dispatcherWakeup_) { - printMsg(lvlDebug, format("dispatcher sleeping for %1%s") % + debug("dispatcher sleeping for %1%s", std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count()); dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil); } @@ -60,7 +60,7 @@ void State::dispatcher() } } catch (std::exception & e) { - printMsg(lvlError, format("dispatcher: %1%") % e.what()); + printError("dispatcher: %s", e.what()); sleep(1); } @@ -80,8 +80,8 @@ system_time State::doDispatch() jobset.second->pruneSteps(); auto s2 = jobset.second->shareUsed(); if (s1 != s2) - printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%") - % jobset.first.first % jobset.first.second % s1 % s2); + debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%", + jobset.first.first, jobset.first.second, s1, s2); } } diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index b84681d5..acf1282e 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -161,9 +161,9 @@ void State::parseMachines(const std::string & contents) same name. */ auto i = oldMachines.find(machine->sshName); if (i == oldMachines.end()) - printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName); + printMsg(lvlChatty, "adding new machine ‘%1%’", machine->sshName); else - printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName); + printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName); machine->state = i == oldMachines.end() ? std::make_shared<Machine::State>() : i->second->state; @@ -173,7 +173,7 @@ void State::parseMachines(const std::string & contents) for (auto & m : oldMachines) if (newMachines.find(m.first) == newMachines.end()) { if (m.second->enabled) - printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first); + printInfo("removing machine ‘%1%’", m.first); /* Add a disabled Machine object to make sure stats are maintained.
*/ auto machine = std::make_shared<Machine>(*(m.second)); @@ -928,7 +928,6 @@ int main(int argc, char * * argv) }); settings.verboseBuild = true; - settings.lockCPU = false; State state{metricsAddrOpt}; if (status) diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 12d55b79..0bb167a2 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -13,7 +13,7 @@ void State::queueMonitor() try { queueMonitorLoop(); } catch (std::exception & e) { - printMsg(lvlError, format("queue monitor: %1%") % e.what()); + printError("queue monitor: %s", e.what()); sleep(10); // probably a DB problem, so don't retry right away } } @@ -142,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn, createBuild = [&](Build::ptr build) { prom.queue_build_loads.Increment(); - printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName()); + printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName()); nrAdded++; newBuildsByID.erase(build->id); if (!localStore->isValidPath(build->drvPath)) { /* Derivation has been GC'ed prematurely. */ - printMsg(lvlError, format("aborting GC'ed build %1%") % build->id); + printError("aborting GC'ed build %1%", build->id); if (!build->finishedInDB) { auto mc = startDbUpdate(); pqxx::work txn(conn); @@ -302,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn, /* Add the new runnable build steps to ‘runnable’ and wake up the builder threads. */ - printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded); + printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded); for (auto & r : newRunnable) makeRunnable(r); @@ -358,13 +358,13 @@ void State::processQueueChange(Connection & conn) for (auto i = builds_->begin(); i != builds_->end(); ) { auto b = currentIds.find(i->first); if (b == currentIds.end()) { - printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first); + printInfo("discarding cancelled build %1%", i->first); i = builds_->erase(i); // FIXME: ideally we would interrupt active build steps here. continue; } if (i->second->globalPriority < b->second) { - printMsg(lvlInfo, format("priority of build %1% increased") % i->first); + printInfo("priority of build %1% increased", i->first); i->second->globalPriority = b->second; i->second->propagatePriorities(); } @@ -654,7 +654,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<Store> if (r.empty()) continue; BuildID id = r[0][0].as<BuildID>(); - printMsg(lvlInfo, format("reusing build %d") % id); + printInfo("reusing build %d", id); BuildOutput res; res.failed = r[0][1].as<int>() == bsFailedWithOutput; From ce001bb1420bb0c774ea08cd21fd624ccea04788 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 23 Jun 2023 15:09:09 +0200 Subject: [PATCH 3/3] Relax time interval checks I saw one of these failing randomly.
--- t/Hydra/Plugin/RunCommand/basic.t | 4 ++-- t/Hydra/Plugin/RunCommand/errno.t | 4 ++-- t/Hydra/Schema/Result/RunCommandLogs.t | 22 +++++++++++----------- t/Hydra/Schema/Result/TaskRetries.t | 4 ++-- t/Hydra/Schema/ResultSet/TaskRetries.t | 2 +- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/t/Hydra/Plugin/RunCommand/basic.t b/t/Hydra/Plugin/RunCommand/basic.t index e9fc730b..2c0eec68 100644 --- a/t/Hydra/Plugin/RunCommand/basic.t +++ b/t/Hydra/Plugin/RunCommand/basic.t @@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub { ok($runlog->did_succeed(), "The process did succeed."); is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*"); is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is also recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is also recent."); is($runlog->exit_code, 0, "This command should have succeeded."); subtest "Validate the run log file exists" => sub { diff --git a/t/Hydra/Plugin/RunCommand/errno.t b/t/Hydra/Plugin/RunCommand/errno.t index 9e06f9bb..6b05d457 100644 --- a/t/Hydra/Plugin/RunCommand/errno.t +++ b/t/Hydra/Plugin/RunCommand/errno.t @@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub { ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error."); is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*"); is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is also recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is also recent."); is($runlog->exit_code, undef, "This command should not have executed."); is($runlog->error_number, 2, "This command failed to exec."); }; diff --git a/t/Hydra/Schema/Result/RunCommandLogs.t b/t/Hydra/Schema/Result/RunCommandLogs.t index 80589549..f702fcf9 100644 --- a/t/Hydra/Schema/Result/RunCommandLogs.t +++ b/t/Hydra/Schema/Result/RunCommandLogs.t @@ -55,7 +55,7 @@ subtest "Starting a process" => sub { ok($runlog->is_running(), "The process is running."); ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal."); ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); is($runlog->end_time, undef, "The end time is undefined."); is($runlog->exit_code, undef, "The exit code is undefined."); is($runlog->signal, undef, "The signal is undefined."); @@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub { ok(!$runlog->is_running(), "The process is not running."); ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal."); ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is recent."); + 
is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is recent."); is($runlog->error_number, undef, "The error number is undefined"); is($runlog->exit_code, 0, "The exit code is 0."); is($runlog->signal, undef, "The signal is undefined."); @@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub { ok(!$runlog->is_running(), "The process is not running."); ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal."); ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is recent."); is($runlog->error_number, undef, "The error number is undefined"); is($runlog->exit_code, 85, "The exit code is 85."); is($runlog->signal, undef, "The signal is undefined."); @@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub { ok(!$runlog->is_running(), "The process is not running."); ok($runlog->did_fail_with_signal(), "The process was killed by a signal."); ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is recent."); is($runlog->error_number, undef, "The error number is undefined"); is($runlog->exit_code, undef, "The exit code is undefined."); is($runlog->signal, 15, "Signal 15 was sent."); @@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub { ok(!$runlog->is_running(), "The process is not running."); ok($runlog->did_fail_with_signal(), "The process was killed by a signal."); ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is recent."); is($runlog->error_number, undef, "The error number is undefined"); is($runlog->exit_code, undef, "The exit code is undefined."); is($runlog->signal, 9, "The signal is 9."); @@ -134,8 +134,8 @@ subtest "The process failed to start" => sub { ok(!$runlog->is_running(), "The process is running."); ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal."); ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error."); - is($runlog->start_time, within(time() - 1, 2), "The start time is recent."); - is($runlog->end_time, within(time() - 1, 2), "The end time is recent."); + is($runlog->start_time, within(time() - 1, 5), "The start time is recent."); + is($runlog->end_time, within(time() - 1, 5), "The end time is recent."); is($runlog->error_number, 2, "The error number is saved"); is($runlog->exit_code, undef, "The exit code is undefined."); is($runlog->signal, undef, "The signal is undefined."); diff --git a/t/Hydra/Schema/Result/TaskRetries.t 
b/t/Hydra/Schema/Result/TaskRetries.t index 0425f11c..a9c9f132 100644 --- a/t/Hydra/Schema/Result/TaskRetries.t +++ b/t/Hydra/Schema/Result/TaskRetries.t @@ -25,11 +25,11 @@ subtest "requeue" => sub { $task->requeue(); is($task->attempts, 2, "We should have stored a second retry"); - is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff step"); + is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff step"); $task->requeue(); is($task->attempts, 3, "We should have stored a third retry"); - is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step"); + is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step"); }; done_testing; diff --git a/t/Hydra/Schema/ResultSet/TaskRetries.t b/t/Hydra/Schema/ResultSet/TaskRetries.t index 4555832c..a9354896 100644 --- a/t/Hydra/Schema/ResultSet/TaskRetries.t +++ b/t/Hydra/Schema/ResultSet/TaskRetries.t @@ -101,7 +101,7 @@ subtest "save_task" => sub { is($retry->pluginname, "FooPluginName", "Plugin name should match"); is($retry->payload, "1", "Payload should match"); is($retry->attempts, 1, "We've had one attempt"); - is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away"); + is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away"); }; done_testing;
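The timestamp and retry assertions changed in PATCH 3/3 all rely on Test2's within() comparison, where within(EXPECTED, TOLERANCE) matches any value in the range EXPECTED ± TOLERANCE, so raising the tolerance from 2 to 5 seconds simply tolerates slower test runs. A minimal standalone sketch of the relaxed check (assuming Test2::V0 is available; the recorded timestamp below is illustrative, not taken from Hydra):

    use strict;
    use warnings;
    use Test2::V0;

    # Pretend this timestamp was recorded by the code under test a moment ago.
    my $start_time = time() - 1;

    # Passes as long as $start_time lies within +/- 5 seconds of time() - 1,
    # mirroring the relaxed interval checks in the patches above.
    is($start_time, within(time() - 1, 5), "The start time is recent.");

    done_testing;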