Fix compilation against Nix 2.16

Eelco Dolstra 2023-06-23 15:06:34 +02:00
parent a0c8440a5c
commit 9f69bb5c2c
7 changed files with 40 additions and 38 deletions

configure.ac

@@ -10,8 +10,6 @@ AC_PROG_LN_S
 AC_PROG_LIBTOOL
 AC_PROG_CXX
 
-CXXFLAGS+=" -std=c++17"
-
 AC_PATH_PROG([XSLTPROC], [xsltproc])
 
 AC_ARG_WITH([docbook-xsl],

src/hydra-eval-jobs/hydra-eval-jobs.cc

@@ -25,7 +25,8 @@
 #include <nlohmann/json.hpp>
 
-void check_pid_status_nonblocking(pid_t check_pid) {
+void check_pid_status_nonblocking(pid_t check_pid)
+{
     // Only check 'initialized' and known PID's
     if (check_pid <= 0) { return; }
@@ -100,7 +101,7 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std:
         else if (v.type() == nAttrs) {
             auto a = v.attrs->find(state.symbols.create(subAttribute));
             if (a != v.attrs->end())
-                res.push_back(std::string(state.forceString(*a->value)));
+                res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
         }
     };
@@ -197,26 +198,30 @@ static void worker(
             /* If this is an aggregate, then get its constituents. */
             auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
-            if (a && state.forceBool(*a->value, a->pos)) {
+            if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
                 auto a = v->attrs->get(state.symbols.create("constituents"));
                 if (!a)
                     throw EvalError("derivation must have a constituents attribute");
 
-                PathSet context;
-                state.coerceToString(a->pos, *a->value, context, true, false);
-                for (auto & i : context)
-                    if (i.at(0) == '!') {
-                        size_t index = i.find("!", 1);
-                        job["constituents"].push_back(std::string(i, index + 1));
-                    }
+                NixStringContext context;
+                state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
+                for (auto & c : context)
+                    std::visit(overloaded {
+                        [&](const NixStringContextElem::Built & b) {
+                            job["constituents"].push_back(state.store->printStorePath(b.drvPath));
+                        },
+                        [&](const NixStringContextElem::Opaque & o) {
+                        },
+                        [&](const NixStringContextElem::DrvDeep & d) {
+                        },
+                    }, c.raw());
 
-                state.forceList(*a->value, a->pos);
+                state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
                 for (unsigned int n = 0; n < a->value->listSize(); ++n) {
                     auto v = a->value->listElems()[n];
                     state.forceValue(*v, noPos);
                     if (v->type() == nString)
-                        job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
+                        job["namedConstituents"].push_back(v->str());
                 }
             }
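The added constituents code no longer parses "!"-prefixed context strings by hand; it walks the string context as typed elements and dispatches on them with std::visit and the usual overloaded helper, while the forceBool/forceString/forceList/coerceToString calls gain a "while evaluating ..." string that Nix 2.16 uses when building error traces. Below is a minimal, self-contained sketch of the std::visit + overloaded idiom only; Built and Opaque are stand-ins for illustration, not Nix's real NixStringContextElem alternatives.

    // Minimal sketch of the std::visit + overloaded idiom used above.
    // Built and Opaque are illustrative stand-ins, not Nix's real types.
    #include <iostream>
    #include <string>
    #include <variant>
    #include <vector>

    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;  // redundant in C++20

    struct Built  { std::string drvPath; };  // "built from this derivation"
    struct Opaque { std::string path; };     // "plain store path"

    int main()
    {
        std::vector<std::variant<Built, Opaque>> context{
            Built{"/nix/store/aaaa-foo.drv"}, Opaque{"/nix/store/bbbb-bar"}};

        std::vector<std::string> constituents;
        for (auto & c : context)
            std::visit(overloaded {
                [&](const Built & b) { constituents.push_back(b.drvPath); },
                [&](const Opaque &) { /* not a constituent; ignore */ },
            }, c);

        for (auto & p : constituents) std::cout << p << "\n";  // prints the .drv path
    }

Dispatching on the element type this way is what lets the new code pick out only the Built elements as constituents instead of sniffing for a leading "!" in a string.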

src/hydra-queue-runner/build-remote.cc

@@ -116,12 +116,12 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
        the remote host to substitute missing paths. */
     // FIXME: substitute output pollutes our build log
     to << cmdQueryValidPaths << 1 << useSubstitutes;
-    worker_proto::write(destStore, to, closure);
+    workerProtoWrite(destStore, to, closure);
     to.flush();
 
     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
+    auto present = WorkerProto<StorePathSet>::read(destStore, from);
 
     if (present.size() == closure.size()) return;
@@ -367,7 +367,7 @@ void State::buildRemote(ref<Store> destStore,
             }
         }
         if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
-            worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
+            WorkerProto<DrvOutputs>::read(*localStore, from);
         }
         switch ((BuildResult::Status) res) {
             case BuildResult::Built:
@@ -444,17 +444,17 @@ void State::buildRemote(ref<Store> destStore,
             std::map<StorePath, ValidPathInfo> infos;
             size_t totalNarSize = 0;
             to << cmdQueryPathInfos;
-            worker_proto::write(*localStore, to, outputs);
+            workerProtoWrite(*localStore, to, outputs);
             to.flush();
             while (true) {
                 auto storePathS = readString(from);
                 if (storePathS == "") break;
                 auto deriver = readString(from); // deriver
-                auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
+                auto references = WorkerProto<StorePathSet>::read(*localStore, from);
                 readLongLong(from); // download size
                 auto narSize = readLongLong(from);
                 auto narHash = Hash::parseAny(readString(from), htSHA256);
-                auto ca = parseContentAddressOpt(readString(from));
+                auto ca = ContentAddress::parseOpt(readString(from));
                 readStrings<StringSet>(from); // sigs
                 ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
                 assert(outputs.count(info.path));
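The hunks in this file track the Nix 2.16 worker-protocol refactor: worker_proto::write(store, to, x) becomes workerProtoWrite(store, to, x), worker_proto::read(store, from, Phantom<T>{}) becomes WorkerProto<T>::read(store, from), and parseContentAddressOpt moves to ContentAddress::parseOpt. The sketch below shows only the shape of that change, a free function selected by a Phantom tag argument versus a class template with a static read; the types and the toy serialisation are assumptions for illustration, not Nix's.

    // Shape of the API change only; Phantom, read_old and Proto are
    // illustrative, not Nix's actual serialisers.
    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    template<typename T> struct Phantom {};

    // Old shape: the element type is chosen by an unused tag parameter.
    std::set<std::string> read_old(std::istream & from, Phantom<std::set<std::string>>)
    {
        std::set<std::string> res;
        std::string s;
        while (from >> s) res.insert(s);
        return res;
    }

    // New shape: the element type is spelled at the call site.
    template<typename T> struct Proto;

    template<> struct Proto<std::set<std::string>>
    {
        static std::set<std::string> read(std::istream & from)
        {
            std::set<std::string> res;
            std::string s;
            while (from >> s) res.insert(s);
            return res;
        }
    };

    int main()
    {
        std::istringstream in1("a b c"), in2("a b c");
        auto s1 = read_old(in1, Phantom<std::set<std::string>>{});  // old call shape
        auto s2 = Proto<std::set<std::string>>::read(in2);          // new call shape
        std::cout << s1.size() << " " << s2.size() << "\n";         // prints "3 3"
    }

Either way a set is read off the wire; the template form just makes the element type explicit where it is used, which is why every read/write call site in this file changes while the surrounding logic does not.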

src/hydra-queue-runner/builder.cc

@@ -323,7 +323,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
             pqxx::work txn(*conn);
 
             for (auto & b : direct) {
-                printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
+                printInfo("marking build %1% as succeeded", b->id);
                 markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
                     result.startTime, result.stopTime);
             }
@@ -451,7 +451,7 @@ void State::failStep(
             /* Mark all builds that depend on this derivation as failed. */
             for (auto & build : indirect) {
                 if (build->finishedInDB) continue;
-                printMsg(lvlError, format("marking build %1% as failed") % build->id);
+                printError("marking build %1% as failed", build->id);
                 txn.exec_params0
                     ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
                      build->id,
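Most of the remaining changes are a logging-style migration: instead of building a boost::format object at the call site, as in printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id), callers pass the format string and arguments directly to printInfo, printError, debug, or the variadic printMsg(level, fmt, args...). A small self-contained sketch of that call shape, assuming only Boost.Format; logFmt is an illustrative stand-in, not Nix's actual logging functions.

    // Variadic wrapper that applies arguments to a positional format string,
    // mimicking the new printInfo/printError call shape. Illustrative only.
    #include <boost/format.hpp>
    #include <iostream>
    #include <string>

    template<typename... Args>
    std::string logFmt(const std::string & fs, const Args &... args)
    {
        boost::format f(fs);
        (f % ... % args);  // fold: f % arg1 % arg2 % ...
        return f.str();
    }

    int main()
    {
        int buildId = 42;
        // Old call shape: build the format object at the call site.
        std::cout << (boost::format("marking build %1% as succeeded") % buildId).str() << "\n";
        // New call shape: pass the format string and the arguments.
        std::cout << logFmt("marking build %1% as succeeded", buildId) << "\n";
    }

Both lines print the same message; only where the formatting happens moves, which is why these hunks touch the call sites but leave the format strings alone.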

src/hydra-queue-runner/dispatcher.cc

@@ -52,7 +52,7 @@ void State::dispatcher()
             {
                 auto dispatcherWakeup_(dispatcherWakeup.lock());
                 if (!*dispatcherWakeup_) {
-                    printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
+                    debug("dispatcher sleeping for %1%s",
                         std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
                     dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
                 }
@@ -60,7 +60,7 @@ void State::dispatcher()
             }
 
         } catch (std::exception & e) {
-            printMsg(lvlError, format("dispatcher: %1%") % e.what());
+            printError("dispatcher: %s", e.what());
             sleep(1);
         }
@@ -80,8 +80,8 @@ system_time State::doDispatch()
             jobset.second->pruneSteps();
             auto s2 = jobset.second->shareUsed();
             if (s1 != s2)
-                printMsg(lvlDebug, format("pruned scheduling window of %1%:%2% from %3% to %4%")
-                    % jobset.first.first % jobset.first.second % s1 % s2);
+                debug("pruned scheduling window of %1%:%2% from %3% to %4%",
+                    jobset.first.first, jobset.first.second, s1, s2);
         }
     }

src/hydra-queue-runner/hydra-queue-runner.cc

@@ -161,9 +161,9 @@ void State::parseMachines(const std::string & contents)
            same name. */
         auto i = oldMachines.find(machine->sshName);
         if (i == oldMachines.end())
-            printMsg(lvlChatty, format("adding new machine %1%") % machine->sshName);
+            printMsg(lvlChatty, "adding new machine %1%", machine->sshName);
         else
-            printMsg(lvlChatty, format("updating machine %1%") % machine->sshName);
+            printMsg(lvlChatty, "updating machine %1%", machine->sshName);
         machine->state = i == oldMachines.end()
             ? std::make_shared<Machine::State>()
             : i->second->state;
@@ -173,7 +173,7 @@ void State::parseMachines(const std::string & contents)
     for (auto & m : oldMachines)
         if (newMachines.find(m.first) == newMachines.end()) {
             if (m.second->enabled)
-                printMsg(lvlInfo, format("removing machine %1%") % m.first);
+                printInfo("removing machine %1%", m.first);
             /* Add a disabled Machine object to make sure stats are
                maintained. */
             auto machine = std::make_shared<Machine>(*(m.second));
@@ -928,7 +928,6 @@ int main(int argc, char * * argv)
         });
 
         settings.verboseBuild = true;
-        settings.lockCPU = false;
 
         State state{metricsAddrOpt};
 
         if (status)

src/hydra-queue-runner/queue-monitor.cc

@@ -13,7 +13,7 @@ void State::queueMonitor()
     try {
         queueMonitorLoop();
     } catch (std::exception & e) {
-        printMsg(lvlError, format("queue monitor: %1%") % e.what());
+        printError("queue monitor: %s", e.what());
         sleep(10); // probably a DB problem, so don't retry right away
     }
 }
@@ -142,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn,
     createBuild = [&](Build::ptr build) {
         prom.queue_build_loads.Increment();
-        printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
+        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
         nrAdded++;
         newBuildsByID.erase(build->id);
 
         if (!localStore->isValidPath(build->drvPath)) {
             /* Derivation has been GC'ed prematurely. */
-            printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
+            printError("aborting GC'ed build %1%", build->id);
             if (!build->finishedInDB) {
                 auto mc = startDbUpdate();
                 pqxx::work txn(conn);
@@ -302,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,
     /* Add the new runnable build steps to runnable and wake up
        the builder threads. */
-    printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
+    printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
     for (auto & r : newRunnable)
         makeRunnable(r);
@@ -358,13 +358,13 @@ void State::processQueueChange(Connection & conn)
         for (auto i = builds_->begin(); i != builds_->end(); ) {
             auto b = currentIds.find(i->first);
             if (b == currentIds.end()) {
-                printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
+                printInfo("discarding cancelled build %1%", i->first);
                 i = builds_->erase(i);
                 // FIXME: ideally we would interrupt active build steps here.
                 continue;
             }
             if (i->second->globalPriority < b->second) {
-                printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
+                printInfo("priority of build %1% increased", i->first);
                 i->second->globalPriority = b->second;
                 i->second->propagatePriorities();
             }
@@ -654,7 +654,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
         if (r.empty()) continue;
         BuildID id = r[0][0].as<BuildID>();
 
-        printMsg(lvlInfo, format("reusing build %d") % id);
+        printInfo("reusing build %d", id);
 
         BuildOutput res;
         res.failed = r[0][1].as<int>() == bsFailedWithOutput;