hydra-queue-runner: Fix build

This commit is contained in:
Eelco Dolstra 2016-10-06 15:24:09 +02:00
parent 7089142fdc
commit 6a313c691b
3 changed files with 27 additions and 34 deletions

View file

@@ -35,10 +35,10 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil
child.pid = startProcess([&]() { child.pid = startProcess([&]() {
if (dup2(to.readSide, STDIN_FILENO) == -1) if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
throw SysError("cannot dup input pipe to stdin"); throw SysError("cannot dup input pipe to stdin");
if (dup2(from.writeSide, STDOUT_FILENO) == -1) if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
throw SysError("cannot dup output pipe to stdout"); throw SysError("cannot dup output pipe to stdout");
if (dup2(stderrFD, STDERR_FILENO) == -1) if (dup2(stderrFD, STDERR_FILENO) == -1)
@@ -67,11 +67,11 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil
throw SysError("cannot start ssh"); throw SysError("cannot start ssh");
}); });
to.readSide.close(); to.readSide = -1;
from.writeSide.close(); from.writeSide = -1;
child.to = to.writeSide.borrow(); child.to = to.writeSide.release();
child.from = from.readSide.borrow(); child.from = from.readSide.release();
} }
@@ -93,7 +93,7 @@ static void copyClosureTo(ref<Store> destStore,
/* Get back the set of paths that are already valid on the remote /* Get back the set of paths that are already valid on the remote
host. */ host. */
auto present = readStorePaths<PathSet>(from); auto present = readStorePaths<PathSet>(*destStore, from);
if (present.size() == closure.size()) return; if (present.size() == closure.size()) return;
@@ -125,8 +125,8 @@ void State::buildRemote(ref<Store> destStore,
createDirs(dirOf(result.logFile)); createDirs(dirOf(result.logFile));
AutoCloseFD logFD(open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666)); AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (logFD == -1) throw SysError(format("creating log file %1%") % result.logFile); if (!logFD) throw SysError(format("creating log file %1%") % result.logFile);
nix::Path tmpDir = createTempDir(); nix::Path tmpDir = createTempDir();
AutoDelete tmpDirDel(tmpDir, true); AutoDelete tmpDirDel(tmpDir, true);
@@ -134,12 +134,12 @@ void State::buildRemote(ref<Store> destStore,
try { try {
Child child; Child child;
openConnection(machine, tmpDir, logFD, child); openConnection(machine, tmpDir, logFD.get(), child);
logFD.close(); logFD = -1;
FdSource from(child.from); FdSource from(child.from.get());
FdSink to(child.to); FdSink to(child.to.get());
Finally updateStats([&]() { Finally updateStats([&]() {
bytesReceived += from.read; bytesReceived += from.read;
@@ -368,7 +368,7 @@ void State::buildRemote(ref<Store> destStore,
} }
/* Shut down the connection. */ /* Shut down the connection. */
child.to.close(); child.to = -1;
child.pid.wait(true); child.pid.wait(true);
} catch (Error & e) { } catch (Error & e) {

View file

@@ -65,7 +65,7 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
// store paths, or that are outside the input closure? // store paths, or that are outside the input closure?
if (product.path == "" || product.path[0] != '/') continue; if (product.path == "" || product.path[0] != '/') continue;
product.path = canonPath(product.path); product.path = canonPath(product.path);
if (!isInStore(product.path)) continue; if (!store->isInStore(product.path)) continue;
auto st = accessor->stat(product.path); auto st = accessor->stat(product.path);
if (st.type == FSAccessor::Type::tMissing) continue; if (st.type == FSAccessor::Type::tMissing) continue;

View file

@@ -11,7 +11,8 @@
#include "shared.hh" #include "shared.hh"
#include "globals.hh" #include "globals.hh"
#include "value-to-json.hh" #include "json.hh"
#include "s3-binary-cache-store.hh"
using namespace nix; using namespace nix;
@@ -451,7 +452,7 @@ void State::logCompressor()
// FIXME: use libbz2 // FIXME: use libbz2
Pid pid = startProcess([&]() { Pid pid = startProcess([&]() {
if (dup2(fd, STDOUT_FILENO) == -1) if (dup2(fd.get(), STDOUT_FILENO) == -1)
throw SysError("cannot dup output pipe to stdout"); throw SysError("cannot dup output pipe to stdout");
execlp("bzip2", "bzip2", "-c", item.logPath.c_str(), nullptr); execlp("bzip2", "bzip2", "-c", item.logPath.c_str(), nullptr);
throw SysError("cannot start bzip2"); throw SysError("cannot start bzip2");
@@ -582,7 +583,7 @@ void State::dumpStatus(Connection & conn, bool log)
root.attr("bytesReceived", bytesReceived); root.attr("bytesReceived", bytesReceived);
root.attr("nrBuildsRead", nrBuildsRead); root.attr("nrBuildsRead", nrBuildsRead);
root.attr("buildReadTimeMs", buildReadTimeMs); root.attr("buildReadTimeMs", buildReadTimeMs);
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead); root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
root.attr("nrBuildsDone", nrBuildsDone); root.attr("nrBuildsDone", nrBuildsDone);
root.attr("nrStepsStarted", nrStepsStarted); root.attr("nrStepsStarted", nrStepsStarted);
root.attr("nrStepsDone", nrStepsDone); root.attr("nrStepsDone", nrStepsDone);
@@ -603,14 +604,12 @@ void State::dumpStatus(Connection & conn, bool log)
root.attr("memoryTokensInUse", memoryTokens.currentUse()); root.attr("memoryTokensInUse", memoryTokens.currentUse());
{ {
root.attr("machines"); auto nested = root.object("machines");
JSONObject nested(out);
auto machines_(machines.lock()); auto machines_(machines.lock());
for (auto & i : *machines_) { for (auto & i : *machines_) {
auto & m(i.second); auto & m(i.second);
auto & s(m->state); auto & s(m->state);
nested.attr(m->sshName); auto nested2 = nested.object(m->sshName);
JSONObject nested2(out);
nested2.attr("enabled", m->enabled); nested2.attr("enabled", m->enabled);
nested2.attr("currentJobs", s->currentJobs); nested2.attr("currentJobs", s->currentJobs);
if (s->currentJobs == 0) if (s->currentJobs == 0)
@@ -631,24 +630,20 @@ void State::dumpStatus(Connection & conn, bool log)
} }
{ {
root.attr("jobsets"); auto nested = root.object("jobsets");
JSONObject nested(out);
auto jobsets_(jobsets.lock()); auto jobsets_(jobsets.lock());
for (auto & jobset : *jobsets_) { for (auto & jobset : *jobsets_) {
nested.attr(jobset.first.first + ":" + jobset.first.second); auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
JSONObject nested2(out);
nested2.attr("shareUsed", jobset.second->shareUsed()); nested2.attr("shareUsed", jobset.second->shareUsed());
nested2.attr("seconds", jobset.second->getSeconds()); nested2.attr("seconds", jobset.second->getSeconds());
} }
} }
{ {
root.attr("machineTypes"); auto nested = root.object("machineTypes");
JSONObject nested(out);
auto machineTypes_(machineTypes.lock()); auto machineTypes_(machineTypes.lock());
for (auto & i : *machineTypes_) { for (auto & i : *machineTypes_) {
nested.attr(i.first); auto nested2 = nested.object(i.first);
JSONObject nested2(out);
nested2.attr("runnable", i.second.runnable); nested2.attr("runnable", i.second.runnable);
nested2.attr("running", i.second.running); nested2.attr("running", i.second.running);
if (i.second.runnable > 0) if (i.second.runnable > 0)
@@ -661,8 +656,7 @@ void State::dumpStatus(Connection & conn, bool log)
auto store = getDestStore(); auto store = getDestStore();
root.attr("store"); auto nested = root.object("store");
JSONObject nested(out);
auto & stats = store->getStats(); auto & stats = store->getStats();
nested.attr("narInfoRead", stats.narInfoRead); nested.attr("narInfoRead", stats.narInfoRead);
@@ -689,8 +683,7 @@ void State::dumpStatus(Connection & conn, bool log)
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store); auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) { if (s3Store) {
nested.attr("s3"); auto nested2 = nested.object("s3");
JSONObject nested2(out);
auto & s3Stats = s3Store->getS3Stats(); auto & s3Stats = s3Store->getS3Stats();
nested2.attr("put", s3Stats.put); nested2.attr("put", s3Stats.put);
nested2.attr("putBytes", s3Stats.putBytes); nested2.attr("putBytes", s3Stats.putBytes);