#include <algorithm>
#include <cmath>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "serve-protocol.hh"
#include "state.hh"
#include "util.hh"
#include "worker-protocol.hh"
#include "finally.hh"

using namespace nix;

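/* A connection to a build machine: the PID of the ‘ssh’ (or local
   ‘nix-store --serve’) process and the pipes attached to its stdin and
   stdout. */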
struct Child
{
    Pid pid;
    AutoCloseFD to, from;
};


static void append(Strings & dst, const Strings & src)
{
    dst.insert(dst.end(), src.begin(), src.end());
}

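/* Start ‘nix-store --serve --write’ on the build machine, either
   directly (for "localhost") or via ssh, with its stdin/stdout connected
   to pipes and its stderr redirected to the step's log file. */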
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
{
    Pipe to, from;
    to.create();
    from.create();

    child.pid = startProcess([&]() {

        restoreSignals();

        if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
            throw SysError("cannot dup input pipe to stdin");

        if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
            throw SysError("cannot dup output pipe to stdout");

        if (dup2(stderrFD, STDERR_FILENO) == -1)
            throw SysError("cannot dup stderr");

        Strings argv;
        if (machine->sshName == "localhost")
            argv = {"nix-store", "--serve", "--write"};
        else {
            argv = {"ssh", machine->sshName};
            if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
            if (machine->sshPublicHostKey != "") {
                Path fileName = tmpDir + "/host-key";
                auto p = machine->sshName.find("@");
                string host = p != string::npos ? string(machine->sshName, p + 1) : machine->sshName;
                writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
                append(argv, {"-oUserKnownHostsFile=" + fileName});
            }
            append(argv,
                { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
                , "--", "nix-store", "--serve", "--write" });
        }

        execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast

        throw SysError("cannot start ssh");
    });

    to.readSide = -1;
    from.writeSide = -1;

    child.to = to.writeSide.release();
    child.from = from.readSide.release();
}

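/* Send the closure of ‘paths’ to the remote side over the serve
   protocol, skipping (and locking) the paths that are already valid
   there. */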
static void copyClosureTo(ref<Store> destStore,
    FdSource & from, FdSink & to, const PathSet & paths,
    bool useSubstitutes = false)
{
    PathSet closure;
    for (auto & path : paths)
        destStore->computeFSClosure(path, closure);

    /* Send the "query valid paths" command with the "lock" option
       enabled. This prevents a race where the remote host
       garbage-collects paths that are already there. Optionally, ask
       the remote host to substitute missing paths. */
    // FIXME: substitute output pollutes our build log
    to << cmdQueryValidPaths << 1 << useSubstitutes << closure;
    to.flush();

    /* Get back the set of paths that are already valid on the remote
       host. */
    auto present = readStorePaths<PathSet>(*destStore, from);

    if (present.size() == closure.size()) return;

    Paths sorted = destStore->topoSortPaths(closure);

    Paths missing;
    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
        if (present.find(*i) == present.end()) missing.push_back(*i);

    printMsg(lvlDebug, format("sending %1% missing paths") % missing.size());

    to << cmdImportPaths;
    destStore->exportPaths(missing, to);
    to.flush();

    if (readInt(from) != 1)
        throw Error("remote machine failed to import closure");
}

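/* Perform a build step on a remote machine: open a connection, copy the
   input closure, run the build via the serve protocol, and copy the
   outputs back into the destination store. */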
void State::buildRemote(ref<Store> destStore,
    Machine::ptr machine, Step::ptr step,
    unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
    RemoteResult & result, std::shared_ptr<ActiveStep> activeStep)
{
    assert(BuildResult::TimedOut == 8);

    string base = baseNameOf(step->drvPath);
    result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
    AutoDelete autoDelete(result.logFile, false);

    createDirs(dirOf(result.logFile));

    AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
    if (!logFD) throw SysError(format("creating log file ‘%1%’") % result.logFile);

    nix::Path tmpDir = createTempDir();
    AutoDelete tmpDirDel(tmpDir, true);

    try {

        Child child;
        openConnection(machine, tmpDir, logFD.get(), child);

        {
            auto activeStepState(activeStep->state_.lock());
            if (activeStepState->cancelled) throw Error("step cancelled");
            activeStepState->pid = child.pid;
        }

        Finally clearPid([&]() {
            auto activeStepState(activeStep->state_.lock());
            activeStepState->pid = -1;

            /* FIXME: there is a slight race here with step
               cancellation in State::processQueueChange(), which
               could call kill() on this pid after we've done waitpid()
               on it. With pid wrap-around, there is a tiny
               possibility that we end up killing another
               process. Meh. */
        });

        FdSource from(child.from.get());
        FdSink to(child.to.get());

        Finally updateStats([&]() {
            bytesReceived += from.read;
            bytesSent += to.written;
        });

        /* Handshake. */
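        /* We advertise serve protocol version 2.3 (0x203). The minor
           version reported by the remote determines which features we
           may use: >= 1 lets us send a BasicDerivation instead of the
           whole drv closure, >= 2 supports a log size limit, >= 3
           supports build repetition. */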
        bool sendDerivation = true;
        unsigned int remoteVersion;

        try {
            to << SERVE_MAGIC_1 << 0x203;
            to.flush();

            unsigned int magic = readInt(from);
            if (magic != SERVE_MAGIC_2)
                throw Error(format("protocol mismatch with ‘nix-store --serve’ on ‘%1%’") % machine->sshName);
            remoteVersion = readInt(from);
            if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
                throw Error(format("unsupported ‘nix-store --serve’ protocol version on ‘%1%’") % machine->sshName);
            if (GET_PROTOCOL_MINOR(remoteVersion) >= 1)
                sendDerivation = false;
            if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
                throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);

        } catch (EndOfFile & e) {
            child.pid.wait();
            string s = chomp(readFile(result.logFile));
            throw Error(format("cannot connect to ‘%1%’: %2%") % machine->sshName % s);
        }

        {
            auto info(machine->state->connectInfo.lock());
            info->consecutiveFailures = 0;
        }

        /* Gather the inputs. If the remote side is Nix <= 1.9, we have to
           copy the entire closure of ‘drvPath’, as well as the required
           outputs of the input derivations. On Nix > 1.9, we only need to
           copy the immediate sources of the derivation and the required
           outputs of the input derivations. */
        PathSet inputs;
        BasicDerivation basicDrv(step->drv);

        if (sendDerivation)
            inputs.insert(step->drvPath);
        else
            for (auto & p : step->drv.inputSrcs)
                inputs.insert(p);

        for (auto & input : step->drv.inputDrvs) {
            Derivation drv2 = readDerivation(input.first);
            for (auto & name : input.second) {
                auto i = drv2.outputs.find(name);
                if (i == drv2.outputs.end()) continue;
                inputs.insert(i->second.path);
                basicDrv.inputSrcs.insert(i->second.path);
            }
        }

        /* Ensure that the inputs exist in the destination store. This is
           a no-op for regular stores, but for the binary cache store,
           this will copy the inputs to the binary cache from the local
           store. */
        copyClosure(ref<Store>(localStore), destStore, step->drv.inputSrcs, NoRepair, NoCheckSigs);

        /* Copy the input closure. */
        if (/* machine->sshName != "localhost" */ true) {
            auto mc1 = std::make_shared<MaintainCount>(nrStepsWaiting);
            std::unique_lock<std::timed_mutex> sendLock(
                machine->state->sendLock, std::chrono::seconds(600));
            mc1.reset();
            MaintainCount mc2(nrStepsCopyingTo);
            printMsg(lvlDebug, format("sending closure of ‘%1%’ to ‘%2%’") % step->drvPath % machine->sshName);

            auto now1 = std::chrono::steady_clock::now();

            copyClosureTo(destStore, from, to, inputs, true);

            auto now2 = std::chrono::steady_clock::now();

            result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
        }

        autoDelete.cancel();

        /* Truncate the log to get rid of messages about substitutions
           etc. on the remote system. */
        if (lseek(logFD.get(), 0, SEEK_SET) != 0)
            throw SysError("seeking to the start of log file ‘%s’", result.logFile);

        if (ftruncate(logFD.get(), 0) == -1)
            throw SysError("truncating log file ‘%s’", result.logFile);

        logFD = -1;

        /* Do the build. */
        printMsg(lvlDebug, format("building ‘%1%’ on ‘%2%’") % step->drvPath % machine->sshName);

        if (sendDerivation)
            to << cmdBuildPaths << PathSet({step->drvPath});
        else
            to << cmdBuildDerivation << step->drvPath << basicDrv;
        to << maxSilentTime << buildTimeout;
        if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
            to << 64 * 1024 * 1024; // == maxLogSize
        if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
            to << repeats // == build-repeat
               << step->isDeterministic; // == enforce-determinism
        }
        to.flush();

        result.startTime = time(0);
        int res;
        {
            MaintainCount mc(nrStepsBuilding);
            res = readInt(from);
        }
        result.stopTime = time(0);

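        /* Interpret the result. With the old protocol (cmdBuildPaths) we
           only get an exit code: 100 is treated as a permanent failure,
           101 as a timeout, and anything else as an abort that may be
           retried. */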
        if (sendDerivation) {
            if (res) {
                result.errorMsg = (format("%1% on ‘%2%’") % readString(from) % machine->sshName).str();
                if (res == 100) {
                    result.stepStatus = bsFailed;
                    result.canCache = true;
                }
                else if (res == 101) {
                    result.stepStatus = bsTimedOut;
                }
                else {
                    result.stepStatus = bsAborted;
                    result.canRetry = true;
                }
                return;
            }
            result.stepStatus = bsSuccess;
        } else {
            result.errorMsg = readString(from);
            if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
                result.timesBuilt = readInt(from);
                result.isNonDeterministic = readInt(from);
                auto start = readInt(from);
                auto stop = readInt(from);
                if (start && stop) {
                    /* Note: this represents the duration of a single
                       round, rather than all rounds. */
                    result.startTime = start;
                    result.stopTime = stop;
                }
            }
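            /* Map the remote's BuildResult status onto a Hydra step
               status, and decide whether the failure may be cached or
               the step retried. */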
            switch ((BuildResult::Status) res) {
                case BuildResult::Built:
                    result.stepStatus = bsSuccess;
                    break;
                case BuildResult::Substituted:
                case BuildResult::AlreadyValid:
                    result.stepStatus = bsSuccess;
                    result.isCached = true;
                    break;
                case BuildResult::PermanentFailure:
                    result.stepStatus = bsFailed;
                    result.canCache = true;
                    result.errorMsg = "";
                    break;
                case BuildResult::InputRejected:
                case BuildResult::OutputRejected:
                    result.stepStatus = bsFailed;
                    result.canCache = true;
                    break;
                case BuildResult::TransientFailure:
                    result.stepStatus = bsFailed;
                    result.canRetry = true;
                    result.errorMsg = "";
                    break;
                case BuildResult::TimedOut:
                    result.stepStatus = bsTimedOut;
                    result.errorMsg = "";
                    break;
                case BuildResult::MiscFailure:
                    result.stepStatus = bsAborted;
                    result.canRetry = true;
                    break;
                case BuildResult::LogLimitExceeded:
                    result.stepStatus = bsLogLimitExceeded;
                    break;
                case BuildResult::NotDeterministic:
                    result.stepStatus = bsNotDeterministic;
                    result.canRetry = false;
                    result.canCache = true;
                    break;
                default:
                    result.stepStatus = bsAborted;
                    break;
            }
            if (result.stepStatus != bsSuccess) return;
        }

        result.errorMsg = "";

        /* If the path was substituted or already valid, then we didn't
           get a build log. */
        if (result.isCached) {
            printMsg(lvlInfo, format("outputs of ‘%1%’ substituted or already valid on ‘%2%’") % step->drvPath % machine->sshName);
            unlink(result.logFile.c_str());
            result.logFile = "";
        }

        /* Copy the output paths. */
        if (/* machine->sshName != "localhost" */ true) {
            MaintainCount mc(nrStepsCopyingFrom);

            auto now1 = std::chrono::steady_clock::now();

            PathSet outputs;
            for (auto & output : step->drv.outputs)
                outputs.insert(output.second.path);

            /* Query the size of the output paths. */
            size_t totalNarSize = 0;
            to << cmdQueryPathInfos << outputs;
            to.flush();
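            /* Each reply record consists of the path, its deriver, its
               references, the download size and the NAR size; an empty
               path marks the end of the list. */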
            while (true) {
                if (readString(from) == "") break;
                readString(from); // deriver
                readStrings<PathSet>(from); // references
                readLongLong(from); // download size
                totalNarSize += readLongLong(from);
            }

            if (totalNarSize > maxOutputSize) {
                result.stepStatus = bsNarSizeLimitExceeded;
                return;
            }

            printMsg(lvlDebug, format("copying outputs of ‘%s’ from ‘%s’ (%d bytes)")
                % step->drvPath % machine->sshName % totalNarSize);

            /* Block until we have the required amount of memory
               available, which is twice the NAR size (namely the
               uncompressed and worst-case compressed NAR), plus 150
               MB for xz compression overhead. (The xz manpage claims
               ~94 MiB, but that's not what I'm seeing.) */
            auto resStart = std::chrono::steady_clock::now();
            size_t compressionCost = totalNarSize + 150 * 1024 * 1024;
            result.tokens = std::make_unique<nix::TokenServer::Token>(memoryTokens.get(totalNarSize + compressionCost));
            auto resStop = std::chrono::steady_clock::now();

            auto resMs = std::chrono::duration_cast<std::chrono::milliseconds>(resStop - resStart).count();
            if (resMs >= 1000)
                printMsg(lvlError, format("warning: had to wait %d ms for %d memory tokens for %s")
                    % resMs % totalNarSize % step->drvPath);

            result.accessor = destStore->getFSAccessor();

            to << cmdExportPaths << 0 << outputs;
            to.flush();
            destStore->importPaths(from, result.accessor, NoCheckSigs);

            /* Release the tokens pertaining to NAR
               compression. After this we only have the uncompressed
               NAR in memory. */
            result.tokens->give_back(compressionCost);

            auto now2 = std::chrono::steady_clock::now();

            result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
        }

        /* Shut down the connection. */
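        /* Closing our end of the ‘to’ pipe signals EOF to the remote
           ‘nix-store --serve’, which should then exit; wait() reaps the
           child process. */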
        child.to = -1;
        child.pid.wait();

    } catch (Error & e) {
        /* Disable this machine until a certain period of time has
           passed. This period increases on every consecutive
           failure. However, don't count failures that occurred soon
           after the last one (to take into account steps started in
           parallel). */
        auto info(machine->state->connectInfo.lock());
        auto now = std::chrono::system_clock::now();
        if (info->consecutiveFailures == 0 || info->lastFailure < now - std::chrono::seconds(30)) {
            info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
            info->lastFailure = now;
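            /* Back off exponentially: retryInterval * retryBackoff^(n-1)
               seconds for the n-th consecutive failure, plus up to 30
               seconds of random jitter. */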
            int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
            printMsg(lvlInfo, format("will disable machine ‘%1%’ for %2%s") % machine->sshName % delta);
            info->disabledUntil = now + std::chrono::seconds(delta);
        }
        throw;
    }
}