2015-06-09 12:21:21 +00:00
|
|
|
|
#include <algorithm>
|
2016-09-30 15:05:07 +00:00
|
|
|
|
#include <cmath>
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
#include <sys/stat.h>
|
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
|
|
|
|
|
#include "serve-protocol.hh"
|
2015-07-07 08:25:33 +00:00
|
|
|
|
#include "state.hh"
|
|
|
|
|
#include "util.hh"
|
2015-06-09 12:21:21 +00:00
|
|
|
|
#include "worker-protocol.hh"
|
2016-02-26 15:16:36 +00:00
|
|
|
|
#include "finally.hh"
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
|
|
|
|
using namespace nix;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Handle to a child process running ‘nix-store --serve’ (either
   directly for localhost builds, or via ssh for remote machines),
   together with the pipes used to talk to it. */
struct Child
{
    Pid pid;
    /* ‘to’ is the child's stdin (we write commands to it); ‘from’ is
       the child's stdout (we read replies from it). */
    AutoCloseFD to, from;
};
|
|
|
|
|
|
|
|
|
|
|
2015-06-25 13:29:22 +00:00
|
|
|
|
/* Append all elements of ‘src’, in order, to the end of ‘dst’. */
static void append(Strings & dst, const Strings & src)
{
    for (auto & s : src)
        dst.push_back(s);
}
|
|
|
|
|
|
|
|
|
|
|
2015-08-26 11:43:02 +00:00
|
|
|
|
/* Start a ‘nix-store --serve --write’ process for ‘machine’ and store
   its pid and stdin/stdout pipes in ‘child’. For localhost the
   command is run directly; otherwise it is run through ssh. The
   child's stderr is redirected to ‘stderrFD’ (the step's log file).
   ‘tmpDir’ is used to materialise a known-hosts file when the machine
   has a pinned public host key. */
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
{
    string pgmName;
    Pipe to, from;
    to.create();
    from.create();

    /* Everything inside this lambda runs in the forked child. */
    child.pid = startProcess([&]() {

        restoreSignals();

        /* Wire the pipes to the child's stdin/stdout, and the log
           file to its stderr. */
        if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
            throw SysError("cannot dup input pipe to stdin");

        if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
            throw SysError("cannot dup output pipe to stdout");

        if (dup2(stderrFD, STDERR_FILENO) == -1)
            throw SysError("cannot dup stderr");

        Strings argv;
        if (machine->isLocalhost()) {
            pgmName = "nix-store";
            argv = {"nix-store", "--serve", "--write"};
        }
        else {
            pgmName = "ssh";
            argv = {"ssh", machine->sshName};
            if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
            if (machine->sshPublicHostKey != "") {
                /* Pin the remote host key by writing a one-entry
                   known-hosts file. The host part of ‘user@host’ is
                   used as the key's host name. */
                Path fileName = tmpDir + "/host-key";
                auto p = machine->sshName.find("@");
                string host = p != string::npos ? string(machine->sshName, p + 1) : machine->sshName;
                writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
                append(argv, {"-oUserKnownHostsFile=" + fileName});
            }
            /* -x: no X11 forwarding; -a: no agent forwarding;
               BatchMode prevents interactive password prompts. */
            append(argv,
                { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
                , "--", "nix-store", "--serve", "--write" });
        }

        execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast

        /* Only reached if execvp() failed. */
        throw SysError("cannot start %s", pgmName);
    });

    /* Parent: close the child's ends of the pipes... */
    to.readSide = -1;
    from.writeSide = -1;

    /* ...and hand the parent's ends to the caller. */
    child.to = to.writeSide.release();
    child.from = from.readSide.release();
}
|
|
|
|
|
|
|
|
|
|
|
2017-09-01 14:28:49 +00:00
|
|
|
|
/* Copy the closure of ‘paths’ from ‘destStore’ to the remote machine
   over the serve-protocol connection (‘from’/‘to’). Only paths not
   already valid on the remote side are exported. ‘sendMutex’
   serialises concurrent sends to the same machine. */
static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
    FdSource & from, FdSink & to, const StorePathSet & paths,
    bool useSubstitutes = false)
{
    /* Compute the full closure of the requested paths. */
    StorePathSet closure;
    for (auto & path : paths)
        destStore->computeFSClosure(path, closure);

    /* Send the "query valid paths" command with the "lock" option
       enabled. This prevents a race where the remote host
       garbage-collect paths that are already there. Optionally, ask
       the remote host to substitute missing paths. */
    // FIXME: substitute output pollutes our build log
    to << cmdQueryValidPaths << 1 << useSubstitutes;
    writeStorePaths(*destStore, to, closure);
    to.flush();

    /* Get back the set of paths that are already valid on the remote
       host. */
    auto present = readStorePaths<StorePathSet>(*destStore, from);

    /* Nothing missing — nothing to send. */
    if (present.size() == closure.size()) return;

    /* Export missing paths in reverse-topological order so that each
       path's references arrive before the path itself. */
    auto sorted = destStore->topoSortPaths(closure);

    StorePathSet missing;
    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
        if (!present.count(*i)) missing.insert(i->clone());

    printMsg(lvlDebug, format("sending %1% missing paths") % missing.size());

    /* NOTE(review): this is try_lock_for with a 600 s timeout; if it
       times out the lock is NOT held and we proceed anyway —
       presumably intentional best-effort serialisation of sends, but
       confirm. */
    std::unique_lock<std::timed_mutex> sendLock(sendMutex,
        std::chrono::seconds(600));

    to << cmdImportPaths;
    destStore->exportPaths(missing, to);
    to.flush();

    /* The remote replies 1 on a successful import. */
    if (readInt(from) != 1)
        throw Error("remote machine failed to import closure");
}
|
|
|
|
|
|
|
|
|
|
|
2016-02-15 20:10:29 +00:00
|
|
|
|
void State::buildRemote(ref<Store> destStore,
|
2015-07-07 08:25:33 +00:00
|
|
|
|
Machine::ptr machine, Step::ptr step,
|
2016-12-07 14:57:13 +00:00
|
|
|
|
unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
|
2017-12-07 14:35:31 +00:00
|
|
|
|
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
|
|
|
|
std::function<void(StepState)> updateStep)
|
2015-06-09 12:21:21 +00:00
|
|
|
|
{
|
2016-10-26 13:09:16 +00:00
|
|
|
|
assert(BuildResult::TimedOut == 8);
|
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
string base(step->drvPath.to_string());
|
2015-06-19 12:51:59 +00:00
|
|
|
|
result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
|
|
|
|
|
AutoDelete autoDelete(result.logFile, false);
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2015-06-19 12:51:59 +00:00
|
|
|
|
createDirs(dirOf(result.logFile));
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2016-10-06 13:24:09 +00:00
|
|
|
|
AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
|
|
|
|
if (!logFD) throw SysError(format("creating log file ‘%1%’") % result.logFile);
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2015-08-26 11:43:02 +00:00
|
|
|
|
nix::Path tmpDir = createTempDir();
|
|
|
|
|
AutoDelete tmpDirDel(tmpDir, true);
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
try {
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2017-12-07 14:35:31 +00:00
|
|
|
|
updateStep(ssConnecting);
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
Child child;
|
2016-10-06 13:24:09 +00:00
|
|
|
|
openConnection(machine, tmpDir, logFD.get(), child);
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2016-11-07 18:34:35 +00:00
|
|
|
|
{
|
|
|
|
|
auto activeStepState(activeStep->state_.lock());
|
|
|
|
|
if (activeStepState->cancelled) throw Error("step cancelled");
|
|
|
|
|
activeStepState->pid = child.pid;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Finally clearPid([&]() {
|
|
|
|
|
auto activeStepState(activeStep->state_.lock());
|
|
|
|
|
activeStepState->pid = -1;
|
|
|
|
|
|
|
|
|
|
/* FIXME: there is a slight race here with step
|
|
|
|
|
cancellation in State::processQueueChange(), which
|
|
|
|
|
could call kill() on this pid after we've done waitpid()
|
|
|
|
|
on it. With pid wrap-around, there is a tiny
|
|
|
|
|
possibility that we end up killing another
|
|
|
|
|
process. Meh. */
|
|
|
|
|
});
|
|
|
|
|
|
2016-10-06 13:24:09 +00:00
|
|
|
|
FdSource from(child.from.get());
|
|
|
|
|
FdSink to(child.to.get());
|
2016-02-26 15:16:36 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
Finally updateStats([&]() {
|
|
|
|
|
bytesReceived += from.read;
|
|
|
|
|
bytesSent += to.written;
|
|
|
|
|
});
|
2015-10-06 15:35:08 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Handshake. */
|
|
|
|
|
bool sendDerivation = true;
|
|
|
|
|
unsigned int remoteVersion;
|
2015-06-17 09:45:20 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
try {
|
2016-12-06 16:46:06 +00:00
|
|
|
|
to << SERVE_MAGIC_1 << 0x203;
|
2016-03-22 15:54:40 +00:00
|
|
|
|
to.flush();
|
2015-07-31 01:39:20 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
unsigned int magic = readInt(from);
|
|
|
|
|
if (magic != SERVE_MAGIC_2)
|
|
|
|
|
throw Error(format("protocol mismatch with ‘nix-store --serve’ on ‘%1%’") % machine->sshName);
|
|
|
|
|
remoteVersion = readInt(from);
|
|
|
|
|
if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
|
|
|
|
|
throw Error(format("unsupported ‘nix-store --serve’ protocol version on ‘%1%’") % machine->sshName);
|
2019-09-25 15:26:03 +00:00
|
|
|
|
// Always send the derivation to localhost, since it's a
|
|
|
|
|
// no-op anyway but we might not be privileged to use
|
|
|
|
|
// cmdBuildDerivation (e.g. if we're running in a NixOS
|
|
|
|
|
// container).
|
|
|
|
|
if (GET_PROTOCOL_MINOR(remoteVersion) >= 1 && !machine->isLocalhost())
|
2016-03-22 15:54:40 +00:00
|
|
|
|
sendDerivation = false;
|
2016-12-08 15:03:02 +00:00
|
|
|
|
if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
|
|
|
|
|
throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
|
|
|
|
|
} catch (EndOfFile & e) {
|
2017-02-03 13:39:18 +00:00
|
|
|
|
child.pid.wait();
|
2016-03-22 15:54:40 +00:00
|
|
|
|
string s = chomp(readFile(result.logFile));
|
|
|
|
|
throw Error(format("cannot connect to ‘%1%’: %2%") % machine->sshName % s);
|
|
|
|
|
}
|
2015-07-21 13:53:27 +00:00
|
|
|
|
|
|
|
|
|
{
|
|
|
|
|
auto info(machine->state->connectInfo.lock());
|
2016-03-22 15:54:40 +00:00
|
|
|
|
info->consecutiveFailures = 0;
|
2015-07-21 13:53:27 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Gather the inputs. If the remote side is Nix <= 1.9, we have to
|
|
|
|
|
copy the entire closure of ‘drvPath’, as well as the required
|
|
|
|
|
outputs of the input derivations. On Nix > 1.9, we only need to
|
|
|
|
|
copy the immediate sources of the derivation and the required
|
|
|
|
|
outputs of the input derivations. */
|
2017-12-07 14:35:31 +00:00
|
|
|
|
updateStep(ssSendingInputs);
|
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
StorePathSet inputs;
|
|
|
|
|
BasicDerivation basicDrv(*step->drv);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
|
|
|
|
|
if (sendDerivation)
|
2019-12-30 21:49:26 +00:00
|
|
|
|
inputs.insert(step->drvPath.clone());
|
2016-03-22 15:54:40 +00:00
|
|
|
|
else
|
2019-12-30 21:49:26 +00:00
|
|
|
|
for (auto & p : step->drv->inputSrcs)
|
|
|
|
|
inputs.insert(p.clone());
|
2016-03-22 15:54:40 +00:00
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
for (auto & input : step->drv->inputDrvs) {
|
|
|
|
|
Derivation drv2 = readDerivation(*localStore, localStore->printStorePath(input.first));
|
2016-03-22 15:54:40 +00:00
|
|
|
|
for (auto & name : input.second) {
|
|
|
|
|
auto i = drv2.outputs.find(name);
|
|
|
|
|
if (i == drv2.outputs.end()) continue;
|
2019-12-30 21:49:26 +00:00
|
|
|
|
inputs.insert(i->second.path.clone());
|
|
|
|
|
basicDrv.inputSrcs.insert(i->second.path.clone());
|
2016-03-22 15:54:40 +00:00
|
|
|
|
}
|
2015-06-17 15:28:59 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Ensure that the inputs exist in the destination store. This is
|
|
|
|
|
a no-op for regular stores, but for the binary cache store,
|
|
|
|
|
this will copy the inputs to the binary cache from the local
|
|
|
|
|
store. */
|
2019-09-25 15:26:03 +00:00
|
|
|
|
if (localStore != std::shared_ptr<Store>(destStore))
|
2019-12-30 21:49:26 +00:00
|
|
|
|
copyClosure(ref<Store>(localStore), destStore, step->drv->inputSrcs, NoRepair, NoCheckSigs);
|
2016-02-15 20:10:29 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Copy the input closure. */
|
2019-09-25 15:26:03 +00:00
|
|
|
|
if (!machine->isLocalhost()) {
|
2017-09-14 15:22:48 +00:00
|
|
|
|
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
mc1.reset();
|
2017-09-14 15:22:48 +00:00
|
|
|
|
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
2019-12-30 21:49:26 +00:00
|
|
|
|
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
|
|
|
|
localStore->printStorePath(step->drvPath), machine->sshName);
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
auto now1 = std::chrono::steady_clock::now();
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2017-09-01 14:28:49 +00:00
|
|
|
|
copyClosureTo(machine->state->sendLock, destStore, from, to, inputs, true);
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
auto now2 = std::chrono::steady_clock::now();
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
|
|
|
|
}
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
autoDelete.cancel();
|
2015-06-19 12:51:59 +00:00
|
|
|
|
|
2016-10-26 11:39:43 +00:00
|
|
|
|
/* Truncate the log to get rid of messages about substitutions
|
|
|
|
|
etc. on the remote system. */
|
|
|
|
|
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
|
|
|
|
|
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
|
|
|
|
|
|
|
|
|
|
if (ftruncate(logFD.get(), 0) == -1)
|
|
|
|
|
throw SysError("truncating log file ‘%s’", result.logFile);
|
|
|
|
|
|
|
|
|
|
logFD = -1;
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Do the build. */
|
2019-12-30 21:49:26 +00:00
|
|
|
|
printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
|
|
|
|
|
localStore->printStorePath(step->drvPath),
|
|
|
|
|
machine->sshName);
|
2015-07-20 23:45:00 +00:00
|
|
|
|
|
2017-12-07 14:35:31 +00:00
|
|
|
|
updateStep(ssBuilding);
|
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
if (sendDerivation) {
|
|
|
|
|
to << cmdBuildPaths;
|
|
|
|
|
writeStorePaths(*localStore, to, singleton(step->drvPath));
|
|
|
|
|
} else {
|
|
|
|
|
to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
|
|
|
|
|
writeDerivation(to, *localStore, basicDrv);
|
|
|
|
|
}
|
2016-03-22 15:54:40 +00:00
|
|
|
|
to << maxSilentTime << buildTimeout;
|
|
|
|
|
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
|
2017-09-22 13:23:58 +00:00
|
|
|
|
to << maxLogSize;
|
2016-12-07 14:57:13 +00:00
|
|
|
|
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
|
|
|
|
to << repeats // == build-repeat
|
|
|
|
|
<< step->isDeterministic; // == enforce-determinism
|
|
|
|
|
}
|
2016-03-22 15:54:40 +00:00
|
|
|
|
to.flush();
|
2015-07-20 23:45:00 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
result.startTime = time(0);
|
|
|
|
|
int res;
|
|
|
|
|
{
|
2017-09-14 15:22:48 +00:00
|
|
|
|
MaintainCount<counter> mc(nrStepsBuilding);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
res = readInt(from);
|
|
|
|
|
}
|
|
|
|
|
result.stopTime = time(0);
|
|
|
|
|
|
|
|
|
|
if (sendDerivation) {
|
|
|
|
|
if (res) {
|
|
|
|
|
result.errorMsg = (format("%1% on ‘%2%’") % readString(from) % machine->sshName).str();
|
|
|
|
|
if (res == 100) {
|
|
|
|
|
result.stepStatus = bsFailed;
|
|
|
|
|
result.canCache = true;
|
|
|
|
|
}
|
|
|
|
|
else if (res == 101) {
|
|
|
|
|
result.stepStatus = bsTimedOut;
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
result.stepStatus = bsAborted;
|
|
|
|
|
result.canRetry = true;
|
|
|
|
|
}
|
|
|
|
|
return;
|
2016-03-09 15:59:38 +00:00
|
|
|
|
}
|
2016-03-22 15:54:40 +00:00
|
|
|
|
result.stepStatus = bsSuccess;
|
|
|
|
|
} else {
|
|
|
|
|
result.errorMsg = readString(from);
|
2016-12-07 14:57:13 +00:00
|
|
|
|
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
|
|
|
|
result.timesBuilt = readInt(from);
|
|
|
|
|
result.isNonDeterministic = readInt(from);
|
2016-12-07 15:10:21 +00:00
|
|
|
|
auto start = readInt(from);
|
|
|
|
|
auto stop = readInt(from);
|
|
|
|
|
if (start && start) {
|
|
|
|
|
/* Note: this represents the duration of a single
|
|
|
|
|
round, rather than all rounds. */
|
|
|
|
|
result.startTime = start;
|
|
|
|
|
result.stopTime = stop;
|
|
|
|
|
}
|
2016-12-07 14:57:13 +00:00
|
|
|
|
}
|
2016-03-22 15:54:40 +00:00
|
|
|
|
switch ((BuildResult::Status) res) {
|
|
|
|
|
case BuildResult::Built:
|
|
|
|
|
result.stepStatus = bsSuccess;
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::Substituted:
|
|
|
|
|
case BuildResult::AlreadyValid:
|
|
|
|
|
result.stepStatus = bsSuccess;
|
|
|
|
|
result.isCached = true;
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::PermanentFailure:
|
|
|
|
|
result.stepStatus = bsFailed;
|
|
|
|
|
result.canCache = true;
|
|
|
|
|
result.errorMsg = "";
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::InputRejected:
|
|
|
|
|
case BuildResult::OutputRejected:
|
|
|
|
|
result.stepStatus = bsFailed;
|
|
|
|
|
result.canCache = true;
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::TransientFailure:
|
|
|
|
|
result.stepStatus = bsFailed;
|
|
|
|
|
result.canRetry = true;
|
|
|
|
|
result.errorMsg = "";
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::TimedOut:
|
|
|
|
|
result.stepStatus = bsTimedOut;
|
|
|
|
|
result.errorMsg = "";
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::MiscFailure:
|
|
|
|
|
result.stepStatus = bsAborted;
|
|
|
|
|
result.canRetry = true;
|
|
|
|
|
break;
|
|
|
|
|
case BuildResult::LogLimitExceeded:
|
|
|
|
|
result.stepStatus = bsLogLimitExceeded;
|
|
|
|
|
break;
|
2016-12-06 16:46:06 +00:00
|
|
|
|
case BuildResult::NotDeterministic:
|
|
|
|
|
result.stepStatus = bsNotDeterministic;
|
|
|
|
|
result.canRetry = false;
|
|
|
|
|
result.canCache = true;
|
|
|
|
|
break;
|
2016-03-22 15:54:40 +00:00
|
|
|
|
default:
|
|
|
|
|
result.stepStatus = bsAborted;
|
|
|
|
|
break;
|
2016-03-09 15:59:38 +00:00
|
|
|
|
}
|
2016-03-22 15:54:40 +00:00
|
|
|
|
if (result.stepStatus != bsSuccess) return;
|
2015-07-20 23:45:00 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
result.errorMsg = "";
|
2016-03-09 15:59:38 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* If the path was substituted or already valid, then we didn't
|
|
|
|
|
get a build log. */
|
|
|
|
|
if (result.isCached) {
|
2019-12-30 21:49:26 +00:00
|
|
|
|
printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
|
|
|
|
|
localStore->printStorePath(step->drvPath), machine->sshName);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
unlink(result.logFile.c_str());
|
|
|
|
|
result.logFile = "";
|
|
|
|
|
}
|
2015-06-09 12:21:21 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Copy the output paths. */
|
2019-09-25 15:26:03 +00:00
|
|
|
|
result.accessor = destStore->getFSAccessor();
|
|
|
|
|
|
|
|
|
|
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
|
2017-12-07 14:35:31 +00:00
|
|
|
|
updateStep(ssReceivingOutputs);
|
|
|
|
|
|
2017-09-14 15:22:48 +00:00
|
|
|
|
MaintainCount<counter> mc(nrStepsCopyingFrom);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
|
|
|
|
|
auto now1 = std::chrono::steady_clock::now();
|
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
auto outputs = step->drv->outputPaths();
|
2016-03-22 15:54:40 +00:00
|
|
|
|
|
|
|
|
|
/* Query the size of the output paths. */
|
|
|
|
|
size_t totalNarSize = 0;
|
2019-12-30 21:49:26 +00:00
|
|
|
|
to << cmdQueryPathInfos;
|
|
|
|
|
writeStorePaths(*localStore, to, outputs);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
to.flush();
|
|
|
|
|
while (true) {
|
|
|
|
|
if (readString(from) == "") break;
|
|
|
|
|
readString(from); // deriver
|
|
|
|
|
readStrings<PathSet>(from); // references
|
|
|
|
|
readLongLong(from); // download size
|
|
|
|
|
totalNarSize += readLongLong(from);
|
|
|
|
|
}
|
2016-03-09 13:30:13 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
if (totalNarSize > maxOutputSize) {
|
|
|
|
|
result.stepStatus = bsNarSizeLimitExceeded;
|
|
|
|
|
return;
|
|
|
|
|
}
|
2016-03-09 13:30:13 +00:00
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
|
|
|
|
|
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Block until we have the required amount of memory
|
2016-11-16 16:46:00 +00:00
|
|
|
|
available, which is twice the NAR size (namely the
|
|
|
|
|
uncompressed and worst-case compressed NAR), plus 150
|
|
|
|
|
MB for xz compression overhead. (The xz manpage claims
|
|
|
|
|
~94 MiB, but that's not was I'm seeing.) */
|
2016-03-22 15:54:40 +00:00
|
|
|
|
auto resStart = std::chrono::steady_clock::now();
|
2016-11-16 16:46:00 +00:00
|
|
|
|
size_t compressionCost = totalNarSize + 150 * 1024 * 1024;
|
|
|
|
|
result.tokens = std::make_unique<nix::TokenServer::Token>(memoryTokens.get(totalNarSize + compressionCost));
|
2016-03-22 15:54:40 +00:00
|
|
|
|
auto resStop = std::chrono::steady_clock::now();
|
2016-03-09 15:59:38 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
auto resMs = std::chrono::duration_cast<std::chrono::milliseconds>(resStop - resStart).count();
|
|
|
|
|
if (resMs >= 1000)
|
2019-12-30 21:49:26 +00:00
|
|
|
|
printMsg(lvlError, "warning: had to wait %d ms for %d memory tokens for %s",
|
|
|
|
|
resMs, totalNarSize, localStore->printStorePath(step->drvPath));
|
2016-03-09 13:30:13 +00:00
|
|
|
|
|
2019-12-30 21:49:26 +00:00
|
|
|
|
to << cmdExportPaths << 0;
|
|
|
|
|
writeStorePaths(*localStore, to, outputs);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
to.flush();
|
2017-07-17 09:38:58 +00:00
|
|
|
|
destStore->importPaths(from, result.accessor, NoCheckSigs);
|
2016-03-09 13:30:13 +00:00
|
|
|
|
|
2016-11-16 16:46:00 +00:00
|
|
|
|
/* Release the tokens pertaining to NAR
|
|
|
|
|
compression. After this we only have the uncompressed
|
|
|
|
|
NAR in memory. */
|
|
|
|
|
result.tokens->give_back(compressionCost);
|
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
auto now2 = std::chrono::steady_clock::now();
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
|
|
|
|
}
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
/* Shut down the connection. */
|
2016-10-06 13:24:09 +00:00
|
|
|
|
child.to = -1;
|
2017-02-03 13:39:18 +00:00
|
|
|
|
child.pid.wait();
|
2016-02-17 09:28:42 +00:00
|
|
|
|
|
2016-03-22 15:54:40 +00:00
|
|
|
|
} catch (Error & e) {
|
|
|
|
|
/* Disable this machine until a certain period of time has
|
|
|
|
|
passed. This period increases on every consecutive
|
|
|
|
|
failure. However, don't count failures that occurred soon
|
|
|
|
|
after the last one (to take into account steps started in
|
|
|
|
|
parallel). */
|
|
|
|
|
auto info(machine->state->connectInfo.lock());
|
|
|
|
|
auto now = std::chrono::system_clock::now();
|
|
|
|
|
if (info->consecutiveFailures == 0 || info->lastFailure < now - std::chrono::seconds(30)) {
|
|
|
|
|
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
|
|
|
|
|
info->lastFailure = now;
|
2016-09-30 15:05:07 +00:00
|
|
|
|
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
|
2016-03-22 15:54:40 +00:00
|
|
|
|
printMsg(lvlInfo, format("will disable machine ‘%1%’ for %2%s") % machine->sshName % delta);
|
|
|
|
|
info->disabledUntil = now + std::chrono::seconds(delta);
|
|
|
|
|
}
|
|
|
|
|
throw;
|
2015-06-24 11:19:16 +00:00
|
|
|
|
}
|
2015-06-09 12:21:21 +00:00
|
|
|
|
}
|