// lix/src/libstore/remote-store.cc

#include "serialise.hh"
#include "util.hh"
#include "path-with-outputs.hh"
#include "gc-store.hh"
#include "remote-fs-accessor.hh"
#include "build-result.hh"
#include "remote-store.hh"
#include "worker-protocol.hh"
#include "archive.hh"
#include "globals.hh"
#include "derivations.hh"
#include "pool.hh"
#include "finally.hh"
#include "logging.hh"
#include "callback.hh"
#include "filetransfer.hh"
#include <nlohmann/json.hpp>
namespace nix {
/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
: RemoteStoreConfig(params)
, Store(params)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
[this]() {
auto conn = openConnectionWrapper();
try {
initConnection(*conn);
} catch (...) {
failed = true;
throw;
}
return conn;
},
[this](const ref<Connection> & r) {
return
r->to.good()
&& r->from.good()
&& std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::steady_clock::now() - r->startTime).count() < maxConnectionAge;
}
))
{
}
ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
{
if (failed)
throw Error("opening a connection to remote store '%s' previously failed", getUri());
try {
return openConnection();
} catch (...) {
failed = true;
throw;
}
}
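/* Handshake performed by initConnection() below:

     client: WORKER_MAGIC_1
     daemon: WORKER_MAGIC_2, daemon protocol version
     client: PROTOCOL_VERSION, plus obsolete CPU-affinity / reserveSpace
             placeholders depending on the daemon's minor version
     daemon: daemon Nix version (>= 1.33), trust status (>= 1.35),
             then the usual stderr stream

   The major protocol version must match exactly; only daemons with a
   minor version >= 10 are accepted. */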
void RemoteStore::initConnection(Connection & conn)
{
/* Send the magic greeting, check for the reply. */
try {
conn.to << WORKER_MAGIC_1;
conn.to.flush();
StringSink saved;
try {
TeeSource tee(conn.from, saved);
unsigned int magic = readInt(tee);
if (magic != WORKER_MAGIC_2)
throw Error("protocol mismatch");
} catch (SerialisationError & e) {
/* In case the other side is waiting for our input, close
it. */
conn.closeWrite();
auto msg = conn.from.drain();
throw Error("protocol mismatch, got '%s'", chomp(saved.s + msg));
}
conn.from >> conn.daemonVersion;
if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
throw Error("Nix daemon protocol version not supported");
if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
throw Error("the Nix daemon version is too old");
conn.to << PROTOCOL_VERSION;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
// Obsolete CPU affinity.
conn.to << 0;
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
conn.to << false; // obsolete reserveSpace
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 33) {
conn.to.flush();
conn.daemonNixVersion = readString(conn.from);
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) {
conn.remoteTrustsUs = WorkerProto<std::optional<TrustedFlag>>::read(*this, conn.from);
} else {
// We don't know the answer; protocol too old.
conn.remoteTrustsUs = std::nullopt;
}
auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
}
catch (Error & e) {
throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what());
}
setOptions(conn);
}
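/* setOptions() pushes the client's settings to the daemon: a fixed set of
   legacy fields (keepFailed, keepGoing, tryFallback, verbosity,
   maxBuildJobs, maxSilentTime, buildCores, useSubstitutes and a few
   obsolete placeholders), followed, for daemons >= 1.12, by a map of the
   remaining overridden settings minus the ones already sent or handled
   purely on the client side. */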
void RemoteStore::setOptions(Connection & conn)
{
conn.to << wopSetOptions
<< settings.keepFailed
<< settings.keepGoing
<< settings.tryFallback
<< verbosity
<< settings.maxBuildJobs
<< settings.maxSilentTime
<< true
<< (settings.verboseBuild ? lvlError : lvlVomit)
<< 0 // obsolete log type
<< 0 /* obsolete print build trace */
<< settings.buildCores
<< settings.useSubstitutes;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
std::map<std::string, Config::SettingInfo> overrides;
settings.getSettings(overrides, true); // libstore settings
fileTransferSettings.getSettings(overrides, true);
overrides.erase(settings.keepFailed.name);
overrides.erase(settings.keepGoing.name);
overrides.erase(settings.tryFallback.name);
overrides.erase(settings.maxBuildJobs.name);
overrides.erase(settings.maxSilentTime.name);
overrides.erase(settings.buildCores.name);
overrides.erase(settings.useSubstitutes.name);
overrides.erase(loggerSettings.showTrace.name);
overrides.erase(experimentalFeatureSettings.experimentalFeatures.name);
overrides.erase(settings.pluginFiles.name);
conn.to << overrides.size();
for (auto & i : overrides)
conn.to << i.first << i.second.value;
}
auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
}
/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks
the connection as bad (causing it to be closed) if a non-daemon
exception is thrown before the handle is closed. Such an exception
causes a deviation from the expected protocol and therefore a
desynchronization between the client and daemon. */
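/* Typical call pattern (a sketch; wopSomeOperation stands in for any
   worker opcode -- see e.g. isValidPathUncached() below):

     auto conn(getConnection());
     conn->to << wopSomeOperation << ...;  // send the request
     conn.processStderr();                 // relay daemon logs, rethrow errors
     auto reply = readInt(conn->from);     // read the reply
*/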
struct ConnectionHandle
{
Pool<RemoteStore::Connection>::Handle handle;
bool daemonException = false;
ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle)
: handle(std::move(handle))
{ }
ConnectionHandle(ConnectionHandle && h)
: handle(std::move(h.handle))
{ }
~ConnectionHandle()
{
if (!daemonException && std::uncaught_exceptions()) {
handle.markBad();
debug("closing daemon connection because of an exception");
}
}
RemoteStore::Connection * operator -> () { return &*handle; }
void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true)
{
auto ex = handle->processStderr(sink, source, flush);
if (ex) {
daemonException = true;
std::rethrow_exception(ex);
}
}
void withFramedSink(std::function<void(Sink & sink)> fun);
};
ConnectionHandle RemoteStore::getConnection()
{
return ConnectionHandle(connections->get());
}
void RemoteStore::setOptions()
{
setOptions(*(getConnection().handle));
}
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopIsValidPath << printStorePath(path);
conn.processStderr();
return readInt(conn->from);
}
StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
StorePathSet res;
for (auto & i : paths)
if (isValidPath(i)) res.insert(i);
return res;
} else {
conn->to << wopQueryValidPaths;
workerProtoWrite(*this, conn->to, paths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) {
conn->to << (settings.buildersUseSubstitutes ? 1 : 0);
}
conn.processStderr();
return WorkerProto<StorePathSet>::read(*this, conn->from);
}
}
StorePathSet RemoteStore::queryAllValidPaths()
{
auto conn(getConnection());
conn->to << wopQueryAllValidPaths;
conn.processStderr();
return WorkerProto<StorePathSet>::read(*this, conn->from);
}
StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
StorePathSet res;
for (auto & i : paths) {
conn->to << wopHasSubstitutes << printStorePath(i);
conn.processStderr();
if (readInt(conn->from)) res.insert(i);
}
return res;
} else {
conn->to << wopQuerySubstitutablePaths;
workerProtoWrite(*this, conn->to, paths);
conn.processStderr();
return WorkerProto<StorePathSet>::read(*this, conn->from);
}
}
void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, SubstitutablePathInfos & infos)
{
if (pathsMap.empty()) return;
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
for (auto & i : pathsMap) {
SubstitutablePathInfo info;
conn->to << wopQuerySubstitutablePathInfo << printStorePath(i.first);
conn.processStderr();
unsigned int reply = readInt(conn->from);
if (reply == 0) continue;
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
infos.insert_or_assign(i.first, std::move(info));
}
} else {
conn->to << wopQuerySubstitutablePathInfos;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 22) {
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
workerProtoWrite(*this, conn->to, paths);
} else
workerProtoWrite(*this, conn->to, pathsMap);
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
SubstitutablePathInfo & info(infos[parseStorePath(readString(conn->from))]);
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
}
}
}
void RemoteStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
try {
std::shared_ptr<const ValidPathInfo> info;
{
auto conn(getConnection());
conn->to << wopQueryPathInfo << printStorePath(path);
try {
conn.processStderr();
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
throw InvalidPath(std::move(e.info()));
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
bool valid; conn->from >> valid;
if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path));
}
info = std::make_shared<ValidPathInfo>(
ValidPathInfo::read(conn->from, *this, GET_PROTOCOL_MINOR(conn->daemonVersion), StorePath{path}));
}
callback(std::move(info));
} catch (...) { callback.rethrow(); }
}
void RemoteStore::queryReferrers(const StorePath & path,
StorePathSet & referrers)
{
auto conn(getConnection());
conn->to << wopQueryReferrers << printStorePath(path);
conn.processStderr();
for (auto & i : WorkerProto<StorePathSet>::read(*this, conn->from))
referrers.insert(i);
}
StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopQueryValidDerivers << printStorePath(path);
conn.processStderr();
return WorkerProto<StorePathSet>::read(*this, conn->from);
}
StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
{
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
return Store::queryDerivationOutputs(path);
}
auto conn(getConnection());
conn->to << wopQueryDerivationOutputs << printStorePath(path);
conn.processStderr();
return WorkerProto<StorePathSet>::read(*this, conn->from);
}
std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivationOutputMap(const StorePath & path)
{
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
auto conn(getConnection());
conn->to << wopQueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
return WorkerProto<std::map<std::string, std::optional<StorePath>>>::read(*this, conn->from);
} else {
// Fallback for old daemon versions.
// For floating-CA derivations (and their co-dependencies) this is an
// under-approximation as it only returns the paths that can be inferred
// from the derivation itself (and not the ones that are known because
// they have been built), but as old stores don't handle floating-CA
// derivations this shouldn't matter.
auto derivation = readDerivation(path);
auto outputsWithOptPaths = derivation.outputsAndOptPaths(*this);
std::map<std::string, std::optional<StorePath>> ret;
for (auto & [outputName, outputAndPath] : outputsWithOptPaths) {
ret.emplace(outputName, outputAndPath.second);
}
return ret;
}
}
std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
{
auto conn(getConnection());
conn->to << wopQueryPathFromHashPart << hashPart;
conn.processStderr();
Path path = readString(conn->from);
if (path.empty()) return {};
return parseStorePath(path);
}
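/* addCAToStore() has two code paths. Daemons speaking protocol >= 1.25
   get a single wopAddToStore message carrying the name, the rendered
   content-address method, the references and the repair flag, with the
   dump streamed through a framed sink; the resulting path info is read
   back on the same connection. Older daemons fall back to
   wopAddTextToStore (text ingestion, SHA-256 only) or the legacy
   wopAddToStore encoding, and the path info is fetched afterwards via
   queryPathInfo(). */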
ref<const ValidPathInfo> RemoteStore::addCAToStore(
Source & dump,
std::string_view name,
ContentAddressMethod caMethod,
HashType hashType,
const StorePathSet & references,
RepairFlag repair)
{
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {
conn->to
<< wopAddToStore
<< name
<< caMethod.render(hashType);
workerProtoWrite(*this, conn->to, references);
conn->to << repair;
// The dump source may invoke the store, so we need to make some room.
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
conn.withFramedSink([&](Sink & sink) {
dump.drainInto(sink);
});
}
return make_ref<ValidPathInfo>(
ValidPathInfo::read(conn->from, *this, GET_PROTOCOL_MINOR(conn->daemonVersion)));
}
else {
if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");
std::visit(overloaded {
[&](const TextIngestionMethod & thm) -> void {
if (hashType != htSHA256)
throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
name, printHashType(hashType));
std::string s = dump.drain();
conn->to << wopAddTextToStore << name << s;
workerProtoWrite(*this, conn->to, references);
conn.processStderr();
},
[&](const FileIngestionMethod & fim) -> void {
conn->to
<< wopAddToStore
<< name
<< ((hashType == htSHA256 && fim == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
<< (fim == FileIngestionMethod::Recursive ? 1 : 0)
<< printHashType(hashType);
try {
conn->to.written = 0;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
if (fim == FileIngestionMethod::Recursive) {
dump.drainInto(conn->to);
} else {
std::string contents = dump.drain();
dumpString(contents, conn->to);
}
}
conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM
or I/O error. */
if (e.errNo == EPIPE)
try {
conn.processStderr();
} catch (EndOfFile & e) { }
throw;
}
}
}, caMethod.raw);
auto path = parseStorePath(readString(conn->from));
// Release our connection to prevent a deadlock in queryPathInfo().
conn_.reset();
return queryPathInfo(path);
}
}
StorePath RemoteStore::addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method, HashType hashType, RepairFlag repair, const StorePathSet & references)
{
return addCAToStore(dump, name, method, hashType, references, repair)->path;
}
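/* How the NAR is transferred in addToStore() depends on the daemon
   version: < 1.18 wraps it in the old export format via wopImportPaths;
   >= 1.23 streams it through a framed sink; 1.21-1.22 lets the daemon
   pull it via STDERR_READ; anything older copies the NAR straight into
   the connection. */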
void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
conn->to << wopImportPaths;
auto source2 = sinkToSource([&](Sink & sink) {
sink << 1 // == path follows
;
copyNAR(source, sink);
sink
<< exportMagic
<< printStorePath(info.path);
workerProtoWrite(*this, sink, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0 // == no legacy signature
<< 0 // == no path follows
;
});
conn.processStderr(0, source2.get());
auto importedPaths = WorkerProto<StorePathSet>::read(*this, conn->from);
assert(importedPaths.size() <= 1);
}
else {
conn->to << wopAddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
workerProtoWrite(*this, conn->to, info.references);
conn->to << info.registrationTime << info.narSize
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {
conn.withFramedSink([&](Sink & sink) {
copyNAR(source, sink);
});
} else if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21) {
conn.processStderr(0, &source);
} else {
copyNAR(source, conn->to);
conn.processStderr(0, nullptr);
}
}
}
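/* Bulk-copy stream consumed by wopAddMultipleToStore (daemon >= 1.32):
   a path count, then for each path its ValidPathInfo in the 1.16
   serialisation followed by its NAR. Older daemons fall back to the
   generic Store::addMultipleToStore(). */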
void RemoteStore::addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
auto source = sinkToSource([&](Sink & sink) {
sink << pathsToCopy.size();
for (auto & [pathInfo, pathSource] : pathsToCopy) {
pathInfo.write(sink, *this, 16);
pathSource->drainInto(sink);
}
});
addMultipleToStore(*source, repair, checkSigs);
}
void RemoteStore::addMultipleToStore(
Source & source,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
auto conn(getConnection());
conn->to
<< wopAddMultipleToStore
<< repair
<< !checkSigs;
conn.withFramedSink([&](Sink & sink) {
source.drainInto(sink);
});
} else
Store::addMultipleToStore(source, repair, checkSigs);
}
StorePath RemoteStore::addTextToStore(
std::string_view name,
std::string_view s,
const StorePathSet & references,
RepairFlag repair)
{
StringSource source(s);
return addCAToStore(source, name, TextIngestionMethod {}, htSHA256, references, repair)->path;
}
void RemoteStore::registerDrvOutput(const Realisation & info)
{
auto conn(getConnection());
conn->to << wopRegisterDrvOutput;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
conn->to << info.id.to_string();
conn->to << std::string(info.outPath.to_string());
} else {
workerProtoWrite(*this, conn->to, info);
}
conn.processStderr();
}
void RemoteStore::queryRealisationUncached(const DrvOutput & id,
Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
try {
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
return callback(nullptr);
}
conn->to << wopQueryRealisation;
conn->to << id.to_string();
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
auto outPaths = WorkerProto<std::set<StorePath>>::read(
*this, conn->from);
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
auto realisations = WorkerProto<std::set<Realisation>>::read(
*this, conn->from);
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
}
}();
callback(std::shared_ptr<const Realisation>(real));
} catch (...) { return callback.rethrow(); }
}
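/* Daemons >= 1.30 receive DerivedPaths natively; older ones only accept
   the StorePathWithOutputs string form, so a request for a plain .drv
   file cannot be expressed and is rejected below. */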
static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
{
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
workerProtoWrite(store, conn->to, reqs);
} else {
Strings ss;
for (auto & p : reqs) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
[&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(store));
},
[&](const StorePath & drvPath) {
throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.29) to request a derivation file",
store.printStorePath(drvPath),
GET_PROTOCOL_MAJOR(conn->daemonVersion),
GET_PROTOCOL_MINOR(conn->daemonVersion));
},
}, sOrDrvPath);
}
conn->to << ss;
}
}
void RemoteStore::copyDrvsFromEvalStore(
const std::vector<DerivedPath> & paths,
std::shared_ptr<Store> evalStore)
{
if (evalStore && evalStore.get() != this) {
/* The remote doesn't have a way to access evalStore, so copy
the .drvs. */
RealisedPath::Set drvPaths2;
for (auto & i : paths)
if (auto p = std::get_if<DerivedPath::Built>(&i))
drvPaths2.insert(p->drvPath);
copyClosure(*evalStore, *this, drvPaths2);
}
}
void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
copyDrvsFromEvalStore(drvPaths, evalStore);
auto conn(getConnection());
conn->to << wopBuildPaths;
assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
writeDerivedPaths(*this, conn, drvPaths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
conn->to << buildMode;
else
/* Old daemons did not take a 'buildMode' parameter, so we
need to validate it here on the client side. */
if (buildMode != bmNormal)
throw Error("repairing or checking is not supported when building through the Nix daemon");
conn.processStderr();
readInt(conn->from);
}
std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode,
std::shared_ptr<Store> evalStore)
{
copyDrvsFromEvalStore(paths, evalStore);
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
conn->to << wopBuildPathsWithResults;
writeDerivedPaths(*this, conn, paths);
conn->to << buildMode;
conn.processStderr();
return WorkerProto<std::vector<KeyedBuildResult>>::read(*this, conn->from);
} else {
// Avoid deadlock.
conn_.reset();
// Note: this throws an exception if a build/substitution
// fails, but meh.
buildPaths(paths, buildMode, evalStore);
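/* The old daemon can't report per-path results, so synthesize them
   client-side: opaque paths are marked Substituted, while built paths
   get their outputs resolved and their realisations looked up (for CA
   derivations) or reconstructed from the output path otherwise. */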
std::vector<KeyedBuildResult> results;
for (auto & path : paths) {
std::visit(
overloaded {
[&](const DerivedPath::Opaque & bo) {
results.push_back(KeyedBuildResult {
{
.status = BuildResult::Substituted,
},
/* .path = */ bo,
});
},
[&](const DerivedPath::Built & bfd) {
KeyedBuildResult res {
{
.status = BuildResult::Built
},
/* .path = */ bfd,
};
OutputPathMap outputs;
auto drv = evalStore->readDerivation(bfd.drvPath);
const auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
auto built = resolveDerivedPath(*this, bfd, &*evalStore);
for (auto & [output, outputPath] : built) {
auto outputHash = get(outputHashes, output);
if (!outputHash)
throw Error(
"the derivation '%s' doesn't have an output named '%s'",
printStorePath(bfd.drvPath), output);
auto outputId = DrvOutput{ *outputHash, output };
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto realisation =
queryRealisation(outputId);
if (!realisation)
throw MissingRealisation(outputId);
res.builtOutputs.emplace(output, *realisation);
} else {
res.builtOutputs.emplace(
output,
Realisation {
.id = outputId,
.outPath = outputPath,
});
}
}
results.push_back(res);
}
},
path.raw());
}
return results;
}
}
BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode)
{
auto conn(getConnection());
conn->to << wopBuildDerivation << printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
conn->to << buildMode;
conn.processStderr();
BuildResult res;
res.status = (BuildResult::Status) readInt(conn->from);
conn->from >> res.errorMsg;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
auto builtOutputs = WorkerProto<DrvOutputs>::read(*this, conn->from);
for (auto && [output, realisation] : builtOutputs)
res.builtOutputs.insert_or_assign(
std::move(output.outputName),
std::move(realisation));
}
return res;
}
void RemoteStore::ensurePath(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopEnsurePath << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
void RemoteStore::addTempRoot(const StorePath & path)
{
auto conn(getConnection());
conn->to << wopAddTempRoot << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
void RemoteStore::addIndirectRoot(const Path & path)
{
auto conn(getConnection());
conn->to << wopAddIndirectRoot << path;
conn.processStderr();
readInt(conn->from);
}
Roots RemoteStore::findRoots(bool censor)
{
auto conn(getConnection());
conn->to << wopFindRoots;
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
Roots result;
while (count--) {
Path link = readString(conn->from);
auto target = parseStorePath(readString(conn->from));
result[std::move(target)].emplace(link);
}
return result;
}
void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
{
auto conn(getConnection());
conn->to
<< wopCollectGarbage << options.action;
workerProtoWrite(*this, conn->to, options.pathsToDelete);
conn->to << options.ignoreLiveness
<< options.maxFreed
/* removed options */
<< 0 << 0 << 0;
conn.processStderr();
results.paths = readStrings<PathSet>(conn->from);
results.bytesFreed = readLongLong(conn->from);
readLongLong(conn->from); // obsolete
{
auto state_(Store::state.lock());
state_->pathInfoCache.clear();
}
}
void RemoteStore::optimiseStore()
{
auto conn(getConnection());
conn->to << wopOptimiseStore;
conn.processStderr();
readInt(conn->from);
}
bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
auto conn(getConnection());
conn->to << wopVerifyStore << checkContents << repair;
conn.processStderr();
return readInt(conn->from);
}
void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
{
auto conn(getConnection());
conn->to << wopAddSignatures << printStorePath(storePath) << sigs;
conn.processStderr();
readInt(conn->from);
}
void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
uint64_t & downloadSize, uint64_t & narSize)
{
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
// Don't hold the connection handle in the fallback case
// to prevent a deadlock.
goto fallback;
conn->to << wopQueryMissing;
writeDerivedPaths(*this, conn, targets);
conn.processStderr();
willBuild = WorkerProto<StorePathSet>::read(*this, conn->from);
willSubstitute = WorkerProto<StorePathSet>::read(*this, conn->from);
unknown = WorkerProto<StorePathSet>::read(*this, conn->from);
conn->from >> downloadSize >> narSize;
return;
}
fallback:
return Store::queryMissing(targets, willBuild, willSubstitute,
unknown, downloadSize, narSize);
}
void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log)
{
auto conn(getConnection());
conn->to << wopAddBuildLog << drvPath.to_string();
StringSource source(log);
conn.withFramedSink([&](Sink & sink) {
source.drainInto(sink);
});
readInt(conn->from);
}
std::optional<std::string> RemoteStore::getVersion()
{
auto conn(getConnection());
return conn->daemonNixVersion;
}
void RemoteStore::connect()
{
auto conn(getConnection());
}
unsigned int RemoteStore::getProtocol()
{
auto conn(connections->get());
return conn->daemonVersion;
}
std::optional<TrustedFlag> RemoteStore::isTrustedClient()
{
auto conn(getConnection());
return conn->remoteTrustsUs;
}
void RemoteStore::flushBadConnections()
{
connections->flushBad();
}
RemoteStore::Connection::~Connection()
{
try {
to.flush();
} catch (...) {
ignoreException();
}
}
void RemoteStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->to << wopNarFromPath << printStorePath(path);
conn->processStderr();
copyNAR(conn->from, sink);
}
ref<FSAccessor> RemoteStore::getFSAccessor()
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
static Logger::Fields readFields(Source & from)
{
Logger::Fields fields;
size_t size = readInt(from);
for (size_t n = 0; n < size; n++) {
auto type = (decltype(Logger::Field::type)) readInt(from);
if (type == Logger::Field::tInt)
fields.push_back(readNum<uint64_t>(from));
else if (type == Logger::Field::tString)
fields.push_back(readString(from));
else
throw Error("got unsupported field type %x from Nix daemon", (int) type);
}
return fields;
}
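/* The stderr multiplexing loop below: STDERR_WRITE / STDERR_READ move
   data to the given sink / from the given source, STDERR_NEXT prints a
   log line, the activity and result messages are forwarded to the
   logger, STDERR_ERROR yields an exception to be rethrown by the
   caller, and STDERR_LAST ends the exchange. */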
std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source, bool flush)
{
if (flush)
to.flush();
while (true) {
auto msg = readNum<uint64_t>(from);
if (msg == STDERR_WRITE) {
auto s = readString(from);
if (!sink) throw Error("no sink");
(*sink)(s);
}
else if (msg == STDERR_READ) {
if (!source) throw Error("no source");
size_t len = readNum<size_t>(from);
auto buf = std::make_unique<char[]>(len);
writeString({(const char *) buf.get(), source->read(buf.get(), len)}, to);
to.flush();
}
else if (msg == STDERR_ERROR) {
if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) {
return std::make_exception_ptr(readError(from));
} else {
auto error = readString(from);
unsigned int status = readInt(from);
return std::make_exception_ptr(Error(status, error));
}
}
else if (msg == STDERR_NEXT)
printError(chomp(readString(from)));
else if (msg == STDERR_START_ACTIVITY) {
auto act = readNum<ActivityId>(from);
auto lvl = (Verbosity) readInt(from);
auto type = (ActivityType) readInt(from);
auto s = readString(from);
auto fields = readFields(from);
auto parent = readNum<ActivityId>(from);
logger->startActivity(act, lvl, type, s, fields, parent);
}
else if (msg == STDERR_STOP_ACTIVITY) {
auto act = readNum<ActivityId>(from);
logger->stopActivity(act);
}
else if (msg == STDERR_RESULT) {
auto act = readNum<ActivityId>(from);
auto type = (ResultType) readInt(from);
auto fields = readFields(from);
logger->result(act, type, fields);
}
else if (msg == STDERR_LAST)
break;
else
throw Error("got unknown message type %x from Nix daemon", msg);
}
return nullptr;
}
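/* withFramedSink() wraps the connection in a FramedSink and runs
   processStderr() on a separate thread, so daemon log output (and any
   error) is handled while the caller is still streaming data; the
   exception, if any, is rethrown once the frame is closed and the
   thread has been joined. Usage as in addBuildLog():

     conn.withFramedSink([&](Sink & sink) {
         source.drainInto(sink);
     });
*/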
void ConnectionHandle::withFramedSink(std::function<void(Sink & sink)> fun)
{
(*this)->to.flush();
std::exception_ptr ex;
/* Handle log messages / exceptions from the remote on a separate
thread. */
std::thread stderrThread([&]()
{
try {
processStderr(nullptr, nullptr, false);
} catch (...) {
ex = std::current_exception();
}
});
Finally joinStderrThread([&]()
{
if (stderrThread.joinable()) {
stderrThread.join();
if (ex) {
try {
std::rethrow_exception(ex);
} catch (...) {
ignoreException();
}
}
}
});
{
FramedSink sink((*this)->to, ex);
fun(sink);
sink.flush();
}
stderrThread.join();
if (ex)
std::rethrow_exception(ex);
}
}