2006-11-30 19:54:43 +00:00
|
|
|
|
#include "serialise.hh"
|
|
|
|
|
#include "util.hh"
|
2006-11-30 18:35:50 +00:00
|
|
|
|
#include "remote-store.hh"
|
2006-11-30 20:13:59 +00:00
|
|
|
|
#include "worker-protocol.hh"
|
2006-11-30 20:45:20 +00:00
|
|
|
|
#include "archive.hh"
|
2013-08-07 11:51:55 +00:00
|
|
|
|
#include "affinity.hh"
|
2006-12-04 13:09:16 +00:00
|
|
|
|
#include "globals.hh"
|
2015-09-03 10:56:59 +00:00
|
|
|
|
#include "derivations.hh"
|
2016-02-23 14:00:59 +00:00
|
|
|
|
#include "pool.hh"
|
2006-11-30 18:35:50 +00:00
|
|
|
|
|
2006-12-03 02:08:13 +00:00
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
#include <sys/stat.h>
|
2006-12-03 02:36:44 +00:00
|
|
|
|
#include <sys/socket.h>
|
2006-12-04 14:21:39 +00:00
|
|
|
|
#include <sys/un.h>
|
2014-12-09 11:16:27 +00:00
|
|
|
|
#include <errno.h>
|
2006-12-03 02:08:13 +00:00
|
|
|
|
#include <fcntl.h>
|
2006-11-30 19:54:43 +00:00
|
|
|
|
#include <unistd.h>
|
2016-02-23 15:40:16 +00:00
|
|
|
|
|
2010-06-24 17:51:04 +00:00
|
|
|
|
#include <cstring>
|
2006-11-30 19:54:43 +00:00
|
|
|
|
|
2006-11-30 18:35:50 +00:00
|
|
|
|
namespace nix {
|
|
|
|
|
|
|
|
|
|
|
2006-12-05 01:31:45 +00:00
|
|
|
|
/* Deserialise a single store path from the wire, refusing anything
   that is not a syntactically valid path inside the Nix store. */
Path readStorePath(Source & from)
{
    Path result = readString(from);
    assertStorePath(result);
    return result;
}
|
|
|
|
|
|
|
|
|
|
|
2011-12-16 22:31:25 +00:00
|
|
|
|
template<class T> T readStorePaths(Source & from)
|
2006-12-05 01:31:45 +00:00
|
|
|
|
{
|
2011-12-16 22:31:25 +00:00
|
|
|
|
T paths = readStrings<T>(from);
|
2015-07-17 17:24:28 +00:00
|
|
|
|
for (auto & i : paths) assertStorePath(i);
|
2006-12-05 01:31:45 +00:00
|
|
|
|
return paths;
|
|
|
|
|
}
|
|
|
|
|
|
2011-12-16 22:31:25 +00:00
|
|
|
|
template PathSet readStorePaths(Source & from);
|
|
|
|
|
|
2006-12-05 01:31:45 +00:00
|
|
|
|
|
2016-02-23 15:40:16 +00:00
|
|
|
|
/* Construct a remote store that talks to the Nix daemon over a pool
   of at most 'maxConnections' Unix domain socket connections.
   Connections are created lazily via openConnection() and a pooled
   connection is considered reusable only while both its input and
   output streams are still in a good state. */
RemoteStore::RemoteStore(size_t maxConnections)
    : connections(make_ref<Pool<Connection>>(
            maxConnections,
            [this]() { return openConnection(); },
            [](const ref<Connection> & r) { return r->to.good() && r->from.good(); }
            ))
{
}
|
|
|
|
|
|
|
|
|
|
|
2016-02-24 16:33:53 +00:00
|
|
|
|
/* Open a new connection to the Nix daemon: create and connect a Unix
   domain socket, perform the magic-number handshake, negotiate the
   protocol version, and send the initial client options.

   Throws SysError on socket/connect failure and Error on protocol
   mismatch or any handshake failure. */
ref<RemoteStore::Connection> RemoteStore::openConnection()
{
    auto conn = make_ref<Connection>();

    /* Connect to a daemon that does the privileged work for us. */
    conn->fd = socket(PF_UNIX, SOCK_STREAM, 0);
    if (conn->fd == -1)
        throw SysError("cannot create Unix domain socket");
    closeOnExec(conn->fd);

    string socketPath = settings.nixDaemonSocketFile;

    struct sockaddr_un addr;
    /* Fix: zero the whole address structure.  Previously only
       sun_family and sun_path were set, so connect() was handed
       uninitialised padding bytes. */
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    if (socketPath.size() + 1 >= sizeof(addr.sun_path))
        throw Error(format("socket path ‘%1%’ is too long") % socketPath);
    strcpy(addr.sun_path, socketPath.c_str());

    if (connect(conn->fd, (struct sockaddr *) &addr, sizeof(addr)) == -1)
        throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath);

    conn->from.fd = conn->fd;
    conn->to.fd = conn->fd;

    /* Send the magic greeting, check for the reply. */
    try {
        conn->to << WORKER_MAGIC_1;
        conn->to.flush();
        unsigned int magic = readInt(conn->from);
        if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");

        /* Only the major protocol version has to match; minor
           differences are handled by feature checks below. */
        conn->daemonVersion = readInt(conn->from);
        if (GET_PROTOCOL_MAJOR(conn->daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
            throw Error("Nix daemon protocol version not supported");
        conn->to << PROTOCOL_VERSION;

        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 14) {
            /* Optionally pin the daemon to the client's current CPU. */
            int cpu = settings.lockCPU ? lockToCurrentCPU() : -1;
            if (cpu != -1)
                conn->to << 1 << cpu;
            else
                conn->to << 0;
        }

        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 11)
            conn->to << false; /* obsolete reserveSpace flag */

        conn->processStderr();
    }
    catch (Error & e) {
        throw Error(format("cannot start daemon worker: %1%") % e.msg());
    }

    setOptions(conn);

    return conn;
}
|
|
|
|
|
|
|
|
|
|
|
2016-02-23 14:00:59 +00:00
|
|
|
|
/* Transmit the client's settings to the daemon over 'conn'.  Each
   GET_PROTOCOL_MINOR check gates fields that were added in that
   protocol revision, so the write order must exactly match the
   daemon's read order for the negotiated version. */
void RemoteStore::setOptions(ref<Connection> conn)
{
    conn->to << wopSetOptions
       << settings.keepFailed
       << settings.keepGoing
       << settings.tryFallback
       << verbosity
       << settings.maxBuildJobs
       << settings.maxSilentTime;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 2)
        conn->to << settings.useBuildHook;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 4)
        conn->to << settings.buildVerbosity
           << logType
           << settings.printBuildTrace;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 6)
        conn->to << settings.buildCores;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 10)
        conn->to << settings.useSubstitutes;

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) {
        /* Forward command-line setting overrides as key/value pairs.
           SSH_AUTH_SOCK is propagated so the daemon can reach the
           user's SSH agent (e.g. for ssh substituters). */
        Settings::SettingsMap overrides = settings.getOverrides();
        if (overrides["ssh-auth-sock"] == "")
            overrides["ssh-auth-sock"] = getEnv("SSH_AUTH_SOCK");
        conn->to << overrides.size();
        for (auto & i : overrides)
            conn->to << i.first << i.second;
    }

    conn->processStderr();
}
|
|
|
|
|
|
|
|
|
|
|
2016-04-19 16:50:15 +00:00
|
|
|
|
bool RemoteStore::isValidPathUncached(const Path & path)
|
2006-11-30 18:35:50 +00:00
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopIsValidPath << path;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
unsigned int reply = readInt(conn->from);
|
2006-11-30 20:13:59 +00:00
|
|
|
|
return reply != 0;
|
2006-11-30 18:35:50 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2012-07-11 15:08:47 +00:00
|
|
|
|
/* Return the subset of 'paths' that is valid in the store.  Daemons
   older than protocol 1.12 lack the batched query, so each path is
   checked individually against them. */
PathSet RemoteStore::queryValidPaths(const PathSet & paths)
{
    auto conn(connections->get());
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) {
        conn->to << wopQueryValidPaths << paths;
        conn->processStderr();
        return readStorePaths<PathSet>(conn->from);
    } else {
        /* Compatibility fallback: one round-trip per path. */
        PathSet valid;
        for (auto & candidate : paths)
            if (isValidPath(candidate)) valid.insert(candidate);
        return valid;
    }
}
|
|
|
|
|
|
|
|
|
|
|
2012-07-11 14:49:04 +00:00
|
|
|
|
/* Fetch the complete set of valid store paths from the daemon. */
PathSet RemoteStore::queryAllValidPaths()
{
    auto conn(connections->get());
    conn->to << wopQueryAllValidPaths;
    conn->processStderr();
    return readStorePaths<PathSet>(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2012-07-11 21:52:18 +00:00
|
|
|
|
/* Return the subset of 'paths' for which a substitute (binary cache
   download) is available.  Pre-1.12 daemons only support a
   per-path query, so fall back to one request per path there. */
PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
{
    auto conn(connections->get());
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) {
        conn->to << wopQuerySubstitutablePaths << paths;
        conn->processStderr();
        return readStorePaths<PathSet>(conn->from);
    } else {
        /* Compatibility fallback: query each path separately. */
        PathSet substitutable;
        for (auto & candidate : paths) {
            conn->to << wopHasSubstitutes << candidate;
            conn->processStderr();
            if (readInt(conn->from)) substitutable.insert(candidate);
        }
        return substitutable;
    }
}
|
|
|
|
|
|
|
|
|
|
|
download-from-binary-cache: parallelise fetching of NAR info files
Getting substitute information using the binary cache substituter has
non-trivial latency overhead. A package or NixOS system configuration
can have hundreds of dependencies, and in the worst case (when the
local info cache is empty) we have to do a separate HTTP request for
each of these. If the ping time to the server is t, getting N info
files will take tN seconds; e.g., with a ping time of 0.1s to
nixos.org, sequentially downloading 1000 info files (a typical NixOS
config) will take at least 100 seconds.
To fix this problem, the binary cache substituter can now perform
requests in parallel. This required changing the substituter
interface to support a function querySubstitutablePathInfos() that
queries multiple paths at the same time, and rewriting queryMissing()
to take advantage of parallelism. (Due to local caching,
parallelising queryMissing() is sufficient for most use cases, since
it's almost always called before building a derivation and thus fills
the local info cache.)
For example, parallelism speeds up querying all 1056 paths in a
particular NixOS system configuration from 116s to 2.6s. It works so
well because the eccentricity of the top-level derivation in the
dependency graph is only 9. So we only need 10 round-trips (when
using an unlimited number of parallel connections) to get everything.
Currently we do a maximum of 150 parallel connections to the server.
Thus it's important that the binary cache server (e.g. nixos.org) has
a high connection limit. Alternatively we could use HTTP pipelining,
but WWW::Curl doesn't support it and libcurl has a hard-coded limit of
5 requests per pipeline.
2012-07-06 23:08:20 +00:00
|
|
|
|
/* Fill 'infos' with substitute information (deriver, references,
   download size, NAR size) for each path in 'paths' that has a
   substitute.  Paths without substitutes are simply absent from
   'infos'.  The field-read order below must match the daemon's write
   order exactly for the negotiated protocol version. */
void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
    SubstitutablePathInfos & infos)
{
    if (paths.empty()) return;

    auto conn(connections->get());

    /* Operation introduced in protocol 1.3; older daemons simply
       report no substitutes. */
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 3) return;

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {

        /* Pre-1.12: one round-trip per path. */
        for (auto & i : paths) {
            SubstitutablePathInfo info;
            conn->to << wopQuerySubstitutablePathInfo << i;
            conn->processStderr();
            unsigned int reply = readInt(conn->from);
            /* 0 means no substitute is known for this path. */
            if (reply == 0) continue;
            info.deriver = readString(conn->from);
            if (info.deriver != "") assertStorePath(info.deriver);
            info.references = readStorePaths<PathSet>(conn->from);
            info.downloadSize = readLongLong(conn->from);
            /* narSize was added in protocol 1.7. */
            info.narSize = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 7 ? readLongLong(conn->from) : 0;
            infos[i] = info;
        }

    } else {

        /* 1.12+: batched query — one round-trip for all paths. */
        conn->to << wopQuerySubstitutablePathInfos << paths;
        conn->processStderr();
        unsigned int count = readInt(conn->from);
        for (unsigned int n = 0; n < count; n++) {
            Path path = readStorePath(conn->from);
            SubstitutablePathInfo & info(infos[path]);
            info.deriver = readString(conn->from);
            if (info.deriver != "") assertStorePath(info.deriver);
            info.references = readStorePaths<PathSet>(conn->from);
            info.downloadSize = readLongLong(conn->from);
            info.narSize = readLongLong(conn->from);
        }

    }
}
|
|
|
|
|
|
|
|
|
|
|
2016-04-19 16:50:15 +00:00
|
|
|
|
/* Fetch the ValidPathInfo record for 'path' from the daemon,
   bypassing the client-side cache.  The reads below are strictly
   ordered to match the daemon's serialisation of the record. */
std::shared_ptr<ValidPathInfo> RemoteStore::queryPathInfoUncached(const Path & path)
{
    auto conn(connections->get());
    conn->to << wopQueryPathInfo << path;
    conn->processStderr();
    auto info = std::make_shared<ValidPathInfo>();
    info->path = path;
    /* An empty deriver means the deriver is unknown. */
    info->deriver = readString(conn->from);
    if (info->deriver != "") assertStorePath(info->deriver);
    info->narHash = parseHash(htSHA256, readString(conn->from));
    info->references = readStorePaths<PathSet>(conn->from);
    info->registrationTime = readInt(conn->from);
    info->narSize = readLongLong(conn->from);
    /* 'ultimate' and signatures were added in protocol 1.16. */
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
        info->ultimate = readInt(conn->from) != 0;
        info->sigs = readStrings<StringSet>(conn->from);
    }
    return info;
}
|
|
|
|
|
|
|
|
|
|
|
2006-11-30 22:43:55 +00:00
|
|
|
|
void RemoteStore::queryReferrers(const Path & path,
|
2006-11-30 18:35:50 +00:00
|
|
|
|
PathSet & referrers)
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopQueryReferrers << path;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
PathSet referrers2 = readStorePaths<PathSet>(conn->from);
|
2006-11-30 22:43:55 +00:00
|
|
|
|
referrers.insert(referrers2.begin(), referrers2.end());
|
2006-11-30 18:35:50 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2012-12-20 17:41:44 +00:00
|
|
|
|
/* Return all valid derivations known to produce 'path'. */
PathSet RemoteStore::queryValidDerivers(const Path & path)
{
    auto conn(connections->get());
    conn->to << wopQueryValidDerivers << path;
    conn->processStderr();
    return readStorePaths<PathSet>(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2010-02-22 12:44:36 +00:00
|
|
|
|
/* Return the output paths of the derivation at 'path'. */
PathSet RemoteStore::queryDerivationOutputs(const Path & path)
{
    auto conn(connections->get());
    conn->to << wopQueryDerivationOutputs << path;
    conn->processStderr();
    return readStorePaths<PathSet>(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2011-11-06 06:28:20 +00:00
|
|
|
|
/* Return the output names (e.g. "out", "dev") of the derivation at
   'path'.  These are plain names, not store paths, hence the use of
   readStrings rather than readStorePaths. */
PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
{
    auto conn(connections->get());
    conn->to << wopQueryDerivationOutputNames << path;
    conn->processStderr();
    return readStrings<PathSet>(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2012-07-17 22:55:39 +00:00
|
|
|
|
/* Look up the full store path whose hash component equals
   'hashPart'.  Returns the empty string when no such path exists. */
Path RemoteStore::queryPathFromHashPart(const string & hashPart)
{
    auto conn(connections->get());
    conn->to << wopQueryPathFromHashPart << hashPart;
    conn->processStderr();
    Path result = readString(conn->from);
    if (!result.empty()) assertStorePath(result);
    return result;
}
|
|
|
|
|
|
|
|
|
|
|
2015-03-25 16:06:12 +00:00
|
|
|
|
/* Add the contents of '_srcPath' to the store under name 'name',
   streaming a NAR dump of the path to the daemon.  Returns the
   resulting store path.  'repair' is unsupported over the daemon
   protocol and raises an error. */
Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
{
    if (repair) throw Error("repairing is not supported when building through the Nix daemon");

    auto conn(connections->get());

    Path srcPath(absPath(_srcPath));

    conn->to << wopAddToStore << name
       /* backwards compatibility hack */
       << ((hashAlgo == htSHA256 && recursive) ? 0 : 1)
       << (recursive ? 1 : 0)
       << printHashType(hashAlgo);

    try {
        conn->to.written = 0;
        conn->to.warn = true;
        dumpPath(srcPath, conn->to, filter);
        conn->to.warn = false;
        conn->processStderr();
    } catch (SysError & e) {
        /* Daemon closed while we were sending the path. Probably OOM
           or I/O error. */
        if (e.errNo == EPIPE)
            try {
                /* Drain the daemon's stderr so we can surface its
                   error message instead of the bare EPIPE. */
                conn->processStderr();
            } catch (EndOfFile & e) { }
        throw;
    }

    return readStorePath(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2008-12-03 15:06:30 +00:00
|
|
|
|
/* Store the string 's' as a text file named 'name' with the given
   references, returning the resulting store path.  Repair mode is
   not available through the daemon. */
Path RemoteStore::addTextToStore(const string & name, const string & s,
    const PathSet & references, bool repair)
{
    if (repair) throw Error("repairing is not supported when building through the Nix daemon");

    auto conn(connections->get());
    conn->to << wopAddTextToStore << name << s << references;

    conn->processStderr();
    return readStorePath(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2007-02-20 23:17:20 +00:00
|
|
|
|
void RemoteStore::exportPath(const Path & path, bool sign,
|
|
|
|
|
Sink & sink)
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopExportPath << path << (sign ? 1 : 0);
|
|
|
|
|
conn->processStderr(&sink); /* sink receives the actual data */
|
|
|
|
|
readInt(conn->from);
|
2007-02-20 23:17:20 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2016-02-26 14:20:10 +00:00
|
|
|
|
/* Import one or more exported paths from 'source', returning the
   store paths that were imported. */
Paths RemoteStore::importPaths(bool requireSignature, Source & source,
    std::shared_ptr<FSAccessor> accessor)
{
    auto conn(connections->get());
    conn->to << wopImportPaths;
    /* We ignore requireSignature, since the worker forces it to true
       anyway. */
    conn->processStderr(0, &source);
    return readStorePaths<Paths>(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
2014-02-18 00:01:14 +00:00
|
|
|
|
/* Ask the daemon to build (or substitute) the given derivation
   paths.  'buildMode' selects normal/repair/check; only normal mode
   is expressible to daemons older than protocol 1.15. */
void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
{
    auto conn(connections->get());
    conn->to << wopBuildPaths;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) {
        conn->to << drvPaths;
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
            conn->to << buildMode;
        else
            /* Old daemons did not take a 'buildMode' parameter, so we
               need to validate it here on the client side. */
            if (buildMode != bmNormal)
                throw Error("repairing or checking is not supported when building through the Nix daemon");
    } else {
        /* For backwards compatibility with old daemons, strip output
           identifiers. */
        PathSet drvPaths2;
        for (auto & i : drvPaths)
            drvPaths2.insert(string(i, 0, i.find('!')));
        conn->to << drvPaths2;
    }
    conn->processStderr();
    /* Consume the daemon's success marker. */
    readInt(conn->from);
}
|
|
|
|
|
|
|
|
|
|
|
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from a on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
2015-07-17 15:57:40 +00:00
|
|
|
|
/* Build a derivation from its in-memory representation ('drv')
   without requiring its closure of .drv files in the store, and
   return the daemon's build result. */
BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
    BuildMode buildMode)
{
    auto conn(connections->get());
    conn->to << wopBuildDerivation << drvPath << drv << buildMode;
    conn->processStderr();
    BuildResult result;
    unsigned int rawStatus;
    conn->from >> rawStatus >> result.errorMsg;
    result.status = (BuildResult::Status) rawStatus;
    return result;
}
|
|
|
|
|
|
|
|
|
|
|
2006-11-30 22:43:55 +00:00
|
|
|
|
void RemoteStore::ensurePath(const Path & path)
|
2006-11-30 18:35:50 +00:00
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopEnsurePath << path;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
readInt(conn->from);
|
2006-11-30 18:35:50 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2006-12-02 16:41:36 +00:00
|
|
|
|
void RemoteStore::addTempRoot(const Path & path)
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopAddTempRoot << path;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
readInt(conn->from);
|
2006-12-02 16:41:36 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2006-12-04 23:29:16 +00:00
|
|
|
|
void RemoteStore::addIndirectRoot(const Path & path)
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopAddIndirectRoot << path;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
readInt(conn->from);
|
2006-12-04 23:29:16 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2006-12-02 16:41:36 +00:00
|
|
|
|
void RemoteStore::syncWithGC()
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopSyncWithGC;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
readInt(conn->from);
|
2006-12-02 16:41:36 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2006-12-05 01:31:45 +00:00
|
|
|
|
/* Retrieve the daemon's map of GC roots: symlink location -> store
   path it keeps alive. */
Roots RemoteStore::findRoots()
{
    auto conn(connections->get());
    conn->to << wopFindRoots;
    conn->processStderr();
    Roots roots;
    for (unsigned int remaining = readInt(conn->from); remaining > 0; --remaining) {
        Path link = readString(conn->from);
        roots[link] = readStorePath(conn->from);
    }
    return roots;
}
|
|
|
|
|
|
|
|
|
|
|
2008-06-18 09:34:17 +00:00
|
|
|
|
/* Run the garbage collector on the daemon with the given options and
   collect the results (deleted paths and bytes freed).  Some fields
   in the wire format are obsolete placeholders that must still be
   sent/consumed to keep the protocol in sync. */
void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    auto conn(connections->get());

    conn->to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
       << options.maxFreed << 0; /* trailing 0: obsolete field */
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 5)
        /* removed options */
        conn->to << 0 << 0;

    conn->processStderr();

    results.paths = readStrings<PathSet>(conn->from);
    results.bytesFreed = readLongLong(conn->from);
    readLongLong(conn->from); // obsolete

    /* GC may have invalidated paths we have cached info for, so drop
       the whole client-side path-info cache.  Scoped block limits
       how long the state lock is held. */
    {
        auto state_(Store::state.lock());
        state_->pathInfoCache.clear();
    }
}
|
|
|
|
|
|
|
|
|
|
|
2014-09-01 20:21:42 +00:00
|
|
|
|
void RemoteStore::optimiseStore()
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopOptimiseStore;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
readInt(conn->from);
|
2014-09-01 20:21:42 +00:00
|
|
|
|
}
|
2010-05-04 10:45:10 +00:00
|
|
|
|
|
2016-04-08 16:16:53 +00:00
|
|
|
|
|
2015-06-01 21:20:11 +00:00
|
|
|
|
bool RemoteStore::verifyStore(bool checkContents, bool repair)
|
|
|
|
|
{
|
2016-02-23 14:00:59 +00:00
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopVerifyStore << checkContents << repair;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
return readInt(conn->from) != 0;
|
2015-06-01 21:20:11 +00:00
|
|
|
|
}
|
|
|
|
|
|
2016-02-23 15:40:16 +00:00
|
|
|
|
|
2016-04-05 13:30:22 +00:00
|
|
|
|
void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
|
|
|
|
|
{
|
|
|
|
|
auto conn(connections->get());
|
|
|
|
|
conn->to << wopAddSignatures << storePath << sigs;
|
|
|
|
|
conn->processStderr();
|
|
|
|
|
readInt(conn->from);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2016-02-23 15:40:16 +00:00
|
|
|
|
/* Tear down a daemon connection: flush any buffered output and close
   the socket.  Destructors must not throw, so any error during
   teardown is logged and swallowed. */
RemoteStore::Connection::~Connection()
{
    try {
        to.flush();
        fd.close();
    } catch (...) {
        ignoreException();
    }
}
|
|
|
|
|
|
|
|
|
|
|
2016-02-23 14:00:59 +00:00
|
|
|
|
/* Pump the daemon's multiplexed reply stream until the operation
   finishes.  The daemon interleaves three kinds of messages before
   the final status:
     - STDERR_NEXT:  a log line to show the user;
     - STDERR_WRITE: payload data to be fed into 'sink' (e.g. for
                     exportPath);
     - STDERR_READ:  a request for up to 'len' bytes of input, served
                     from 'source' (e.g. for importPaths).
   The loop ends on STDERR_ERROR (re-thrown as an Error with the
   daemon's exit status, if the protocol carries one) or STDERR_LAST
   (success).  Anything else is a protocol violation. */
void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
{
    to.flush();
    unsigned int msg;
    while ((msg = readInt(from)) == STDERR_NEXT
        || msg == STDERR_READ || msg == STDERR_WRITE) {
        if (msg == STDERR_WRITE) {
            string s = readString(from);
            if (!sink) throw Error("no sink");
            (*sink)((const unsigned char *) s.data(), s.size());
        }
        else if (msg == STDERR_READ) {
            if (!source) throw Error("no source");
            size_t len = readInt(from);
            unsigned char * buf = new unsigned char[len];
            /* RAII guard frees the buffer even if a read/write below
               throws. */
            AutoDeleteArray<unsigned char> d(buf);
            /* source->read may return fewer than 'len' bytes; only
               what was actually read is sent back. */
            writeString(buf, source->read(buf, len), to);
            to.flush();
        }
        else {
            string s = readString(from);
            writeToStderr(s);
        }
    }
    if (msg == STDERR_ERROR) {
        string error = readString(from);
        /* Exit status field was added in protocol 1.8. */
        unsigned int status = GET_PROTOCOL_MINOR(daemonVersion) >= 8 ? readInt(from) : 1;
        throw Error(format("%1%") % error, status);
    }
    else if (msg != STDERR_LAST)
        throw Error("protocol error processing standard error");
}
|
|
|
|
|
|
|
|
|
|
|
2006-11-30 18:35:50 +00:00
|
|
|
|
}
|