forked from lix-project/lix
Merge remote-tracking branch 'origin/master' into flakes
commit b81d9d26f5
.travis.yml (10 lines changed)
@@ -1,2 +1,8 @@
-os: osx
-script: ./tests/install-darwin.sh
+matrix:
+  include:
+    - language: osx
+      script: ./tests/install-darwin.sh
+    - language: nix
+      script: nix-build release.nix -A build.x86_64-linux
+notifications:
+  email: false
@@ -746,6 +746,11 @@ builtins.genList (x: x * x) 5
 separate file, and use it from Nix expressions in other
 files.</para>
 
+<note><para>Unlike some languages, <function>import</function> is a regular
+function in Nix. Paths using the angle bracket syntax (e.g., <function>
+import</function> <replaceable><foo></replaceable>) are normal path
+values (see <xref linkend='ssec-values' />).</para></note>
+
 <para>A Nix expression loaded by <function>import</function> must
 not contain any <emphasis>free variables</emphasis> (identifiers
 that are not defined in the Nix expression itself and are not
@@ -159,7 +159,6 @@ the S3 URL:
         "s3:ListBucket",
         "s3:ListBucketMultipartUploads",
         "s3:ListMultipartUploadParts",
-        "s3:ListObjects",
         "s3:PutObject"
       ],
       "Resource": [
@@ -19,9 +19,6 @@ readonly BLUE_UL='\033[38;4;34m'
 readonly GREEN='\033[38;32m'
 readonly GREEN_UL='\033[38;4;32m'
 readonly RED='\033[38;31m'
-readonly RED_UL='\033[38;4;31m'
-readonly YELLOW='\033[38;33m'
-readonly YELLOW_UL='\033[38;4;33m'
 
 readonly NIX_USER_COUNT="32"
 readonly NIX_BUILD_GROUP_ID="30000"
@@ -1607,6 +1607,19 @@ bool EvalState::isDerivation(Value & v)
 }
 
 
+std::optional<string> EvalState::tryAttrsToString(const Pos & pos, Value & v,
+    PathSet & context, bool coerceMore, bool copyToStore)
+{
+    auto i = v.attrs->find(sToString);
+    if (i != v.attrs->end()) {
+        Value v1;
+        callFunction(*i->value, v, v1, pos);
+        return coerceToString(pos, v1, context, coerceMore, copyToStore);
+    }
+
+    return {};
+}
+
 string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
     bool coerceMore, bool copyToStore)
 {
@@ -1625,13 +1638,11 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
     }
 
     if (v.type == tAttrs) {
-        auto i = v.attrs->find(sToString);
-        if (i != v.attrs->end()) {
-            Value v1;
-            callFunction(*i->value, v, v1, pos);
-            return coerceToString(pos, v1, context, coerceMore, copyToStore);
+        auto maybeString = tryAttrsToString(pos, v, context, coerceMore, copyToStore);
+        if (maybeString) {
+            return *maybeString;
         }
-        i = v.attrs->find(sOutPath);
+        auto i = v.attrs->find(sOutPath);
         if (i == v.attrs->end()) throwTypeError("cannot coerce a set to a string, at %1%", pos);
         return coerceToString(pos, *i->value, context, coerceMore, copyToStore);
     }
@@ -9,6 +9,7 @@
 #include "function-trace.hh"
 
 #include <map>
+#include <optional>
 #include <unordered_map>
 
 
@@ -207,6 +208,9 @@ public:
        set with attribute `type = "derivation"'). */
     bool isDerivation(Value & v);
 
+    std::optional<string> tryAttrsToString(const Pos & pos, Value & v,
+        PathSet & context, bool coerceMore = false, bool copyToStore = true);
+
     /* String coercion. Converts strings, paths and derivations to a
        string. If `coerceMore' is set, also converts nulls, integers,
        booleans and lists to a string. If `copyToStore' is set,
@@ -115,8 +115,7 @@ GitInfo exportGit(ref<Store> store, std::string uri,
 
     if (!clean) {
 
-        /* This is an unclean working tree. So copy all tracked
-           files. */
+        /* This is an unclean working tree. So copy all tracked files. */
 
         if (!evalSettings.allowDirty)
             throw Error("Git tree '%s' is dirty", uri);
@@ -40,7 +40,12 @@ void printValueAsJSON(EvalState & state, bool strict,
             break;
 
         case tAttrs: {
-            Bindings::iterator i = v.attrs->find(state.sOutPath);
+            auto maybeString = state.tryAttrsToString(noPos, v, context, false, false);
+            if (maybeString) {
+                out.write(*maybeString);
+                break;
+            }
+            auto i = v.attrs->find(state.sOutPath);
             if (i == v.attrs->end()) {
                 auto obj(out.object());
                 StringSet names;
@@ -296,7 +296,7 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
 }
 
 void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
-    Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
+    Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
 {
     auto uri = getUri();
     auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
@@ -74,7 +74,7 @@ public:
     bool isValidPathUncached(const Path & path) override;
 
     void queryPathInfoUncached(const Path & path,
-        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
+        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
 
     Path queryPathFromHashPart(const string & hashPart) override
     { unsupported("queryPathFromHashPart"); }
@@ -1875,6 +1875,21 @@ static void preloadNSS() {
     });
 }
 
+
+void linkOrCopy(const Path & from, const Path & to)
+{
+    if (link(from.c_str(), to.c_str()) == -1) {
+        /* Hard-linking fails if we exceed the maximum link count on a
+           file (e.g. 32000 of ext3), which is quite possible after a
+           'nix-store --optimise'. FIXME: actually, why don't we just
+           bind-mount in this case? */
+        if (errno != EMLINK)
+            throw SysError("linking '%s' to '%s'", to, from);
+        copyPath(from, to);
+    }
+}
+
+
 void DerivationGoal::startBuilder()
 {
     /* Right platform? */
@@ -2118,22 +2133,8 @@ void DerivationGoal::startBuilder()
                 throw SysError(format("getting attributes of path '%1%'") % i);
             if (S_ISDIR(st.st_mode))
                 dirsInChroot[i] = r;
-            else {
-                Path p = chrootRootDir + i;
-                debug("linking '%1%' to '%2%'", p, r);
-                if (link(r.c_str(), p.c_str()) == -1) {
-                    /* Hard-linking fails if we exceed the maximum
-                       link count on a file (e.g. 32000 of ext3),
-                       which is quite possible after a `nix-store
-                       --optimise'. */
-                    if (errno != EMLINK)
-                        throw SysError(format("linking '%1%' to '%2%'") % p % i);
-                    StringSink sink;
-                    dumpPath(r, sink);
-                    StringSource source(*sink.s);
-                    restorePath(p, source);
-                }
-            }
+            else
+                linkOrCopy(r, chrootRootDir + i);
         }
 
         /* If we're repairing, checking or rebuilding part of a
@@ -3264,8 +3265,7 @@ void DerivationGoal::registerOutputs()
             i.second.parseHashInfo(recursive, h);
 
             if (!recursive) {
-                /* The output path should be a regular file without
-                   execute permission. */
+                /* The output path should be a regular file without execute permission. */
                 if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
                     throw BuildError(
                         format("output path '%1%' should be a non-executable regular file") % path);
@@ -3343,8 +3343,7 @@ void DerivationGoal::registerOutputs()
                         % drvPath % path);
             }
 
-            /* Since we verified the build, it's now ultimately
-               trusted. */
+            /* Since we verified the build, it's now ultimately trusted. */
             if (!info.ultimate) {
                 info.ultimate = true;
                 worker.store.signPathInfo(info);
@@ -3354,8 +3353,7 @@ void DerivationGoal::registerOutputs()
                 continue;
             }
 
-        /* For debugging, print out the referenced and unreferenced
-           paths. */
+        /* For debugging, print out the referenced and unreferenced paths. */
         for (auto & i : inputPaths) {
             PathSet::iterator j = references.find(i);
             if (j == references.end())
@@ -3413,8 +3411,7 @@ void DerivationGoal::registerOutputs()
         }
     }
 
-    /* If this is the first round of several, then move the output out
-       of the way. */
+    /* If this is the first round of several, then move the output out of the way. */
     if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
         for (auto & i : drv->outputs) {
             Path prev = i.second.path + checkSuffix;
@@ -4138,9 +4135,6 @@ void SubstitutionGoal::handleEOF(int fd)
 //////////////////////////////////////////////////////////////////////
 
 
-static bool working = false;
-
-
 Worker::Worker(LocalStore & store)
     : act(*logger, actRealise)
     , actDerivations(*logger, actBuilds)
@@ -4148,8 +4142,6 @@ Worker::Worker(LocalStore & store)
     , store(store)
 {
     /* Debugging: prevent recursive workers. */
-    if (working) abort();
-    working = true;
     nrLocalBuilds = 0;
     lastWokenUp = steady_time_point::min();
     permanentFailure = false;
@@ -4161,8 +4153,6 @@ Worker::Worker(LocalStore & store)
 
 Worker::~Worker()
 {
-    working = false;
-
     /* Explicitly get rid of all strong pointers now. After this all
        goals that refer to this worker should be gone. (Otherwise we
        are in trouble, since goals may call childTerminated() etc. in
src/libstore/daemon.cc (new file, 790 lines)
@@ -0,0 +1,790 @@
|
||||||
|
#include "daemon.hh"
|
||||||
|
#include "monitor-fd.hh"
|
||||||
|
#include "worker-protocol.hh"
|
||||||
|
#include "store-api.hh"
|
||||||
|
#include "local-store.hh"
|
||||||
|
#include "finally.hh"
|
||||||
|
#include "affinity.hh"
|
||||||
|
#include "archive.hh"
|
||||||
|
#include "derivations.hh"
|
||||||
|
#include "args.hh"
|
||||||
|
|
||||||
|
namespace nix::daemon {
|
||||||
|
|
||||||
|
Sink & operator << (Sink & sink, const Logger::Fields & fields)
|
||||||
|
{
|
||||||
|
sink << fields.size();
|
||||||
|
for (auto & f : fields) {
|
||||||
|
sink << f.type;
|
||||||
|
if (f.type == Logger::Field::tInt)
|
||||||
|
sink << f.i;
|
||||||
|
else if (f.type == Logger::Field::tString)
|
||||||
|
sink << f.s;
|
||||||
|
else abort();
|
||||||
|
}
|
||||||
|
return sink;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Logger that forwards log messages to the client, *if* we're in a
|
||||||
|
state where the protocol allows it (i.e., when canSendStderr is
|
||||||
|
true). */
|
||||||
|
struct TunnelLogger : public Logger
|
||||||
|
{
|
||||||
|
FdSink & to;
|
||||||
|
|
||||||
|
struct State
|
||||||
|
{
|
||||||
|
bool canSendStderr = false;
|
||||||
|
std::vector<std::string> pendingMsgs;
|
||||||
|
};
|
||||||
|
|
||||||
|
Sync<State> state_;
|
||||||
|
|
||||||
|
unsigned int clientVersion;
|
||||||
|
|
||||||
|
TunnelLogger(FdSink & to, unsigned int clientVersion)
|
||||||
|
: to(to), clientVersion(clientVersion) { }
|
||||||
|
|
||||||
|
void enqueueMsg(const std::string & s)
|
||||||
|
{
|
||||||
|
auto state(state_.lock());
|
||||||
|
|
||||||
|
if (state->canSendStderr) {
|
||||||
|
assert(state->pendingMsgs.empty());
|
||||||
|
try {
|
||||||
|
to(s);
|
||||||
|
to.flush();
|
||||||
|
} catch (...) {
|
||||||
|
/* Write failed; that means that the other side is
|
||||||
|
gone. */
|
||||||
|
state->canSendStderr = false;
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
} else
|
||||||
|
state->pendingMsgs.push_back(s);
|
||||||
|
}
|
||||||
|
|
||||||
|
void log(Verbosity lvl, const FormatOrString & fs) override
|
||||||
|
{
|
||||||
|
if (lvl > verbosity) return;
|
||||||
|
|
||||||
|
StringSink buf;
|
||||||
|
buf << STDERR_NEXT << (fs.s + "\n");
|
||||||
|
enqueueMsg(*buf.s);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* startWork() means that we're starting an operation for which we
|
||||||
|
want to send out stderr to the client. */
|
||||||
|
void startWork()
|
||||||
|
{
|
||||||
|
auto state(state_.lock());
|
||||||
|
state->canSendStderr = true;
|
||||||
|
|
||||||
|
for (auto & msg : state->pendingMsgs)
|
||||||
|
to(msg);
|
||||||
|
|
||||||
|
state->pendingMsgs.clear();
|
||||||
|
|
||||||
|
to.flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
/* stopWork() means that we're done; stop sending stderr to the
|
||||||
|
client. */
|
||||||
|
void stopWork(bool success = true, const string & msg = "", unsigned int status = 0)
|
||||||
|
{
|
||||||
|
auto state(state_.lock());
|
||||||
|
|
||||||
|
state->canSendStderr = false;
|
||||||
|
|
||||||
|
if (success)
|
||||||
|
to << STDERR_LAST;
|
||||||
|
else {
|
||||||
|
to << STDERR_ERROR << msg;
|
||||||
|
if (status != 0) to << status;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void startActivity(ActivityId act, Verbosity lvl, ActivityType type,
|
||||||
|
const std::string & s, const Fields & fields, ActivityId parent) override
|
||||||
|
{
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) < 20) {
|
||||||
|
if (!s.empty())
|
||||||
|
log(lvl, s + "...");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
StringSink buf;
|
||||||
|
buf << STDERR_START_ACTIVITY << act << lvl << type << s << fields << parent;
|
||||||
|
enqueueMsg(*buf.s);
|
||||||
|
}
|
||||||
|
|
||||||
|
void stopActivity(ActivityId act) override
|
||||||
|
{
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
|
||||||
|
StringSink buf;
|
||||||
|
buf << STDERR_STOP_ACTIVITY << act;
|
||||||
|
enqueueMsg(*buf.s);
|
||||||
|
}
|
||||||
|
|
||||||
|
void result(ActivityId act, ResultType type, const Fields & fields) override
|
||||||
|
{
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
|
||||||
|
StringSink buf;
|
||||||
|
buf << STDERR_RESULT << act << type << fields;
|
||||||
|
enqueueMsg(*buf.s);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct TunnelSink : Sink
|
||||||
|
{
|
||||||
|
Sink & to;
|
||||||
|
TunnelSink(Sink & to) : to(to) { }
|
||||||
|
virtual void operator () (const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
to << STDERR_WRITE;
|
||||||
|
writeString(data, len, to);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct TunnelSource : BufferedSource
|
||||||
|
{
|
||||||
|
Source & from;
|
||||||
|
BufferedSink & to;
|
||||||
|
TunnelSource(Source & from, BufferedSink & to) : from(from), to(to) { }
|
||||||
|
size_t readUnbuffered(unsigned char * data, size_t len) override
|
||||||
|
{
|
||||||
|
to << STDERR_READ << len;
|
||||||
|
to.flush();
|
||||||
|
size_t n = readString(data, len, from);
|
||||||
|
if (n == 0) throw EndOfFile("unexpected end-of-file");
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
/* If the NAR archive contains a single file at top-level, then save
|
||||||
|
the contents of the file to `s'. Otherwise barf. */
|
||||||
|
struct RetrieveRegularNARSink : ParseSink
|
||||||
|
{
|
||||||
|
bool regular;
|
||||||
|
string s;
|
||||||
|
|
||||||
|
RetrieveRegularNARSink() : regular(true) { }
|
||||||
|
|
||||||
|
void createDirectory(const Path & path)
|
||||||
|
{
|
||||||
|
regular = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void receiveContents(unsigned char * data, unsigned int len)
|
||||||
|
{
|
||||||
|
s.append((const char *) data, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
void createSymlink(const Path & path, const string & target)
|
||||||
|
{
|
||||||
|
regular = false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static void performOp(TunnelLogger * logger, ref<Store> store,
|
||||||
|
bool trusted, unsigned int clientVersion,
|
||||||
|
Source & from, BufferedSink & to, unsigned int op)
|
||||||
|
{
|
||||||
|
switch (op) {
|
||||||
|
|
||||||
|
case wopIsValidPath: {
|
||||||
|
/* 'readStorePath' could raise an error leading to the connection
|
||||||
|
being closed. To be able to recover from an invalid path error,
|
||||||
|
call 'startWork' early, and do 'assertStorePath' afterwards so
|
||||||
|
that the 'Error' exception handler doesn't close the
|
||||||
|
connection. */
|
||||||
|
Path path = readString(from);
|
||||||
|
logger->startWork();
|
||||||
|
store->assertStorePath(path);
|
||||||
|
bool result = store->isValidPath(path);
|
||||||
|
logger->stopWork();
|
||||||
|
to << result;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryValidPaths: {
|
||||||
|
PathSet paths = readStorePaths<PathSet>(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
PathSet res = store->queryValidPaths(paths);
|
||||||
|
logger->stopWork();
|
||||||
|
to << res;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopHasSubstitutes: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
PathSet res = store->querySubstitutablePaths({path});
|
||||||
|
logger->stopWork();
|
||||||
|
to << (res.find(path) != res.end());
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQuerySubstitutablePaths: {
|
||||||
|
PathSet paths = readStorePaths<PathSet>(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
PathSet res = store->querySubstitutablePaths(paths);
|
||||||
|
logger->stopWork();
|
||||||
|
to << res;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryPathHash: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
auto hash = store->queryPathInfo(path)->narHash;
|
||||||
|
logger->stopWork();
|
||||||
|
to << hash.to_string(Base16, false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryReferences:
|
||||||
|
case wopQueryReferrers:
|
||||||
|
case wopQueryValidDerivers:
|
||||||
|
case wopQueryDerivationOutputs: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
PathSet paths;
|
||||||
|
if (op == wopQueryReferences)
|
||||||
|
paths = store->queryPathInfo(path)->references;
|
||||||
|
else if (op == wopQueryReferrers)
|
||||||
|
store->queryReferrers(path, paths);
|
||||||
|
else if (op == wopQueryValidDerivers)
|
||||||
|
paths = store->queryValidDerivers(path);
|
||||||
|
else paths = store->queryDerivationOutputs(path);
|
||||||
|
logger->stopWork();
|
||||||
|
to << paths;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryDerivationOutputNames: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
StringSet names;
|
||||||
|
names = store->queryDerivationOutputNames(path);
|
||||||
|
logger->stopWork();
|
||||||
|
to << names;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryDeriver: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
auto deriver = store->queryPathInfo(path)->deriver;
|
||||||
|
logger->stopWork();
|
||||||
|
to << deriver;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryPathFromHashPart: {
|
||||||
|
string hashPart = readString(from);
|
||||||
|
logger->startWork();
|
||||||
|
Path path = store->queryPathFromHashPart(hashPart);
|
||||||
|
logger->stopWork();
|
||||||
|
to << path;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddToStore: {
|
||||||
|
bool fixed, recursive;
|
||||||
|
std::string s, baseName;
|
||||||
|
from >> baseName >> fixed /* obsolete */ >> recursive >> s;
|
||||||
|
/* Compatibility hack. */
|
||||||
|
if (!fixed) {
|
||||||
|
s = "sha256";
|
||||||
|
recursive = true;
|
||||||
|
}
|
||||||
|
HashType hashAlgo = parseHashType(s);
|
||||||
|
|
||||||
|
TeeSource savedNAR(from);
|
||||||
|
RetrieveRegularNARSink savedRegular;
|
||||||
|
|
||||||
|
if (recursive) {
|
||||||
|
/* Get the entire NAR dump from the client and save it to
|
||||||
|
a string so that we can pass it to
|
||||||
|
addToStoreFromDump(). */
|
||||||
|
ParseSink sink; /* null sink; just parse the NAR */
|
||||||
|
parseDump(sink, savedNAR);
|
||||||
|
} else
|
||||||
|
parseDump(savedRegular, from);
|
||||||
|
|
||||||
|
logger->startWork();
|
||||||
|
if (!savedRegular.regular) throw Error("regular file expected");
|
||||||
|
|
||||||
|
Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
|
||||||
|
logger->stopWork();
|
||||||
|
|
||||||
|
to << path;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddTextToStore: {
|
||||||
|
string suffix = readString(from);
|
||||||
|
string s = readString(from);
|
||||||
|
PathSet refs = readStorePaths<PathSet>(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
Path path = store->addTextToStore(suffix, s, refs, NoRepair);
|
||||||
|
logger->stopWork();
|
||||||
|
to << path;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopExportPath: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
readInt(from); // obsolete
|
||||||
|
logger->startWork();
|
||||||
|
TunnelSink sink(to);
|
||||||
|
store->exportPath(path, sink);
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopImportPaths: {
|
||||||
|
logger->startWork();
|
||||||
|
TunnelSource source(from, to);
|
||||||
|
Paths paths = store->importPaths(source, nullptr,
|
||||||
|
trusted ? NoCheckSigs : CheckSigs);
|
||||||
|
logger->stopWork();
|
||||||
|
to << paths;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopBuildPaths: {
|
||||||
|
PathSet drvs = readStorePaths<PathSet>(*store, from);
|
||||||
|
BuildMode mode = bmNormal;
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
|
||||||
|
mode = (BuildMode) readInt(from);
|
||||||
|
|
||||||
|
/* Repairing is not atomic, so disallowed for "untrusted"
|
||||||
|
clients. */
|
||||||
|
if (mode == bmRepair && !trusted)
|
||||||
|
throw Error("repairing is not allowed because you are not in 'trusted-users'");
|
||||||
|
}
|
||||||
|
logger->startWork();
|
||||||
|
store->buildPaths(drvs, mode);
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopBuildDerivation: {
|
||||||
|
Path drvPath = readStorePath(*store, from);
|
||||||
|
BasicDerivation drv;
|
||||||
|
readDerivation(from, *store, drv);
|
||||||
|
BuildMode buildMode = (BuildMode) readInt(from);
|
||||||
|
logger->startWork();
|
||||||
|
if (!trusted)
|
||||||
|
throw Error("you are not privileged to build derivations");
|
||||||
|
auto res = store->buildDerivation(drvPath, drv, buildMode);
|
||||||
|
logger->stopWork();
|
||||||
|
to << res.status << res.errorMsg;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopEnsurePath: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
store->ensurePath(path);
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddTempRoot: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
store->addTempRoot(path);
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddIndirectRoot: {
|
||||||
|
Path path = absPath(readString(from));
|
||||||
|
logger->startWork();
|
||||||
|
store->addIndirectRoot(path);
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopSyncWithGC: {
|
||||||
|
logger->startWork();
|
||||||
|
store->syncWithGC();
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopFindRoots: {
|
||||||
|
logger->startWork();
|
||||||
|
Roots roots = store->findRoots(!trusted);
|
||||||
|
logger->stopWork();
|
||||||
|
|
||||||
|
size_t size = 0;
|
||||||
|
for (auto & i : roots)
|
||||||
|
size += i.second.size();
|
||||||
|
|
||||||
|
to << size;
|
||||||
|
|
||||||
|
for (auto & [target, links] : roots)
|
||||||
|
for (auto & link : links)
|
||||||
|
to << link << target;
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopCollectGarbage: {
|
||||||
|
GCOptions options;
|
||||||
|
options.action = (GCOptions::GCAction) readInt(from);
|
||||||
|
options.pathsToDelete = readStorePaths<PathSet>(*store, from);
|
||||||
|
from >> options.ignoreLiveness >> options.maxFreed;
|
||||||
|
// obsolete fields
|
||||||
|
readInt(from);
|
||||||
|
readInt(from);
|
||||||
|
readInt(from);
|
||||||
|
|
||||||
|
GCResults results;
|
||||||
|
|
||||||
|
logger->startWork();
|
||||||
|
if (options.ignoreLiveness)
|
||||||
|
throw Error("you are not allowed to ignore liveness");
|
||||||
|
store->collectGarbage(options, results);
|
||||||
|
logger->stopWork();
|
||||||
|
|
||||||
|
to << results.paths << results.bytesFreed << 0 /* obsolete */;
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopSetOptions: {
|
||||||
|
settings.keepFailed = readInt(from);
|
||||||
|
settings.keepGoing = readInt(from);
|
||||||
|
settings.tryFallback = readInt(from);
|
||||||
|
verbosity = (Verbosity) readInt(from);
|
||||||
|
settings.maxBuildJobs.assign(readInt(from));
|
||||||
|
settings.maxSilentTime = readInt(from);
|
||||||
|
readInt(from); // obsolete useBuildHook
|
||||||
|
settings.verboseBuild = lvlError == (Verbosity) readInt(from);
|
||||||
|
readInt(from); // obsolete logType
|
||||||
|
readInt(from); // obsolete printBuildTrace
|
||||||
|
settings.buildCores = readInt(from);
|
||||||
|
settings.useSubstitutes = readInt(from);
|
||||||
|
|
||||||
|
StringMap overrides;
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
|
||||||
|
unsigned int n = readInt(from);
|
||||||
|
for (unsigned int i = 0; i < n; i++) {
|
||||||
|
string name = readString(from);
|
||||||
|
string value = readString(from);
|
||||||
|
overrides.emplace(name, value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger->startWork();
|
||||||
|
|
||||||
|
for (auto & i : overrides) {
|
||||||
|
auto & name(i.first);
|
||||||
|
auto & value(i.second);
|
||||||
|
|
||||||
|
auto setSubstituters = [&](Setting<Strings> & res) {
|
||||||
|
if (name != res.name && res.aliases.count(name) == 0)
|
||||||
|
return false;
|
||||||
|
StringSet trusted = settings.trustedSubstituters;
|
||||||
|
for (auto & s : settings.substituters.get())
|
||||||
|
trusted.insert(s);
|
||||||
|
Strings subs;
|
||||||
|
auto ss = tokenizeString<Strings>(value);
|
||||||
|
for (auto & s : ss)
|
||||||
|
if (trusted.count(s))
|
||||||
|
subs.push_back(s);
|
||||||
|
else
|
||||||
|
warn("ignoring untrusted substituter '%s'", s);
|
||||||
|
res = subs;
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
|
||||||
|
try {
|
||||||
|
if (name == "ssh-auth-sock") // obsolete
|
||||||
|
;
|
||||||
|
else if (trusted
|
||||||
|
|| name == settings.buildTimeout.name
|
||||||
|
|| name == "connect-timeout"
|
||||||
|
|| (name == "builders" && value == ""))
|
||||||
|
settings.set(name, value);
|
||||||
|
else if (setSubstituters(settings.substituters))
|
||||||
|
;
|
||||||
|
else if (setSubstituters(settings.extraSubstituters))
|
||||||
|
;
|
||||||
|
else
|
||||||
|
warn("ignoring the user-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
|
||||||
|
} catch (UsageError & e) {
|
||||||
|
warn(e.what());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger->stopWork();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQuerySubstitutablePathInfo: {
|
||||||
|
Path path = absPath(readString(from));
|
||||||
|
logger->startWork();
|
||||||
|
SubstitutablePathInfos infos;
|
||||||
|
store->querySubstitutablePathInfos({path}, infos);
|
||||||
|
logger->stopWork();
|
||||||
|
SubstitutablePathInfos::iterator i = infos.find(path);
|
||||||
|
if (i == infos.end())
|
||||||
|
to << 0;
|
||||||
|
else {
|
||||||
|
to << 1 << i->second.deriver << i->second.references << i->second.downloadSize << i->second.narSize;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQuerySubstitutablePathInfos: {
|
||||||
|
PathSet paths = readStorePaths<PathSet>(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
SubstitutablePathInfos infos;
|
||||||
|
store->querySubstitutablePathInfos(paths, infos);
|
||||||
|
logger->stopWork();
|
||||||
|
to << infos.size();
|
||||||
|
for (auto & i : infos) {
|
||||||
|
to << i.first << i.second.deriver << i.second.references
|
||||||
|
<< i.second.downloadSize << i.second.narSize;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryAllValidPaths: {
|
||||||
|
logger->startWork();
|
||||||
|
PathSet paths = store->queryAllValidPaths();
|
||||||
|
logger->stopWork();
|
||||||
|
to << paths;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryPathInfo: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
std::shared_ptr<const ValidPathInfo> info;
|
||||||
|
logger->startWork();
|
||||||
|
try {
|
||||||
|
info = store->queryPathInfo(path);
|
||||||
|
} catch (InvalidPath &) {
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) < 17) throw;
|
||||||
|
}
|
||||||
|
logger->stopWork();
|
||||||
|
if (info) {
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
|
||||||
|
to << 1;
|
||||||
|
to << info->deriver << info->narHash.to_string(Base16, false) << info->references
|
||||||
|
<< info->registrationTime << info->narSize;
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
|
||||||
|
to << info->ultimate
|
||||||
|
<< info->sigs
|
||||||
|
<< info->ca;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
|
||||||
|
to << 0;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopOptimiseStore:
|
||||||
|
logger->startWork();
|
||||||
|
store->optimiseStore();
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case wopVerifyStore: {
|
||||||
|
bool checkContents, repair;
|
||||||
|
from >> checkContents >> repair;
|
||||||
|
logger->startWork();
|
||||||
|
if (repair && !trusted)
|
||||||
|
throw Error("you are not privileged to repair paths");
|
||||||
|
bool errors = store->verifyStore(checkContents, (RepairFlag) repair);
|
||||||
|
logger->stopWork();
|
||||||
|
to << errors;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddSignatures: {
|
||||||
|
Path path = readStorePath(*store, from);
|
||||||
|
StringSet sigs = readStrings<StringSet>(from);
|
||||||
|
logger->startWork();
|
||||||
|
if (!trusted)
|
||||||
|
throw Error("you are not privileged to add signatures");
|
||||||
|
store->addSignatures(path, sigs);
|
||||||
|
logger->stopWork();
|
||||||
|
to << 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopNarFromPath: {
|
||||||
|
auto path = readStorePath(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
logger->stopWork();
|
||||||
|
dumpPath(path, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddToStoreNar: {
|
||||||
|
bool repair, dontCheckSigs;
|
||||||
|
ValidPathInfo info;
|
||||||
|
info.path = readStorePath(*store, from);
|
||||||
|
from >> info.deriver;
|
||||||
|
if (!info.deriver.empty())
|
||||||
|
store->assertStorePath(info.deriver);
|
||||||
|
info.narHash = Hash(readString(from), htSHA256);
|
||||||
|
info.references = readStorePaths<PathSet>(*store, from);
|
||||||
|
from >> info.registrationTime >> info.narSize >> info.ultimate;
|
||||||
|
info.sigs = readStrings<StringSet>(from);
|
||||||
|
from >> info.ca >> repair >> dontCheckSigs;
|
||||||
|
if (!trusted && dontCheckSigs)
|
||||||
|
dontCheckSigs = false;
|
||||||
|
if (!trusted)
|
||||||
|
info.ultimate = false;
|
||||||
|
|
||||||
|
std::string saved;
|
||||||
|
std::unique_ptr<Source> source;
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
|
||||||
|
source = std::make_unique<TunnelSource>(from, to);
|
||||||
|
else {
|
||||||
|
TeeSink tee(from);
|
||||||
|
parseDump(tee, tee.source);
|
||||||
|
saved = std::move(*tee.source.data);
|
||||||
|
source = std::make_unique<StringSource>(saved);
|
||||||
|
}
|
||||||
|
|
||||||
|
logger->startWork();
|
||||||
|
|
||||||
|
// FIXME: race if addToStore doesn't read source?
|
||||||
|
store->addToStore(info, *source, (RepairFlag) repair,
|
||||||
|
dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr);
|
||||||
|
|
||||||
|
logger->stopWork();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryMissing: {
|
||||||
|
PathSet targets = readStorePaths<PathSet>(*store, from);
|
||||||
|
logger->startWork();
|
||||||
|
PathSet willBuild, willSubstitute, unknown;
|
||||||
|
unsigned long long downloadSize, narSize;
|
||||||
|
store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
|
||||||
|
logger->stopWork();
|
||||||
|
to << willBuild << willSubstitute << unknown << downloadSize << narSize;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
throw Error(format("invalid operation %1%") % op);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void processConnection(
|
||||||
|
ref<Store> store,
|
||||||
|
FdSource & from,
|
||||||
|
FdSink & to,
|
||||||
|
bool trusted,
|
||||||
|
const std::string & userName,
|
||||||
|
uid_t userId)
|
||||||
|
{
|
||||||
|
MonitorFdHup monitor(from.fd);
|
||||||
|
|
||||||
|
/* Exchange the greeting. */
|
||||||
|
unsigned int magic = readInt(from);
|
||||||
|
if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
|
||||||
|
to << WORKER_MAGIC_2 << PROTOCOL_VERSION;
|
||||||
|
to.flush();
|
||||||
|
unsigned int clientVersion = readInt(from);
|
||||||
|
|
||||||
|
if (clientVersion < 0x10a)
|
||||||
|
throw Error("the Nix client version is too old");
|
||||||
|
|
||||||
|
auto tunnelLogger = new TunnelLogger(to, clientVersion);
|
||||||
|
auto prevLogger = nix::logger;
|
||||||
|
logger = tunnelLogger;
|
||||||
|
|
||||||
|
unsigned int opCount = 0;
|
||||||
|
|
||||||
|
Finally finally([&]() {
|
||||||
|
_isInterrupted = false;
|
||||||
|
prevLogger->log(lvlDebug, fmt("%d operations", opCount));
|
||||||
|
});
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
|
||||||
|
setAffinityTo(readInt(from));
|
||||||
|
|
||||||
|
readInt(from); // obsolete reserveSpace
|
||||||
|
|
||||||
|
/* Send startup error messages to the client. */
|
||||||
|
tunnelLogger->startWork();
|
||||||
|
|
||||||
|
try {
|
||||||
|
|
||||||
|
/* If we can't accept clientVersion, then throw an error
|
||||||
|
*here* (not above). */
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/* Prevent users from doing something very dangerous. */
|
||||||
|
if (geteuid() == 0 &&
|
||||||
|
querySetting("build-users-group", "") == "")
|
||||||
|
throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
store->createUser(userName, userId);
|
||||||
|
|
||||||
|
tunnelLogger->stopWork();
|
||||||
|
to.flush();
|
||||||
|
|
||||||
|
/* Process client requests. */
|
||||||
|
while (true) {
|
||||||
|
WorkerOp op;
|
||||||
|
try {
|
||||||
|
op = (WorkerOp) readInt(from);
|
||||||
|
} catch (Interrupted & e) {
|
||||||
|
break;
|
||||||
|
} catch (EndOfFile & e) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
opCount++;
|
||||||
|
|
||||||
|
try {
|
||||||
|
performOp(tunnelLogger, store, trusted, clientVersion, from, to, op);
|
||||||
|
} catch (Error & e) {
|
||||||
|
/* If we're not in a state where we can send replies, then
|
||||||
|
something went wrong processing the input of the
|
||||||
|
client. This can happen especially if I/O errors occur
|
||||||
|
during addTextToStore() / importPath(). If that
|
||||||
|
happens, just send the error message and exit. */
|
||||||
|
bool errorAllowed = tunnelLogger->state_.lock()->canSendStderr;
|
||||||
|
tunnelLogger->stopWork(false, e.msg(), e.status);
|
||||||
|
if (!errorAllowed) throw;
|
||||||
|
} catch (std::bad_alloc & e) {
|
||||||
|
tunnelLogger->stopWork(false, "Nix daemon out of memory", 1);
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
|
||||||
|
to.flush();
|
||||||
|
|
||||||
|
assert(!tunnelLogger->state_.lock()->canSendStderr);
|
||||||
|
};
|
||||||
|
|
||||||
|
} catch (std::exception & e) {
|
||||||
|
tunnelLogger->stopWork(false, e.what(), 1);
|
||||||
|
to.flush();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
src/libstore/daemon.hh (new file, 14 lines)
@@ -0,0 +1,14 @@
+#include "serialise.hh"
+#include "store-api.hh"
+
+namespace nix::daemon {
+
+void processConnection(
+    ref<Store> store,
+    FdSource & from,
+    FdSink & to,
+    bool trusted,
+    const std::string & userName,
+    uid_t userId);
+
+}
@@ -88,7 +88,7 @@ struct LegacySSHStore : public Store
     }
 
     void queryPathInfoUncached(const Path & path,
-        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override
+        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
     {
         try {
             auto conn(connections->get());
@@ -625,7 +625,7 @@ uint64_t LocalStore::addValidPath(State & state,
 
 
 void LocalStore::queryPathInfoUncached(const Path & path,
-    Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
+    Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
 {
     try {
         auto info = std::make_shared<ValidPathInfo>();
@@ -930,8 +930,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos)
            not be valid yet. */
         for (auto & i : infos)
             if (isDerivation(i.path)) {
-                // FIXME: inefficient; we already loaded the
-                // derivation in addValidPath().
+                // FIXME: inefficient; we already loaded the derivation in addValidPath().
                 Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(i.path));
                 checkDerivationOutputs(i.path, drv);
             }
@@ -127,7 +127,7 @@ public:
     PathSet queryAllValidPaths() override;
 
     void queryPathInfoUncached(const Path & path,
-        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
+        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
@@ -157,7 +157,7 @@ public:
        true) or simply the contents of a regular file (if recursive ==
        false). */
     Path addToStoreFromDump(const string & dump, const string & name,
-        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair);
+        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
 
     Path addTextToStore(const string & name, const string & s,
         const PathSet & references, RepairFlag repair) override;
@@ -33,7 +33,7 @@ void Store::computeFSClosure(const PathSet & startPaths,
             state->pending++;
         }
 
-        queryPathInfo(path, {[&, path](std::future<ref<ValidPathInfo>> fut) {
+        queryPathInfo(path, {[&, path](std::future<ref<const ValidPathInfo>> fut) {
             // FIXME: calls to isValidPath() should be async
 
             try {
@@ -214,7 +214,7 @@ public:
 
     void upsertNarInfo(
         const std::string & uri, const std::string & hashPart,
-        std::shared_ptr<ValidPathInfo> info) override
+        std::shared_ptr<const ValidPathInfo> info) override
     {
         retrySQLite<void>([&]() {
             auto state(_state.lock());
@@ -223,7 +223,7 @@ public:
 
             if (info) {
 
-                auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
+                auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);
 
                 assert(hashPart == storePathToHash(info->path));
 
@@ -23,7 +23,7 @@ public:
 
     virtual void upsertNarInfo(
         const std::string & uri, const std::string & hashPart,
-        std::shared_ptr<ValidPathInfo> info) = 0;
+        std::shared_ptr<const ValidPathInfo> info) = 0;
 };
 
 /* Return a singleton cache object that can be used concurrently by
@@ -350,7 +350,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
 
 
 void RemoteStore::queryPathInfoUncached(const Path & path,
-    Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
+    Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
 {
     try {
         std::shared_ptr<ValidPathInfo> info;
@@ -43,7 +43,7 @@ public:
     PathSet queryAllValidPaths() override;
 
     void queryPathInfoUncached(const Path & path,
-        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
+        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
 
     void queryReferrers(const Path & path, PathSet & referrers) override;
 
@@ -320,10 +320,10 @@ bool Store::isValidPathUncached(const Path & path)
 
 ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
 {
-    std::promise<ref<ValidPathInfo>> promise;
+    std::promise<ref<const ValidPathInfo>> promise;
 
     queryPathInfo(storePath,
-        {[&](std::future<ref<ValidPathInfo>> result) {
+        {[&](std::future<ref<const ValidPathInfo>> result) {
             try {
                 promise.set_value(result.get());
             } catch (...) {
@@ -336,7 +336,7 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
 
 
 void Store::queryPathInfo(const Path & storePath,
-    Callback<ref<ValidPathInfo>> callback) noexcept
+    Callback<ref<const ValidPathInfo>> callback) noexcept
 {
     std::string hashPart;
 
@@ -351,7 +351,7 @@ void Store::queryPathInfo(const Path & storePath,
                 stats.narInfoReadAverted++;
                 if (!*res)
                     throw InvalidPath(format("path '%s' is not valid") % storePath);
-                return callback(ref<ValidPathInfo>(*res));
+                return callback(ref<const ValidPathInfo>(*res));
             }
         }
 
@@ -367,7 +367,7 @@ void Store::queryPathInfo(const Path & storePath,
                     (res.second->path != storePath && storePathToName(storePath) != ""))
                     throw InvalidPath(format("path '%s' is not valid") % storePath);
             }
-            return callback(ref<ValidPathInfo>(res.second));
+            return callback(ref<const ValidPathInfo>(res.second));
         }
     }
 
@@ -376,7 +376,7 @@ void Store::queryPathInfo(const Path & storePath,
     auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
 
     queryPathInfoUncached(storePath,
-        {[this, storePath, hashPart, callbackPtr](std::future<std::shared_ptr<ValidPathInfo>> fut) {
+        {[this, storePath, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
 
             try {
                 auto info = fut.get();
@@ -396,7 +396,7 @@ void Store::queryPathInfo(const Path & storePath,
                     throw InvalidPath("path '%s' is not valid", storePath);
                 }
 
-                (*callbackPtr)(ref<ValidPathInfo>(info));
+                (*callbackPtr)(ref<const ValidPathInfo>(info));
             } catch (...) { callbackPtr->rethrow(); }
         }});
 }
@@ -418,7 +418,7 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti
 
     auto doQuery = [&](const Path & path ) {
         checkInterrupt();
-        queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<ValidPathInfo>> fut) {
+        queryPathInfo(path, {[path, &state_, &wakeup](std::future<ref<const ValidPathInfo>> fut) {
             auto state(state_.lock());
             try {
                 auto info = fut.get();
@@ -259,7 +259,7 @@ protected:
 
     struct State
     {
-        LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache;
+        LRUCache<std::string, std::shared_ptr<const ValidPathInfo>> pathInfoCache;
     };
 
     Sync<State> state;
@@ -362,12 +362,12 @@ public:
 
     /* Asynchronous version of queryPathInfo(). */
     void queryPathInfo(const Path & path,
-        Callback<ref<ValidPathInfo>> callback) noexcept;
+        Callback<ref<const ValidPathInfo>> callback) noexcept;
 
 protected:
 
     virtual void queryPathInfoUncached(const Path & path,
-        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept = 0;
+        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept = 0;
 
 public:
 
@@ -423,6 +423,13 @@ public:
         bool recursive = true, HashType hashAlgo = htSHA256,
         PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) = 0;
 
+    // FIXME: remove?
+    virtual Path addToStoreFromDump(const string & dump, const string & name,
+        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
+    {
+        throw Error("addToStoreFromDump() is not supported by this store");
+    }
+
     /* Like addToStore, but the contents written to the output path is
        a regular file containing the given string. */
     virtual Path addTextToStore(const string & name, const string & s,
@@ -62,6 +62,9 @@ typedef enum {
 #define STDERR_RESULT 0x52534c54
 
 
+class Store;
+struct Source;
+
 Path readStorePath(Store & store, Source & from);
 template<class T> T readStorePaths(Store & store, Source & from);
 
@@ -375,4 +375,13 @@ void copyNAR(Source & source, Sink & sink)
 }
 
 
+void copyPath(const Path & from, const Path & to)
+{
+    auto source = sinkToSource([&](Sink & sink) {
+        dumpPath(from, sink);
+    });
+    restorePath(to, *source);
+}
+
+
 }
@@ -77,6 +77,8 @@ void restorePath(const Path & path, Source & source);
 /* Read a NAR from 'source' and write it to 'sink'. */
 void copyNAR(Source & source, Sink & sink);
 
+void copyPath(const Path & from, const Path & to);
+
 
 extern const std::string narVersionMagic1;
 
@@ -21,8 +21,10 @@
 #include <pwd.h>
 #include <sys/ioctl.h>
 #include <sys/types.h>
+#include <sys/socket.h>
 #include <sys/wait.h>
 #include <sys/time.h>
+#include <sys/un.h>
 #include <unistd.h>
 
 #ifdef __APPLE__
@@ -1470,7 +1472,7 @@ static Sync<std::pair<unsigned short, unsigned short>> windowSize{{0, 0}};
 static void updateWindowSize()
 {
     struct winsize ws;
-    if (ioctl(1, TIOCGWINSZ, &ws) == 0) {
+    if (ioctl(2, TIOCGWINSZ, &ws) == 0) {
         auto windowSize_(windowSize.lock());
         windowSize_->first = ws.ws_row;
         windowSize_->second = ws.ws_col;
@@ -1567,4 +1569,33 @@ std::unique_ptr<InterruptCallback> createInterruptCallback(std::function<void()>
     return std::unique_ptr<InterruptCallback>(res.release());
 }
 
+
+AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode)
+{
+    AutoCloseFD fdSocket = socket(PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+    if (!fdSocket)
+        throw SysError("cannot create Unix domain socket");
+
+    closeOnExec(fdSocket.get());
+
+    struct sockaddr_un addr;
+    addr.sun_family = AF_UNIX;
+    if (path.size() >= sizeof(addr.sun_path))
+        throw Error("socket path '%1%' is too long", path);
+    strcpy(addr.sun_path, path.c_str());
+
+    unlink(path.c_str());
+
+    if (bind(fdSocket.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
+        throw SysError("cannot bind to socket '%1%'", path);
+
+    if (chmod(path.c_str(), mode) == -1)
+        throw SysError("changing permissions on '%1%'", path);
+
+    if (listen(fdSocket.get(), 5) == -1)
+        throw SysError("cannot listen on socket '%1%'", path);
+
+    return fdSocket;
+}
+
 }
@@ -577,4 +577,8 @@ typedef std::function<bool(const Path & path)> PathFilter;
 extern PathFilter defaultPathFilter;
 
 
+/* Create a Unix domain socket in listen mode. */
+AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode);
+
+
 }
@@ -245,7 +245,17 @@ static void _main(int argc, char * * argv)
     auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
     state->repair = repair;
 
-    Bindings & autoArgs = *myArgs.getAutoArgs(*state);
+    auto autoArgs = myArgs.getAutoArgs(*state);
+
+    if (runEnv) {
+        auto newArgs = state->allocBindings(autoArgs->size() + 1);
+        auto tru = state->allocValue();
+        mkBool(*tru, true);
+        newArgs->push_back(Attr(state->symbols.create("inNixShell"), tru));
+        for (auto & i : *autoArgs) newArgs->push_back(i);
+        newArgs->sort();
+        autoArgs = newArgs;
+    }
 
     if (packages) {
         std::ostringstream joined;
@@ -299,9 +309,9 @@ static void _main(int argc, char * * argv)
     state->eval(e, vRoot);

     for (auto & i : attrPaths) {
-        Value & v(*findAlongAttrPath(*state, i, autoArgs, vRoot));
+        Value & v(*findAlongAttrPath(*state, i, *autoArgs, vRoot));
         state->forceValue(v);
-        getDerivations(*state, v, "", autoArgs, drvs, false);
+        getDerivations(*state, v, "", *autoArgs, drvs, false);
     }
 }
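The nix-shell hunk above copies the user-supplied bindings into a set one slot larger, injects inNixShell = true, and re-sorts so attribute lookup keeps working. The same copy-extend-sort pattern can be written as a small helper; this is only a sketch (the helper name is invented, while the EvalState calls are the ones used in the hunk):

    // Hypothetical helper: return a copy of 'base' with one extra boolean
    // attribute added. Bindings must stay sorted for attribute lookup.
    static Bindings * withBoolAttr(EvalState & state, Bindings & base,
        const std::string & name, bool value)
    {
        auto res = state.allocBindings(base.size() + 1);
        auto v = state.allocValue();
        mkBool(*v, value);
        res->push_back(Attr(state.symbols.create(name), v));
        for (auto & attr : base) res->push_back(attr);
        res->sort();
        return res;
    }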
@@ -2,14 +2,12 @@
 #include "local-store.hh"
 #include "util.hh"
 #include "serialise.hh"
-#include "worker-protocol.hh"
 #include "archive.hh"
-#include "affinity.hh"
 #include "globals.hh"
-#include "monitor-fd.hh"
 #include "derivations.hh"
 #include "finally.hh"
 #include "legacy.hh"
+#include "daemon.hh"

 #include <algorithm>
@@ -32,6 +30,7 @@
 #endif

 using namespace nix;
+using namespace nix::daemon;

 #ifndef __linux__
 #define SPLICE_F_MOVE 0
@@ -53,793 +52,6 @@ static ssize_t splice(int fd_in, void *off_in, int fd_out, void *off_out, size_t
 }
 #endif

-static FdSource from(STDIN_FILENO);
-static FdSink to(STDOUT_FILENO);
-
-
-Sink & operator << (Sink & sink, const Logger::Fields & fields)
-{
-    sink << fields.size();
-    for (auto & f : fields) {
-        sink << f.type;
-        if (f.type == Logger::Field::tInt)
-            sink << f.i;
-        else if (f.type == Logger::Field::tString)
-            sink << f.s;
-        else abort();
-    }
-    return sink;
-}
-
-
-/* Logger that forwards log messages to the client, *if* we're in a
-   state where the protocol allows it (i.e., when canSendStderr is
-   true). */
-struct TunnelLogger : public Logger
-{
-    struct State
-    {
-        bool canSendStderr = false;
-        std::vector<std::string> pendingMsgs;
-    };
-
-    Sync<State> state_;
-
-    unsigned int clientVersion;
-
-    TunnelLogger(unsigned int clientVersion) : clientVersion(clientVersion) { }
-
-    void enqueueMsg(const std::string & s)
-    {
-        auto state(state_.lock());
-
-        if (state->canSendStderr) {
-            assert(state->pendingMsgs.empty());
-            try {
-                to(s);
-                to.flush();
-            } catch (...) {
-                /* Write failed; that means that the other side is
-                   gone. */
-                state->canSendStderr = false;
-                throw;
-            }
-        } else
-            state->pendingMsgs.push_back(s);
-    }
-
-    void log(Verbosity lvl, const FormatOrString & fs) override
-    {
-        if (lvl > verbosity) return;
-
-        StringSink buf;
-        buf << STDERR_NEXT << (fs.s + "\n");
-        enqueueMsg(*buf.s);
-    }
-
-    /* startWork() means that we're starting an operation for which we
-       want to send out stderr to the client. */
-    void startWork()
-    {
-        auto state(state_.lock());
-        state->canSendStderr = true;
-
-        for (auto & msg : state->pendingMsgs)
-            to(msg);
-
-        state->pendingMsgs.clear();
-
-        to.flush();
-    }
-
-    /* stopWork() means that we're done; stop sending stderr to the
-       client. */
-    void stopWork(bool success = true, const string & msg = "", unsigned int status = 0)
-    {
-        auto state(state_.lock());
-
-        state->canSendStderr = false;
-
-        if (success)
-            to << STDERR_LAST;
-        else {
-            to << STDERR_ERROR << msg;
-            if (status != 0) to << status;
-        }
-    }
-
-    void startActivity(ActivityId act, Verbosity lvl, ActivityType type,
-        const std::string & s, const Fields & fields, ActivityId parent) override
-    {
-        if (GET_PROTOCOL_MINOR(clientVersion) < 20) {
-            if (!s.empty())
-                log(lvl, s + "...");
-            return;
-        }
-
-        StringSink buf;
-        buf << STDERR_START_ACTIVITY << act << lvl << type << s << fields << parent;
-        enqueueMsg(*buf.s);
-    }
-
-    void stopActivity(ActivityId act) override
-    {
-        if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
-        StringSink buf;
-        buf << STDERR_STOP_ACTIVITY << act;
-        enqueueMsg(*buf.s);
-    }
-
-    void result(ActivityId act, ResultType type, const Fields & fields) override
-    {
-        if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
-        StringSink buf;
-        buf << STDERR_RESULT << act << type << fields;
-        enqueueMsg(*buf.s);
-    }
-};
-
-
-struct TunnelSink : Sink
-{
-    Sink & to;
-    TunnelSink(Sink & to) : to(to) { }
-    virtual void operator () (const unsigned char * data, size_t len)
-    {
-        to << STDERR_WRITE;
-        writeString(data, len, to);
-    }
-};
-
-
-struct TunnelSource : BufferedSource
-{
-    Source & from;
-    TunnelSource(Source & from) : from(from) { }
-protected:
-    size_t readUnbuffered(unsigned char * data, size_t len) override
-    {
-        to << STDERR_READ << len;
-        to.flush();
-        size_t n = readString(data, len, from);
-        if (n == 0) throw EndOfFile("unexpected end-of-file");
-        return n;
-    }
-};
-
-
-/* If the NAR archive contains a single file at top-level, then save
-   the contents of the file to `s'. Otherwise barf. */
-struct RetrieveRegularNARSink : ParseSink
-{
-    bool regular;
-    string s;
-
-    RetrieveRegularNARSink() : regular(true) { }
-
-    void createDirectory(const Path & path)
-    {
-        regular = false;
-    }
-
-    void receiveContents(unsigned char * data, unsigned int len)
-    {
-        s.append((const char *) data, len);
-    }
-
-    void createSymlink(const Path & path, const string & target)
-    {
-        regular = false;
-    }
-};
-
-
-static void performOp(TunnelLogger * logger, ref<Store> store,
-    bool trusted, unsigned int clientVersion,
-    Source & from, Sink & to, unsigned int op)
-{
-    switch (op) {
-
-    case wopIsValidPath: {
-        /* 'readStorePath' could raise an error leading to the connection
-           being closed. To be able to recover from an invalid path error,
-           call 'startWork' early, and do 'assertStorePath' afterwards so
-           that the 'Error' exception handler doesn't close the
-           connection. */
-        Path path = readString(from);
-        logger->startWork();
-        store->assertStorePath(path);
-        bool result = store->isValidPath(path);
-        logger->stopWork();
-        to << result;
-        break;
-    }
-
-    case wopQueryValidPaths: {
-        PathSet paths = readStorePaths<PathSet>(*store, from);
-        logger->startWork();
-        PathSet res = store->queryValidPaths(paths);
-        logger->stopWork();
-        to << res;
-        break;
-    }
-
-    case wopHasSubstitutes: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        PathSet res = store->querySubstitutablePaths({path});
-        logger->stopWork();
-        to << (res.find(path) != res.end());
-        break;
-    }
-
-    case wopQuerySubstitutablePaths: {
-        PathSet paths = readStorePaths<PathSet>(*store, from);
-        logger->startWork();
-        PathSet res = store->querySubstitutablePaths(paths);
-        logger->stopWork();
-        to << res;
-        break;
-    }
-
-    case wopQueryPathHash: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        auto hash = store->queryPathInfo(path)->narHash;
-        logger->stopWork();
-        to << hash.to_string(Base16, false);
-        break;
-    }
-
-    case wopQueryReferences:
-    case wopQueryReferrers:
-    case wopQueryValidDerivers:
-    case wopQueryDerivationOutputs: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        PathSet paths;
-        if (op == wopQueryReferences)
-            paths = store->queryPathInfo(path)->references;
-        else if (op == wopQueryReferrers)
-            store->queryReferrers(path, paths);
-        else if (op == wopQueryValidDerivers)
-            paths = store->queryValidDerivers(path);
-        else paths = store->queryDerivationOutputs(path);
-        logger->stopWork();
-        to << paths;
-        break;
-    }
-
-    case wopQueryDerivationOutputNames: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        StringSet names;
-        names = store->queryDerivationOutputNames(path);
-        logger->stopWork();
-        to << names;
-        break;
-    }
-
-    case wopQueryDeriver: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        auto deriver = store->queryPathInfo(path)->deriver;
-        logger->stopWork();
-        to << deriver;
-        break;
-    }
-
-    case wopQueryPathFromHashPart: {
-        string hashPart = readString(from);
-        logger->startWork();
-        Path path = store->queryPathFromHashPart(hashPart);
-        logger->stopWork();
-        to << path;
-        break;
-    }
-
-    case wopAddToStore: {
-        bool fixed, recursive;
-        std::string s, baseName;
-        from >> baseName >> fixed /* obsolete */ >> recursive >> s;
-        /* Compatibility hack. */
-        if (!fixed) {
-            s = "sha256";
-            recursive = true;
-        }
-        HashType hashAlgo = parseHashType(s);
-
-        TeeSource savedNAR(from);
-        RetrieveRegularNARSink savedRegular;
-
-        if (recursive) {
-            /* Get the entire NAR dump from the client and save it to
-               a string so that we can pass it to
-               addToStoreFromDump(). */
-            ParseSink sink; /* null sink; just parse the NAR */
-            parseDump(sink, savedNAR);
-        } else
-            parseDump(savedRegular, from);
-
-        logger->startWork();
-        if (!savedRegular.regular) throw Error("regular file expected");
-
-        auto store2 = store.dynamic_pointer_cast<LocalStore>();
-        if (!store2) throw Error("operation is only supported by LocalStore");
-
-        Path path = store2->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
-        logger->stopWork();
-
-        to << path;
-        break;
-    }
-
-    case wopAddTextToStore: {
-        string suffix = readString(from);
-        string s = readString(from);
-        PathSet refs = readStorePaths<PathSet>(*store, from);
-        logger->startWork();
-        Path path = store->addTextToStore(suffix, s, refs, NoRepair);
-        logger->stopWork();
-        to << path;
-        break;
-    }
-
-    case wopExportPath: {
-        Path path = readStorePath(*store, from);
-        readInt(from); // obsolete
-        logger->startWork();
-        TunnelSink sink(to);
-        store->exportPath(path, sink);
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopImportPaths: {
-        logger->startWork();
-        TunnelSource source(from);
-        Paths paths = store->importPaths(source, nullptr,
-            trusted ? NoCheckSigs : CheckSigs);
-        logger->stopWork();
-        to << paths;
-        break;
-    }
-
-    case wopBuildPaths: {
-        PathSet drvs = readStorePaths<PathSet>(*store, from);
-        BuildMode mode = bmNormal;
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
-            mode = (BuildMode) readInt(from);
-
-            /* Repairing is not atomic, so disallowed for "untrusted"
-               clients. */
-            if (mode == bmRepair && !trusted)
-                throw Error("repairing is not allowed because you are not in 'trusted-users'");
-        }
-        logger->startWork();
-        store->buildPaths(drvs, mode);
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopBuildDerivation: {
-        Path drvPath = readStorePath(*store, from);
-        BasicDerivation drv;
-        readDerivation(from, *store, drv);
-        BuildMode buildMode = (BuildMode) readInt(from);
-        logger->startWork();
-        if (!trusted)
-            throw Error("you are not privileged to build derivations");
-        auto res = store->buildDerivation(drvPath, drv, buildMode);
-        logger->stopWork();
-        to << res.status << res.errorMsg;
-        break;
-    }
-
-    case wopEnsurePath: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        store->ensurePath(path);
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopAddTempRoot: {
-        Path path = readStorePath(*store, from);
-        logger->startWork();
-        store->addTempRoot(path);
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopAddIndirectRoot: {
-        Path path = absPath(readString(from));
-        logger->startWork();
-        store->addIndirectRoot(path);
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopSyncWithGC: {
-        logger->startWork();
-        store->syncWithGC();
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopFindRoots: {
-        logger->startWork();
-        Roots roots = store->findRoots(!trusted);
-        logger->stopWork();
-
-        size_t size = 0;
-        for (auto & i : roots)
-            size += i.second.size();
-
-        to << size;
-
-        for (auto & [target, links] : roots)
-            for (auto & link : links)
-                to << link << target;
-
-        break;
-    }
-
-    case wopCollectGarbage: {
-        GCOptions options;
-        options.action = (GCOptions::GCAction) readInt(from);
-        options.pathsToDelete = readStorePaths<PathSet>(*store, from);
-        from >> options.ignoreLiveness >> options.maxFreed;
-        // obsolete fields
-        readInt(from);
-        readInt(from);
-        readInt(from);
-
-        GCResults results;
-
-        logger->startWork();
-        if (options.ignoreLiveness)
-            throw Error("you are not allowed to ignore liveness");
-        store->collectGarbage(options, results);
-        logger->stopWork();
-
-        to << results.paths << results.bytesFreed << 0 /* obsolete */;
-
-        break;
-    }
-
-    case wopSetOptions: {
-        settings.keepFailed = readInt(from);
-        settings.keepGoing = readInt(from);
-        settings.tryFallback = readInt(from);
-        verbosity = (Verbosity) readInt(from);
-        settings.maxBuildJobs.assign(readInt(from));
-        settings.maxSilentTime = readInt(from);
-        readInt(from); // obsolete useBuildHook
-        settings.verboseBuild = lvlError == (Verbosity) readInt(from);
-        readInt(from); // obsolete logType
-        readInt(from); // obsolete printBuildTrace
-        settings.buildCores = readInt(from);
-        settings.useSubstitutes = readInt(from);
-
-        StringMap overrides;
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
-            unsigned int n = readInt(from);
-            for (unsigned int i = 0; i < n; i++) {
-                string name = readString(from);
-                string value = readString(from);
-                overrides.emplace(name, value);
-            }
-        }
-
-        logger->startWork();
-
-        for (auto & i : overrides) {
-            auto & name(i.first);
-            auto & value(i.second);
-
-            auto setSubstituters = [&](Setting<Strings> & res) {
-                if (name != res.name && res.aliases.count(name) == 0)
-                    return false;
-                StringSet trusted = settings.trustedSubstituters;
-                for (auto & s : settings.substituters.get())
-                    trusted.insert(s);
-                Strings subs;
-                auto ss = tokenizeString<Strings>(value);
-                for (auto & s : ss)
-                    if (trusted.count(s))
-                        subs.push_back(s);
-                    else
-                        warn("ignoring untrusted substituter '%s'", s);
-                res = subs;
-                return true;
-            };
-
-            try {
-                if (name == "ssh-auth-sock") // obsolete
-                    ;
-                else if (trusted
-                    || name == settings.buildTimeout.name
-                    || name == "connect-timeout"
-                    || (name == "builders" && value == ""))
-                    settings.set(name, value);
-                else if (setSubstituters(settings.substituters))
-                    ;
-                else if (setSubstituters(settings.extraSubstituters))
-                    ;
-                else
-                    warn("ignoring the user-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
-            } catch (UsageError & e) {
-                warn(e.what());
-            }
-        }
-
-        logger->stopWork();
-        break;
-    }
-
-    case wopQuerySubstitutablePathInfo: {
-        Path path = absPath(readString(from));
-        logger->startWork();
-        SubstitutablePathInfos infos;
-        store->querySubstitutablePathInfos({path}, infos);
-        logger->stopWork();
-        SubstitutablePathInfos::iterator i = infos.find(path);
-        if (i == infos.end())
-            to << 0;
-        else {
-            to << 1 << i->second.deriver << i->second.references << i->second.downloadSize << i->second.narSize;
-        }
-        break;
-    }
-
-    case wopQuerySubstitutablePathInfos: {
-        PathSet paths = readStorePaths<PathSet>(*store, from);
-        logger->startWork();
-        SubstitutablePathInfos infos;
-        store->querySubstitutablePathInfos(paths, infos);
-        logger->stopWork();
-        to << infos.size();
-        for (auto & i : infos) {
-            to << i.first << i.second.deriver << i.second.references
-               << i.second.downloadSize << i.second.narSize;
-        }
-        break;
-    }
-
-    case wopQueryAllValidPaths: {
-        logger->startWork();
-        PathSet paths = store->queryAllValidPaths();
-        logger->stopWork();
-        to << paths;
-        break;
-    }
-
-    case wopQueryPathInfo: {
-        Path path = readStorePath(*store, from);
-        std::shared_ptr<const ValidPathInfo> info;
-        logger->startWork();
-        try {
-            info = store->queryPathInfo(path);
-        } catch (InvalidPath &) {
-            if (GET_PROTOCOL_MINOR(clientVersion) < 17) throw;
-        }
-        logger->stopWork();
-        if (info) {
-            if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
-                to << 1;
-            to << info->deriver << info->narHash.to_string(Base16, false) << info->references
-               << info->registrationTime << info->narSize;
-            if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
-                to << info->ultimate
-                   << info->sigs
-                   << info->ca;
-            }
-        } else {
-            assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
-            to << 0;
-        }
-        break;
-    }
-
-    case wopOptimiseStore:
-        logger->startWork();
-        store->optimiseStore();
-        logger->stopWork();
-        to << 1;
-        break;
-
-    case wopVerifyStore: {
-        bool checkContents, repair;
-        from >> checkContents >> repair;
-        logger->startWork();
-        if (repair && !trusted)
-            throw Error("you are not privileged to repair paths");
-        bool errors = store->verifyStore(checkContents, (RepairFlag) repair);
-        logger->stopWork();
-        to << errors;
-        break;
-    }
-
-    case wopAddSignatures: {
-        Path path = readStorePath(*store, from);
-        StringSet sigs = readStrings<StringSet>(from);
-        logger->startWork();
-        if (!trusted)
-            throw Error("you are not privileged to add signatures");
-        store->addSignatures(path, sigs);
-        logger->stopWork();
-        to << 1;
-        break;
-    }
-
-    case wopNarFromPath: {
-        auto path = readStorePath(*store, from);
-        logger->startWork();
-        logger->stopWork();
-        dumpPath(path, to);
-        break;
-    }
-
-    case wopAddToStoreNar: {
-        bool repair, dontCheckSigs;
-        ValidPathInfo info;
-        info.path = readStorePath(*store, from);
-        from >> info.deriver;
-        if (!info.deriver.empty())
-            store->assertStorePath(info.deriver);
-        info.narHash = Hash(readString(from), htSHA256);
-        info.references = readStorePaths<PathSet>(*store, from);
-        from >> info.registrationTime >> info.narSize >> info.ultimate;
-        info.sigs = readStrings<StringSet>(from);
-        from >> info.ca >> repair >> dontCheckSigs;
-        if (!trusted && dontCheckSigs)
-            dontCheckSigs = false;
-        if (!trusted)
-            info.ultimate = false;
-
-        std::string saved;
-        std::unique_ptr<Source> source;
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
-            source = std::make_unique<TunnelSource>(from);
-        else {
-            TeeSink tee(from);
-            parseDump(tee, tee.source);
-            saved = std::move(*tee.source.data);
-            source = std::make_unique<StringSource>(saved);
-        }
-
-        logger->startWork();
-
-        // FIXME: race if addToStore doesn't read source?
-        store->addToStore(info, *source, (RepairFlag) repair,
-            dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr);
-
-        logger->stopWork();
-        break;
-    }
-
-    case wopQueryMissing: {
-        PathSet targets = readStorePaths<PathSet>(*store, from);
-        logger->startWork();
-        PathSet willBuild, willSubstitute, unknown;
-        unsigned long long downloadSize, narSize;
-        store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
-        logger->stopWork();
-        to << willBuild << willSubstitute << unknown << downloadSize << narSize;
-        break;
-    }
-
-    default:
-        throw Error(format("invalid operation %1%") % op);
-    }
-}
-
-
-static void processConnection(bool trusted,
-    const std::string & userName, uid_t userId)
-{
-    MonitorFdHup monitor(from.fd);
-
-    /* Exchange the greeting. */
-    unsigned int magic = readInt(from);
-    if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
-    to << WORKER_MAGIC_2 << PROTOCOL_VERSION;
-    to.flush();
-    unsigned int clientVersion = readInt(from);
-
-    if (clientVersion < 0x10a)
-        throw Error("the Nix client version is too old");
-
-    auto tunnelLogger = new TunnelLogger(clientVersion);
-    auto prevLogger = nix::logger;
-    logger = tunnelLogger;
-
-    unsigned int opCount = 0;
-
-    Finally finally([&]() {
-        _isInterrupted = false;
-        prevLogger->log(lvlDebug, fmt("%d operations", opCount));
-    });
-
-    if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
-        setAffinityTo(readInt(from));
-
-    readInt(from); // obsolete reserveSpace
-
-    /* Send startup error messages to the client. */
-    tunnelLogger->startWork();
-
-    try {
-
-        /* If we can't accept clientVersion, then throw an error
-           *here* (not above). */
-
-#if 0
-        /* Prevent users from doing something very dangerous. */
-        if (geteuid() == 0 &&
-            querySetting("build-users-group", "") == "")
-            throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!");
-#endif
-
-        /* Open the store. */
-        Store::Params params; // FIXME: get params from somewhere
-        // Disable caching since the client already does that.
-        params["path-info-cache-size"] = "0";
-        auto store = openStore(settings.storeUri, params);
-
-        store->createUser(userName, userId);
-
-        tunnelLogger->stopWork();
-        to.flush();
-
-        /* Process client requests. */
-        while (true) {
-            WorkerOp op;
-            try {
-                op = (WorkerOp) readInt(from);
-            } catch (Interrupted & e) {
-                break;
-            } catch (EndOfFile & e) {
-                break;
-            }
-
-            opCount++;
-
-            try {
-                performOp(tunnelLogger, store, trusted, clientVersion, from, to, op);
-            } catch (Error & e) {
-                /* If we're not in a state where we can send replies, then
-                   something went wrong processing the input of the
-                   client. This can happen especially if I/O errors occur
-                   during addTextToStore() / importPath(). If that
-                   happens, just send the error message and exit. */
-                bool errorAllowed = tunnelLogger->state_.lock()->canSendStderr;
-                tunnelLogger->stopWork(false, e.msg(), e.status);
-                if (!errorAllowed) throw;
-            } catch (std::bad_alloc & e) {
-                tunnelLogger->stopWork(false, "Nix daemon out of memory", 1);
-                throw;
-            }
-
-            to.flush();
-
-            assert(!tunnelLogger->state_.lock()->canSendStderr);
-        };
-
-    } catch (std::exception & e) {
-        tunnelLogger->stopWork(false, e.what(), 1);
-        to.flush();
-        return;
-    }
-}
-
-
 static void sigChldHandler(int sigNo)
 {
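The removed processConnection() above is the daemon's half of the worker-protocol greeting: read WORKER_MAGIC_1, answer with WORKER_MAGIC_2 and PROTOCOL_VERSION, then read the client's version. For orientation, the mirror-image client handshake is sketched below; it is illustrative only, stops right after the version exchange (a real client also sends the CPU-affinity and reserve-space fields that the daemon reads next), and assumes a socket fd that is already connected to the daemon:

    #include "serialise.hh"
    #include "worker-protocol.hh"

    using namespace nix;

    // Perform the initial handshake on an already-connected daemon socket
    // and return the protocol version announced by the daemon.
    unsigned int greetDaemon(int fd)
    {
        FdSink to(fd);
        FdSource from(fd);

        to << WORKER_MAGIC_1;      // the client speaks first
        to.flush();

        unsigned int magic = readInt(from);
        if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");

        unsigned int daemonVersion = readInt(from);
        to << PROTOCOL_VERSION;    // announce the client's own version
        to.flush();

        return daemonVersion;
    }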
@@ -928,6 +140,15 @@ static PeerInfo getPeerInfo(int remote)
 #define SD_LISTEN_FDS_START 3


+static ref<Store> openUncachedStore()
+{
+    Store::Params params; // FIXME: get params from somewhere
+    // Disable caching since the client already does that.
+    params["path-info-cache-size"] = "0";
+    return openStore(settings.storeUri, params);
+}
+
+
 static void daemonLoop(char * * argv)
 {
     if (chdir("/") == -1)
@@ -944,53 +165,15 @@ static void daemonLoop(char * * argv)
         if (getEnv("LISTEN_PID") != std::to_string(getpid()) || getEnv("LISTEN_FDS") != "1")
             throw Error("unexpected systemd environment variables");
         fdSocket = SD_LISTEN_FDS_START;
+        closeOnExec(fdSocket.get());
     }

     /* Otherwise, create and bind to a Unix domain socket. */
     else {
-
-        /* Create and bind to a Unix domain socket. */
-        fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
-        if (!fdSocket)
-            throw SysError("cannot create Unix domain socket");
-
-        string socketPath = settings.nixDaemonSocketFile;
-
-        createDirs(dirOf(socketPath));
-
-        /* Urgh, sockaddr_un allows path names of only 108 characters.
-           So chdir to the socket directory so that we can pass a
-           relative path name. */
-        if (chdir(dirOf(socketPath).c_str()) == -1)
-            throw SysError("cannot change current directory");
-        Path socketPathRel = "./" + baseNameOf(socketPath);
-
-        struct sockaddr_un addr;
-        addr.sun_family = AF_UNIX;
-        if (socketPathRel.size() >= sizeof(addr.sun_path))
-            throw Error(format("socket path '%1%' is too long") % socketPathRel);
-        strcpy(addr.sun_path, socketPathRel.c_str());
-
-        unlink(socketPath.c_str());
-
-        /* Make sure that the socket is created with 0666 permission
-           (everybody can connect --- provided they have access to the
-           directory containing the socket). */
-        mode_t oldMode = umask(0111);
-        int res = bind(fdSocket.get(), (struct sockaddr *) &addr, sizeof(addr));
-        umask(oldMode);
-        if (res == -1)
-            throw SysError(format("cannot bind to socket '%1%'") % socketPath);
-
-        if (chdir("/") == -1) /* back to the root */
-            throw SysError("cannot change current directory");
-
-        if (listen(fdSocket.get(), 5) == -1)
-            throw SysError(format("cannot listen on socket '%1%'") % socketPath);
+        createDirs(dirOf(settings.nixDaemonSocketFile));
+        fdSocket = createUnixDomainSocket(settings.nixDaemonSocketFile, 0666);
     }

-    closeOnExec(fdSocket.get());
-
     /* Loop accepting connections. */
     while (1) {
@@ -1054,9 +237,9 @@ static void daemonLoop(char * * argv)
                 }

                 /* Handle the connection. */
-                from.fd = remote.get();
-                to.fd = remote.get();
-                processConnection(trusted, user, peer.uid);
+                FdSource from(remote.get());
+                FdSink to(remote.get());
+                processConnection(openUncachedStore(), from, to, trusted, user, peer.uid);

                 exit(0);
             }, options);
@@ -1136,7 +319,9 @@ static int _main(int argc, char * * argv)
                 }
             }
         } else {
-            processConnection(true, "root", 0);
+            FdSource from(STDIN_FILENO);
+            FdSink to(STDOUT_FILENO);
+            processConnection(openUncachedStore(), from, to, true, "root", 0);
         }
     } else {
         daemonLoop(argv);
@@ -632,8 +632,7 @@ static void opDelete(Strings opFlags, Strings opArgs)
 }


-/* Dump a path as a Nix archive. The archive is written to standard
-   output. */
+/* Dump a path as a Nix archive. The archive is written to stdout */
 static void opDump(Strings opFlags, Strings opArgs)
 {
     if (!opFlags.empty()) throw UsageError("unknown flag");

@@ -646,8 +645,7 @@ static void opDump(Strings opFlags, Strings opArgs)
 }


-/* Restore a value from a Nix archive. The archive is read from
-   standard input. */
+/* Restore a value from a Nix archive. The archive is read from stdin. */
 static void opRestore(Strings opFlags, Strings opArgs)
 {
     if (!opFlags.empty()) throw UsageError("unknown flag");
@@ -17,7 +17,7 @@ namespace flake {
 enum HandleLockFile : unsigned int;
 }

-/* A command that require a Nix store. */
+/* A command that requires a Nix store. */
 struct StoreCommand : virtual Command
 {
     StoreCommand();

@@ -91,6 +91,7 @@ private:
     std::vector<std::string> _installables;
 };

+/* A command that operates on exactly one "installable" */
 struct InstallableCommand : virtual Args, SourceExprCommand
 {
     std::shared_ptr<Installable> installable;
@@ -36,7 +36,7 @@ struct CmdEval : MixJSON, InstallableCommand
         },
         Example{
             "To get the current version of Nixpkgs:",
-            "nix eval --raw nixpkgs.lib.nixpkgsVersion"
+            "nix eval --raw nixpkgs.lib.version"
         },
         Example{
             "To print the store path of the Hello package:",
@@ -15,6 +15,7 @@
 #include <sys/socket.h>
 #include <ifaddrs.h>
 #include <netdb.h>
+#include <netinet/in.h>

 extern std::string chrootHelperName;

@@ -341,7 +341,7 @@ public:
         }

         auto width = getWindowSize().second;
-        if (width <= 0) std::numeric_limits<decltype(width)>::max();
+        if (width <= 0) width = std::numeric_limits<decltype(width)>::max();

         writeToStderr("\r" + filterANSIEscapes(line, false, width) + "\e[K");
     }
@@ -1 +1 @@
-"{\"a\":123,\"b\":-456,\"c\":\"foo\",\"d\":\"foo\\n\\\"bar\\\"\",\"e\":true,\"f\":false,\"g\":[1,2,3],\"h\":[\"a\",[\"b\",{\"foo\\nbar\":{}}]],\"i\":3,\"j\":1.44}"
+"{\"a\":123,\"b\":-456,\"c\":\"foo\",\"d\":\"foo\\n\\\"bar\\\"\",\"e\":true,\"f\":false,\"g\":[1,2,3],\"h\":[\"a\",[\"b\",{\"foo\\nbar\":{}}]],\"i\":3,\"j\":1.44,\"k\":\"foo\"}"
@@ -9,4 +9,5 @@ builtins.toJSON
   h = [ "a" [ "b" { "foo\nbar" = {}; } ] ];
   i = 1 + 2;
   j = 1.44;
+  k = { __toString = self: self.a; a = "foo"; };
 }
@@ -7,9 +7,9 @@ export IMPURE_VAR=foo
 export SELECTED_IMPURE_VAR=baz
 export NIX_BUILD_SHELL=$SHELL
 output=$(nix-shell --pure shell.nix -A shellDrv --run \
-    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"')
+    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"')

-[ "$output" = " - foo - bar" ]
+[ "$output" = " - foo - bar - true" ]

 # Test --keep
 output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR shell.nix -A shellDrv --run \

@@ -19,10 +19,10 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR shell.nix -A shellDrv --run

 # Test nix-shell on a .drv
 [[ $(nix-shell --pure $(nix-instantiate shell.nix -A shellDrv) --run \
-    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]]
+    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]]

 [[ $(nix-shell --pure $(nix-instantiate shell.nix -A shellDrv) --run \
-    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]]
+    'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX - $TEST_inNixShell"') = " - foo - bar - false" ]]

 # Test nix-shell on a .drv symlink
@@ -1,4 +1,4 @@
-{ }:
+{ inNixShell ? false }:

 with import ./config.nix;


@@ -22,6 +22,7 @@ let pkgs = rec {
     name = "shellDrv";
     builder = "/does/not/exist";
     VAR_FROM_NIX = "bar";
+    TEST_inNixShell = if inNixShell then "true" else "false";
     inherit stdenv;
   };