#include "eval.hh"
#include "misc.hh"
#include "globals.hh"
#include "store-api.hh"
#include "util.hh"
#include "archive.hh"
#include "value-to-xml.hh"
#include "value-to-json.hh"
#include "names.hh"
#include "eval-inline.hh"

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include <algorithm>
#include <cstring>

namespace nix {


/*************************************************************
 * Miscellaneous
 *************************************************************/


/* Decode a context string ‘!<name>!<path>’ into a pair <path,
   name>. */
std::pair<string, string> decodeContext(const string & s)
{
    if (s.at(0) == '!') {
        size_t index = s.find("!", 1);
        return std::pair<string, string>(string(s, index + 1), string(s, 1, index - 1));
    } else
        return std::pair<string, string>(s, "");
}
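
/* For example, the context string "!out!/nix/store/abcd-foo.drv" decodes
   to the pair ("/nix/store/abcd-foo.drv", "out"), while a string without
   a leading ‘!’ maps to (s, "").  The store path here is illustrative. */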


/* Load and evaluate an expression from path specified by the
   argument. */
static void prim_import(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    Path path = state.coerceToPath(pos, *args[0], context);

    foreach (PathSet::iterator, i, context) {
        Path ctx = decodeContext(*i).first;
        assert(isStorePath(ctx));
        if (!store->isValidPath(ctx))
            throw EvalError(format("cannot import `%1%', since path `%2%' is not valid, at %3%")
                % path % ctx % pos);
        if (isDerivation(ctx))
            try {
                /* For performance, prefetch all substitute info. */
                PathSet willBuild, willSubstitute, unknown;
                unsigned long long downloadSize, narSize;
                queryMissing(*store, singleton<PathSet>(ctx),
                    willBuild, willSubstitute, unknown, downloadSize, narSize);

                /* !!! If using a substitute, we only need to fetch
                   the selected output of this derivation. */
                store->buildPaths(singleton<PathSet>(ctx));
            } catch (Error & e) {
                throw ImportError(e.msg());
            }
    }

    /* If the path is a valid .drv file, parse it and build a derivation
       attrset from it: drvPath and outPath with the right string
       contexts, type 'derivation', outputName, a list of outputs, and
       an attribute for each output.  This makes it possible to use
       derivations produced by something other than nix-instantiate
       (e.g. Guix), to use derivers installed into the store by channels
       as dependencies, and to let Hydra treat inputs accessed via
       NIX_PATH (i.e. through the <...> syntax) consistently with inputs
       passed as derivation arguments, by pointing NIX_PATH at the
       input's .drv file. */
    if (isStorePath(path) && store->isValidPath(path) && isDerivation(path)) {
        Derivation drv = readDerivation(path);
        Value & w = *state.allocValue();
        state.mkAttrs(w, 1 + drv.outputs.size());
        mkString(*state.allocAttr(w, state.sDrvPath), path, singleton<PathSet>("=" + path));
        state.mkList(*state.allocAttr(w, state.symbols.create("outputs")), drv.outputs.size());
        unsigned int outputs_index = 0;

        Value * outputsVal = w.attrs->find(state.symbols.create("outputs"))->value;
        foreach (DerivationOutputs::iterator, i, drv.outputs) {
            mkString(*state.allocAttr(w, state.symbols.create(i->first)),
                i->second.path, singleton<PathSet>("!" + i->first + "!" + path));
            mkString(*(outputsVal->list.elems[outputs_index++] = state.allocValue()),
                i->first);
        }
        w.attrs->sort();
        Value fun;
        state.evalFile(state.findFile("nix/imported-drv-to-derivation.nix"), fun);
        state.forceFunction(fun, pos);
        mkApp(v, fun, w);
        state.forceAttrs(v, pos);
    } else {
        state.evalFile(path, v);
    }
}


/* ‘scopedImport’ works like ‘import’, except that it takes a set of
   attributes to be added to the lexical scope of the expression,
   essentially extending or overriding the builtin variables.  For
   instance, the expression

     scopedImport { x = 1; } ./foo.nix

   where foo.nix contains ‘x’, will evaluate to 1.  This allows getting
   rid of function argument specifications in package expressions, and
   allows overriding or extending the set of builtin functions.  Unlike
   ‘import’, calls to ‘scopedImport’ are not memoised, so importing a
   file multiple times leads to multiple parsings/evaluations. */
static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    state.forceAttrs(*args[0]);
    Path path = resolveExprPath(state.coerceToPath(pos, *args[1], context));

    Env * env = &state.allocEnv(args[0]->attrs->size());
    env->up = &state.baseEnv;

    StaticEnv staticEnv(false, &state.staticBaseEnv);

    unsigned int displ = 0;
    for (auto & attr : *args[0]->attrs) {
        staticEnv.vars[attr.name] = displ;
        env->values[displ++] = attr.value;
    }

    startNest(nest, lvlTalkative, format("evaluating file `%1%'") % path);
    Expr * e = state.parseExprFromFile(path, staticEnv);

    e->eval(state, *env, v);
}


/* Return a string representing the type of the expression. */
static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    string t;
    switch (args[0]->type) {
        case tInt: t = "int"; break;
        case tBool: t = "bool"; break;
        case tString: t = "string"; break;
        case tPath: t = "path"; break;
        case tNull: t = "null"; break;
        case tAttrs: t = "set"; break;
        case tList: t = "list"; break;
        case tLambda:
        case tPrimOp:
        case tPrimOpApp:
            t = "lambda";
            break;
        default: abort();
    }
    mkString(v, state.symbols.create(t));
}
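
/* For example, typeOf applied to a string yields "string" and applied to
   an attribute set yields "set"; lambdas, primops and partially applied
   primops all report "lambda". */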


/* Determine whether the argument is the null value. */
static void prim_isNull(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    mkBool(v, args[0]->type == tNull);
}


/* Determine whether the argument is a function. */
static void prim_isFunction(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    mkBool(v, args[0]->type == tLambda);
}


/* Determine whether the argument is an integer. */
static void prim_isInt(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    mkBool(v, args[0]->type == tInt);
}


/* Determine whether the argument is a string. */
static void prim_isString(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    mkBool(v, args[0]->type == tString);
}


/* Determine whether the argument is a Boolean. */
static void prim_isBool(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    mkBool(v, args[0]->type == tBool);
}


struct CompareValues
{
    bool operator () (const Value * v1, const Value * v2) const
    {
        if (v1->type != v2->type)
            throw EvalError("cannot compare values of different types");
        switch (v1->type) {
            case tInt:
                return v1->integer < v2->integer;
            case tString:
                return strcmp(v1->string.s, v2->string.s) < 0;
            case tPath:
                return strcmp(v1->path, v2->path) < 0;
            default:
                throw EvalError(format("cannot compare %1% with %2%") % showType(*v1) % showType(*v2));
        }
    }
};


#if HAVE_BOEHMGC
typedef list<Value *, gc_allocator<Value *> > ValueList;
#else
typedef list<Value *> ValueList;
#endif


static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    startNest(nest, lvlDebug, "finding dependencies");

    state.forceAttrs(*args[0], pos);

    /* Get the start set. */
    Bindings::iterator startSet =
        args[0]->attrs->find(state.symbols.create("startSet"));
    if (startSet == args[0]->attrs->end())
        throw EvalError(format("attribute `startSet' required, at %1%") % pos);
    state.forceList(*startSet->value, pos);

    ValueList workSet;
    for (unsigned int n = 0; n < startSet->value->list.length; ++n)
        workSet.push_back(startSet->value->list.elems[n]);

    /* Get the operator. */
    Bindings::iterator op =
        args[0]->attrs->find(state.symbols.create("operator"));
    if (op == args[0]->attrs->end())
        throw EvalError(format("attribute `operator' required, at %1%") % pos);
    state.forceValue(*op->value);

    /* Construct the closure by applying the operator to each element of
       `workSet', adding the result to `workSet', continuing until
       no new elements are found. */
    ValueList res;
    // `doneKeys' doesn't need to be a GC root, because its values are
    // reachable from res.
    set<Value *, CompareValues> doneKeys;
    while (!workSet.empty()) {
        Value * e = *(workSet.begin());
        workSet.pop_front();

        state.forceAttrs(*e, pos);

        Bindings::iterator key =
            e->attrs->find(state.symbols.create("key"));
        if (key == e->attrs->end())
            throw EvalError(format("attribute `key' required, at %1%") % pos);
        state.forceValue(*key->value);

        if (doneKeys.find(key->value) != doneKeys.end()) continue;
        doneKeys.insert(key->value);
        res.push_back(e);

        /* Call the `operator' function with `e' as argument. */
        Value call;
        mkApp(call, *op->value, *e);
        state.forceList(call, pos);

        /* Add the values returned by the operator to the work set. */
        for (unsigned int n = 0; n < call.list.length; ++n) {
            state.forceValue(*call.list.elems[n]);
            workSet.push_back(call.list.elems[n]);
        }
    }

    /* Create the result list. */
    state.mkList(v, res.size());
    unsigned int n = 0;
    foreach (ValueList::iterator, i, res)
        v.list.elems[n++] = *i;
}
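
/* A sketch of how this primop is used from the Nix language (the values
   are illustrative):

     builtins.genericClosure {
       startSet = [ { key = 1; } ];
       operator = item: if item.key < 4 then [ { key = item.key + 1; } ] else [ ];
     }

   yields the list of attribute sets with keys 1, 2, 3 and 4; elements
   whose ‘key’ was already seen are skipped. */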


static void prim_abort(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    throw Abort(format("evaluation aborted with the following error message: `%1%'") %
        state.coerceToString(pos, *args[0], context));
}


static void prim_throw(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    throw ThrownError(format("%1%") % state.coerceToString(pos, *args[0], context));
}


static void prim_addErrorContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    try {
        state.forceValue(*args[1]);
        v = *args[1];
    } catch (Error & e) {
        PathSet context;
        e.addPrefix(format("%1%\n") % state.coerceToString(pos, *args[0], context));
        throw;
    }
}


/* Try evaluating the argument. Success => {success=true; value=something;},
 * else => {success=false; value=false;} */
static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.mkAttrs(v, 2);
    try {
        state.forceValue(*args[0]);
        v.attrs->push_back(Attr(state.sValue, args[0]));
        mkBool(*state.allocAttr(v, state.symbols.create("success")), true);
    } catch (AssertionError & e) {
        mkBool(*state.allocAttr(v, state.sValue), false);
        mkBool(*state.allocAttr(v, state.symbols.create("success")), false);
    }
    v.attrs->sort();
}
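
/* For instance, ‘builtins.tryEval (assert false; 1)’ evaluates to
   { success = false; value = false; }, while ‘builtins.tryEval 1’
   evaluates to { success = true; value = 1; } (illustrative values). */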


/* Return an environment variable.  Use with care. */
static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    string name = state.forceStringNoCtx(*args[0], pos);
    mkString(v, getEnv(name));
}
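
/* For instance, ‘builtins.getEnv "PATH"’ returns the value of $PATH, or
   the empty string if the variable is unset (the variable name here is
   illustrative). */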


/* Evaluate the first expression and print it on standard error.  Then
   return the second expression.  Useful for debugging. */
static void prim_trace(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    if (args[0]->type == tString)
        printMsg(lvlError, format("trace: %1%") % args[0]->string.s);
    else
        printMsg(lvlError, format("trace: %1%") % *args[0]);
    state.forceValue(*args[1]);
    v = *args[1];
}
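
/* For instance, ‘builtins.trace "evaluating x" x’ prints
   "trace: evaluating x" on standard error and then evaluates to x
   (the message is illustrative). */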


/*************************************************************
 * Derivations
 *************************************************************/


/* Construct (as an unobservable side effect) a Nix derivation
   expression that performs the derivation described by the argument
   set.  Returns the original set extended with the following
   attributes: `outPath' containing the primary output path of the
   derivation; `drvPath' containing the path of the Nix expression;
   and `type' set to `derivation' to indicate that this is a
   derivation. */
static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    startNest(nest, lvlVomit, "evaluating derivation");

    state.forceAttrs(*args[0], pos);

    /* Figure out the name first (for stack backtraces). */
    Bindings::iterator attr = args[0]->attrs->find(state.sName);
    if (attr == args[0]->attrs->end())
        throw EvalError(format("required attribute `name' missing, at %1%") % pos);
    string drvName;
    Pos & posDrvName(*attr->pos);
    try {
        drvName = state.forceStringNoCtx(*attr->value, pos);
    } catch (Error & e) {
        e.addPrefix(format("while evaluating the derivation attribute `name' at %1%:\n") % posDrvName);
        throw;
    }

    /* Check whether null attributes should be ignored. */
    bool ignoreNulls = false;
    attr = args[0]->attrs->find(state.sIgnoreNulls);
    if (attr != args[0]->attrs->end())
        ignoreNulls = state.forceBool(*attr->value);

    /* Build the derivation expression by processing the attributes. */
    Derivation drv;

    PathSet context;

    string outputHash, outputHashAlgo;
    bool outputHashRecursive = false;

    /* A derivation can declare multiple outputs by setting the
       ‘outputs’ attribute, e.g. outputs = [ "out" "tools" "dev" ];
       the symbolic name of each output other than ‘out’ is suffixed to
       its store path, and each path is passed to the builder through
       the environment variable of the same name.  This allows parts of
       a package (for instance libraries and headers) to be distributed
       and garbage-collected separately. */
    StringSet outputs;
    outputs.insert("out");

    foreach (Bindings::iterator, i, *args[0]->attrs) {
        if (i->name == state.sIgnoreNulls) continue;
        string key = i->name;
        startNest(nest, lvlVomit, format("processing attribute `%1%'") % key);

        try {

            if (ignoreNulls) {
                state.forceValue(*i->value);
                if (i->value->type == tNull) continue;
            }

            /* The `args' attribute is special: it supplies the
               command-line arguments to the builder. */
            if (key == "args") {
                state.forceList(*i->value, pos);
                for (unsigned int n = 0; n < i->value->list.length; ++n) {
                    string s = state.coerceToString(posDrvName, *i->value->list.elems[n], context, true);
                    drv.args.push_back(s);
                }
            }

            /* All other attributes are passed to the builder through
               the environment. */
            else {
                string s = state.coerceToString(posDrvName, *i->value, context, true);
                drv.env[key] = s;
                if (key == "builder") drv.builder = s;
                else if (i->name == state.sSystem) drv.platform = s;
                else if (i->name == state.sName) {
                    drvName = s;
                    printMsg(lvlVomit, format("derivation name is `%1%'") % drvName);
                }
                else if (key == "outputHash") outputHash = s;
                else if (key == "outputHashAlgo") outputHashAlgo = s;
                else if (key == "outputHashMode") {
                    if (s == "recursive") outputHashRecursive = true;
                    else if (s == "flat") outputHashRecursive = false;
                    else throw EvalError(format("invalid value `%1%' for `outputHashMode' attribute, at %2%") % s % posDrvName);
                }
                else if (key == "outputs") {
                    Strings tmp = tokenizeString<Strings>(s);
                    outputs.clear();
                    foreach (Strings::iterator, j, tmp) {
                        if (outputs.find(*j) != outputs.end())
                            throw EvalError(format("duplicate derivation output `%1%', at %2%") % *j % posDrvName);
                        /* !!! Check whether *j is a valid attribute
                           name. */
                        /* Derivations cannot be named ‘drv’, because
                           then we'd have an attribute ‘drvPath’ in
                           the resulting set. */
                        if (*j == "drv")
                            throw EvalError(format("invalid derivation output name `drv', at %1%") % posDrvName);
                        outputs.insert(*j);
                    }
                    if (outputs.empty())
                        throw EvalError(format("derivation cannot have an empty set of outputs, at %1%") % posDrvName);
                }
            }

        } catch (Error & e) {
            e.addPrefix(format("while evaluating the attribute `%1%' of the derivation `%2%' at %3%:\n")
                % key % drvName % posDrvName);
            throw;
        }
    }

    /* Everything in the context of the strings in the derivation
       attributes should be added as dependencies of the resulting
       derivation. */
    foreach (PathSet::iterator, i, context) {
        Path path = *i;

        /* Paths marked with `=' denote that the path of a derivation
           is explicitly passed to the builder.  Since that allows the
           builder to gain access to every path in the dependency
           graph of the derivation (including all outputs), all paths
           in the graph must be added to this derivation's list of
           inputs to ensure that they are available when the builder
           runs. */
        if (path.at(0) == '=') {
            /* !!! This doesn't work if readOnlyMode is set. */
            PathSet refs; computeFSClosure(*store, string(path, 1), refs);
            foreach (PathSet::iterator, j, refs) {
                drv.inputSrcs.insert(*j);
                if (isDerivation(*j))
                    drv.inputDrvs[*j] = store->queryDerivationOutputNames(*j);
            }
        }

        /* See prim_unsafeDiscardOutputDependency. */
        else if (path.at(0) == '~')
            drv.inputSrcs.insert(string(path, 1));

        /* Handle derivation outputs of the form ‘!<name>!<path>’. */
        else if (path.at(0) == '!') {
            std::pair<string, string> ctx = decodeContext(path);
            drv.inputDrvs[ctx.first].insert(ctx.second);
        }

        /* Handle derivation contexts returned by
           ‘builtins.storePath’. */
        else if (isDerivation(path))
            drv.inputDrvs[path] = store->queryDerivationOutputNames(path);

        /* Otherwise it's a source file. */
        else
            drv.inputSrcs.insert(path);
    }

    /* Do we have all required attributes? */
    if (drv.builder == "")
        throw EvalError(format("required attribute `builder' missing, at %1%") % posDrvName);
    if (drv.platform == "")
        throw EvalError(format("required attribute `system' missing, at %1%") % posDrvName);

    /* Check whether the derivation name is valid. */
    checkStoreName(drvName);
    if (isDerivation(drvName))
        throw EvalError(format("derivation names are not allowed to end in `%1%', at %2%")
            % drvExtension % posDrvName);

    if (outputHash != "") {
        /* Handle fixed-output derivations. */
        if (outputs.size() != 1 || *(outputs.begin()) != "out")
            throw Error(format("multiple outputs are not supported in fixed-output derivations, at %1%") % posDrvName);

        HashType ht = parseHashType(outputHashAlgo);
        if (ht == htUnknown)
            throw EvalError(format("unknown hash algorithm `%1%', at %2%") % outputHashAlgo % posDrvName);
        Hash h = parseHash16or32(ht, outputHash);
        outputHash = printHash(h);
        if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;

        Path outPath = makeFixedOutputPath(outputHashRecursive, ht, h, drvName);
        drv.env["out"] = outPath;
        drv.outputs["out"] = DerivationOutput(outPath, outputHashAlgo, outputHash);
    }

    else {
        /* Construct the "masked" store derivation, which is the final
           one except that in the list of outputs, the output paths
           are empty, and the corresponding environment variables have
           an empty value.  This ensures that changes in the set of
           output names do get reflected in the hash. */
        foreach (StringSet::iterator, i, outputs) {
            drv.env[*i] = "";
            drv.outputs[*i] = DerivationOutput("", "", "");
        }

        /* Use the masked derivation expression to compute the output
           path. */
        Hash h = hashDerivationModulo(*store, drv);

        foreach (DerivationOutputs::iterator, i, drv.outputs)
            if (i->second.path == "") {
                Path outPath = makeOutputPath(i->first, h, drvName);
                drv.env[i->first] = outPath;
                i->second.path = outPath;
            }
    }

    /* Write the resulting term into the Nix store directory. */
    Path drvPath = writeDerivation(*store, drv, drvName, state.repair);

    printMsg(lvlChatty, format("instantiated `%1%' -> `%2%'")
        % drvName % drvPath);

    /* Optimisation, but required in read-only mode! because in that
       case we don't actually write store derivations, so we can't
       read them later. */
    drvHashes[drvPath] = hashDerivationModulo(*store, drv);

    state.mkAttrs(v, 1 + drv.outputs.size());
    mkString(*state.allocAttr(v, state.sDrvPath), drvPath, singleton<PathSet>("=" + drvPath));
|
2011-07-18 23:31:03 +00:00
|
|
|
|
foreach (DerivationOutputs::iterator, i, drv.outputs) {
|
2012-01-03 15:27:18 +00:00
|
|
|
|
mkString(*state.allocAttr(v, state.symbols.create(i->first)),
|
2011-11-06 06:28:30 +00:00
|
|
|
|
i->second.path, singleton<PathSet>("!" + i->first + "!" + drvPath));
|
2011-07-18 23:31:03 +00:00
|
|
|
|
}
|
2010-10-24 19:52:33 +00:00
|
|
|
|
v.attrs->sort();
|
2010-03-31 15:38:03 +00:00
|
|
|
|
}
|
2003-11-02 16:31:35 +00:00
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/*************************************************************
|
|
|
|
|
* Paths
|
|
|
|
|
*************************************************************/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Convert the argument to a path. !!! obsolete? */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_toPath(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2003-11-02 16:31:35 +00:00
|
|
|
|
{
|
2006-10-16 15:55:34 +00:00
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
Path path = state.coerceToPath(pos, *args[0], context);
|
2012-11-15 22:04:34 +00:00
|
|
|
|
mkString(v, canonPath(path), context);
|
2003-11-02 16:31:35 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2008-11-19 23:26:19 +00:00
|
|
|
|
/* Allow a valid store path to be used in an expression. This is
|
|
|
|
|
useful in some generated expressions such as in nix-push, which
|
|
|
|
|
generates a call to a function with an already existing store path
|
|
|
|
|
as argument. You don't want to use `toPath' here because it copies
|
|
|
|
|
the path to the Nix store, which yields a copy like
|
|
|
|
|
/nix/store/newhash-oldhash-oldname. In the past, `toPath' had
|
|
|
|
|
special case behaviour for store paths, but that created weird
|
|
|
|
|
corner cases. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2008-11-19 23:26:19 +00:00
|
|
|
|
{
|
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
Path path = state.coerceToPath(pos, *args[0], context);
|
2012-07-12 22:25:01 +00:00
|
|
|
|
/* Resolve symlinks in ‘path’, unless ‘path’ itself is a symlink
|
|
|
|
|
directly in the store. The latter condition is necessary so
|
|
|
|
|
e.g. nix-push does the right thing. */
|
|
|
|
|
if (!isStorePath(path)) path = canonPath(path, true);
|
2008-11-19 23:26:19 +00:00
|
|
|
|
if (!isInStore(path))
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw EvalError(format("path `%1%' is not in the Nix store, at %2%") % path % pos);
|
2008-12-04 10:40:41 +00:00
|
|
|
|
Path path2 = toStorePath(path);
|
2013-12-05 16:51:54 +00:00
|
|
|
|
if (!settings.readOnlyMode)
|
|
|
|
|
store->ensurePath(path2);
|
2008-12-04 10:40:41 +00:00
|
|
|
|
context.insert(path2);
|
2010-04-16 15:13:47 +00:00
|
|
|
|
mkString(v, path, context);
|
2008-11-19 23:26:19 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_pathExists(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2005-08-14 14:00:39 +00:00
|
|
|
|
{
|
2006-10-16 15:55:34 +00:00
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
Path path = state.coerceToPath(pos, *args[0], context);
|
2007-01-29 15:11:32 +00:00
|
|
|
|
if (!context.empty())
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw EvalError(format("string `%1%' cannot refer to other paths, at %2%") % path % pos);
|
2010-03-30 22:39:48 +00:00
|
|
|
|
mkBool(v, pathExists(path));
|
2006-03-10 16:20:42 +00:00
|
|
|
|
}
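For illustration only (a sketch, not part of this source file; the paths are placeholders), this primop is used from the Nix language as:
      builtins.pathExists ./default.nix
      => true                       # if such a file exists
      builtins.pathExists "/no/such/path"
      => false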
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/* Return the base name of the given string, i.e., everything
|
|
|
|
|
following the last slash. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_baseNameOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2003-11-02 16:31:35 +00:00
|
|
|
|
{
|
2006-10-16 15:55:34 +00:00
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
mkString(v, baseNameOf(state.coerceToString(pos, *args[0], context)), context);
|
2003-11-02 16:31:35 +00:00
|
|
|
|
}
|
2003-11-05 16:27:40 +00:00
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/* Return the directory of the given path, i.e., everything before the
|
|
|
|
|
last slash. Return either a path or a string depending on the type
|
|
|
|
|
of the argument. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_dirOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2006-09-24 18:23:32 +00:00
|
|
|
|
{
|
2006-10-16 15:55:34 +00:00
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
Path dir = dirOf(state.coerceToPath(pos, *args[0], context));
|
2010-03-30 22:39:48 +00:00
|
|
|
|
if (args[0]->type == tPath) mkPath(v, dir.c_str()); else mkString(v, dir, context);
|
2006-09-24 18:23:32 +00:00
|
|
|
|
}
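An illustrative Nix-level sketch of `baseNameOf' and `dirOf' (example values only):
      baseNameOf "/foo/bar/default.nix"
      => "default.nix"
      dirOf "/foo/bar/default.nix"
      => "/foo/bar"
      dirOf ./foo/bar.nix
      => ./foo                      # a path argument yields a path (the tPath branch above)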
|
|
|
|
|
|
|
|
|
|
|
2007-11-21 13:49:59 +00:00
|
|
|
|
/* Return the contents of a file as a string. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_readFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2007-11-21 13:49:59 +00:00
|
|
|
|
{
|
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
Path path = state.coerceToPath(pos, *args[0], context);
|
2007-11-21 13:49:59 +00:00
|
|
|
|
if (!context.empty())
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw EvalError(format("string `%1%' cannot refer to other paths, at %2%") % path % pos);
|
2010-03-30 22:39:48 +00:00
|
|
|
|
mkString(v, readFile(path).c_str());
|
2007-11-21 13:49:59 +00:00
|
|
|
|
}
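A hedged usage sketch (file name and contents are made up):
      builtins.readFile ./version
      => "1.7\n"                    # hypothetical contents of ./version, returned as a string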
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/*************************************************************
|
|
|
|
|
* Creating files
|
|
|
|
|
*************************************************************/
|
|
|
|
|
|
|
|
|
|
|
2006-09-01 12:07:31 +00:00
|
|
|
|
/* Convert the argument (which can be any Nix expression) to an XML
|
|
|
|
|
representation returned in a string. Not all Nix expressions can
|
|
|
|
|
be sensibly or completely represented (e.g., functions). */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_toXML(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2006-08-24 14:34:29 +00:00
|
|
|
|
{
|
2006-09-04 21:06:23 +00:00
|
|
|
|
std::ostringstream out;
|
2006-10-16 15:55:34 +00:00
|
|
|
|
PathSet context;
|
2010-05-07 14:46:47 +00:00
|
|
|
|
printValueAsXML(state, true, false, *args[0], out, context);
|
2010-04-07 13:55:46 +00:00
|
|
|
|
mkString(v, out.str(), context);
|
2006-08-24 14:34:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2013-11-18 23:03:11 +00:00
|
|
|
|
/* Convert the argument (which can be any Nix expression) to a JSON
|
|
|
|
|
string. Not all Nix expressions can be sensibly or completely
|
|
|
|
|
represented (e.g., functions). */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_toJSON(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2013-11-18 23:03:11 +00:00
|
|
|
|
{
|
|
|
|
|
std::ostringstream out;
|
|
|
|
|
PathSet context;
|
|
|
|
|
printValueAsJSON(state, true, *args[0], out, context);
|
|
|
|
|
mkString(v, out.str(), context);
|
|
|
|
|
}
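A rough Nix-level sketch of both conversions (the toXML result is paraphrased rather than quoted exactly):
      builtins.toJSON { x = [ 1 2 ]; }
      => "{\"x\":[1,2]}"
      builtins.toXML { x = 1; }
      => an XML document string containing <attrs> ... <attr name="x"> ... </attrs>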
|
|
|
|
|
|
|
|
|
|
|
2006-09-01 12:07:31 +00:00
|
|
|
|
/* Store a string in the Nix store as a source file that can be used
|
|
|
|
|
as an input by derivations. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2006-09-01 12:07:31 +00:00
|
|
|
|
{
|
2006-10-16 15:55:34 +00:00
|
|
|
|
PathSet context;
|
2014-04-04 19:14:11 +00:00
|
|
|
|
string name = state.forceStringNoCtx(*args[0], pos);
|
2010-03-31 19:52:29 +00:00
|
|
|
|
string contents = state.forceString(*args[1], context);
|
2006-10-03 14:55:54 +00:00
|
|
|
|
|
|
|
|
|
PathSet refs;
|
|
|
|
|
|
2010-03-31 15:38:03 +00:00
|
|
|
|
foreach (PathSet::iterator, i, context) {
|
2008-12-04 10:45:47 +00:00
|
|
|
|
Path path = *i;
|
|
|
|
|
if (path.at(0) == '=') path = string(path, 1);
|
|
|
|
|
if (isDerivation(path))
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw EvalError(format("in `toFile': the file `%1%' cannot refer to derivation outputs, at %2%") % name % pos);
|
2008-12-04 10:45:47 +00:00
|
|
|
|
refs.insert(path);
|
2006-10-03 14:55:54 +00:00
|
|
|
|
}
|
2013-09-02 14:29:15 +00:00
|
|
|
|
|
2012-07-30 23:55:41 +00:00
|
|
|
|
Path storePath = settings.readOnlyMode
|
2007-01-29 15:51:37 +00:00
|
|
|
|
? computeStorePathForText(name, contents, refs)
|
2012-10-03 19:09:18 +00:00
|
|
|
|
: store->addTextToStore(name, contents, refs, state.repair);
|
2006-10-03 14:55:54 +00:00
|
|
|
|
|
2006-10-16 15:55:34 +00:00
|
|
|
|
/* Note: we don't need to add `context' to the context of the
|
|
|
|
|
result, since `storePath' itself has references to the paths
|
|
|
|
|
used in args[1]. */
|
2010-03-31 15:38:03 +00:00
|
|
|
|
|
|
|
|
|
mkString(v, storePath, singleton<PathSet>(storePath));
|
2006-09-01 12:07:31 +00:00
|
|
|
|
}
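An illustrative use of `toFile' (the store hash shown is a placeholder):
      builtins.toFile "builder.sh" "echo hello > $out"
      => "/nix/store/<hash>-builder.sh"   # <hash> stands for the computed text hash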
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
struct FilterFromExpr : PathFilter
|
2004-02-04 16:03:29 +00:00
|
|
|
|
{
|
2007-01-29 15:11:32 +00:00
|
|
|
|
EvalState & state;
|
2010-04-07 13:55:46 +00:00
|
|
|
|
Value & filter;
|
2013-09-02 14:29:15 +00:00
|
|
|
|
|
2010-04-07 13:55:46 +00:00
|
|
|
|
FilterFromExpr(EvalState & state, Value & filter)
|
2007-01-29 15:11:32 +00:00
|
|
|
|
: state(state), filter(filter)
|
|
|
|
|
{
|
|
|
|
|
}
|
2004-02-04 16:03:29 +00:00
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
bool operator () (const Path & path)
|
|
|
|
|
{
|
|
|
|
|
struct stat st;
|
|
|
|
|
if (lstat(path.c_str(), &st))
|
|
|
|
|
throw SysError(format("getting attributes of path `%1%'") % path);
|
2004-02-04 16:03:29 +00:00
|
|
|
|
|
2010-04-07 13:55:46 +00:00
|
|
|
|
/* Call the filter function. The first argument is the path,
|
|
|
|
|
the second is a string indicating the type of the file. */
|
|
|
|
|
Value arg1;
|
|
|
|
|
mkString(arg1, path);
|
|
|
|
|
|
|
|
|
|
Value fun2;
|
2014-04-04 15:53:52 +00:00
|
|
|
|
state.callFunction(filter, arg1, fun2, noPos);
|
2010-04-07 13:55:46 +00:00
|
|
|
|
|
|
|
|
|
Value arg2;
|
2013-09-02 14:29:15 +00:00
|
|
|
|
mkString(arg2,
|
2010-04-07 13:55:46 +00:00
|
|
|
|
S_ISREG(st.st_mode) ? "regular" :
|
|
|
|
|
S_ISDIR(st.st_mode) ? "directory" :
|
|
|
|
|
S_ISLNK(st.st_mode) ? "symlink" :
|
|
|
|
|
"unknown" /* not supported, will fail! */);
|
2013-09-02 14:29:15 +00:00
|
|
|
|
|
2010-04-07 13:55:46 +00:00
|
|
|
|
Value res;
|
2014-04-04 15:53:52 +00:00
|
|
|
|
state.callFunction(fun2, arg2, res, noPos);
|
2010-04-07 13:55:46 +00:00
|
|
|
|
|
|
|
|
|
return state.forceBool(res);
|
2007-01-29 15:11:32 +00:00
|
|
|
|
}
|
|
|
|
|
};
|
2004-02-04 16:03:29 +00:00
|
|
|
|
|
|
|
|
|
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2003-11-05 16:27:40 +00:00
|
|
|
|
{
|
2007-01-29 15:11:32 +00:00
|
|
|
|
PathSet context;
|
2014-04-04 20:19:33 +00:00
|
|
|
|
Path path = state.coerceToPath(pos, *args[1], context);
|
2007-01-29 15:11:32 +00:00
|
|
|
|
if (!context.empty())
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw EvalError(format("string `%1%' cannot refer to other paths, at %2%") % path % pos);
|
2003-11-05 16:27:40 +00:00
|
|
|
|
|
2010-04-07 13:55:46 +00:00
|
|
|
|
state.forceValue(*args[0]);
|
|
|
|
|
if (args[0]->type != tLambda)
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw TypeError(format("first argument in call to `filterSource' is not a function but %1%, at %2%") % showType(*args[0]) % pos);
|
2010-04-07 13:55:46 +00:00
|
|
|
|
|
|
|
|
|
FilterFromExpr filter(state, *args[0]);
|
2004-08-04 10:59:20 +00:00
|
|
|
|
|
2012-07-30 23:55:41 +00:00
|
|
|
|
Path dstPath = settings.readOnlyMode
|
2008-12-03 16:10:17 +00:00
|
|
|
|
? computeStorePathForPath(path, true, htSHA256, filter).first
|
2012-10-03 19:09:18 +00:00
|
|
|
|
: store->addToStore(path, true, htSHA256, filter, state.repair);
|
2004-08-04 10:59:20 +00:00
|
|
|
|
|
2010-04-07 13:55:46 +00:00
|
|
|
|
mkString(v, dstPath, singleton<PathSet>(dstPath));
|
2006-09-22 14:55:19 +00:00
|
|
|
|
}
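A typical Nix-level use, copying a source tree while excluding a version-control directory (the predicate is only an example):
      builtins.filterSource
        (path: type: type != "directory" || baseNameOf path != ".git")
        ./.
      => a store path holding a filtered copy of ./.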
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/*************************************************************
|
2013-10-24 14:41:04 +00:00
|
|
|
|
* Sets
|
2007-01-29 15:11:32 +00:00
|
|
|
|
*************************************************************/
|
* A primitive operation `dependencyClosure' to do automatic dependency
determination (e.g., finding the header file dependencies of a C
file) in Nix low-level builds automatically.
For instance, in the function `compileC' in make/lib/default.nix, we
find the header file dependencies of C file `main' as follows:
localIncludes =
dependencyClosure {
scanner = file:
import (findIncludes {
inherit file;
});
startSet = [main];
};
The function works by "growing" the set of dependencies, starting
with the set `startSet', and calling the function `scanner' for each
file to get its dependencies (which should yield a list of strings
representing relative paths). For instance, when `scanner' is
called on a file `foo.c' that includes the line
#include "../bar/fnord.h"
then `scanner' should yield ["../bar/fnord.h"]. This list of
dependencies is absolutised relative to the including file and added
to the set of dependencies. The process continues until no more
dependencies are found (hence it's a closure).
`dependencyClosure' yields a list that contains in alternation a
dependency, and its relative path to the directory of the start
file, e.g.,
[ /bla/bla/foo.c
"foo.c"
/bla/bar/fnord.h
"../bar/fnord.h"
]
These relative paths are necessary for the builder that compiles
foo.c to reconstruct the relative directory structure expected by
foo.c.
The advantage of `dependencyClosure' over the old approach (using
the impure `__currentTime') is that it's completely pure, and more
efficient because it only rescans for dependencies (i.e., by
building the derivations yielded by `scanner') if sources have
actually changed. The old approach rescanned every time.
2005-08-14 12:38:47 +00:00
|
|
|
|
|
|
|
|
|
|
2013-10-24 14:41:04 +00:00
|
|
|
|
/* Return the names of the attributes in a set as a sorted list of
|
|
|
|
|
strings. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_attrNames(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2005-08-14 12:38:47 +00:00
|
|
|
|
{
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(*args[0], pos);
|
2005-08-14 12:38:47 +00:00
|
|
|
|
|
2010-03-30 22:39:48 +00:00
|
|
|
|
state.mkList(v, args[0]->attrs->size());
|
2005-08-14 12:38:47 +00:00
|
|
|
|
|
2010-03-30 22:39:48 +00:00
|
|
|
|
StringSet names;
|
|
|
|
|
foreach (Bindings::iterator, i, *args[0]->attrs)
|
2010-10-24 00:41:29 +00:00
|
|
|
|
names.insert(i->name);
|
2005-08-14 12:38:47 +00:00
|
|
|
|
|
2010-03-30 22:39:48 +00:00
|
|
|
|
unsigned int n = 0;
|
2010-10-23 18:18:07 +00:00
|
|
|
|
foreach (StringSet::iterator, i, names)
|
|
|
|
|
mkString(*(v.list.elems[n++] = state.allocValue()), *i);
|
2005-08-14 12:38:47 +00:00
|
|
|
|
}
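Illustrative use (not part of this file):
      builtins.attrNames { b = 1; a = 2; }
      => [ "a" "b" ]                # sorted names, matching the StringSet above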
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/* Dynamic version of the `.' operator. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2005-08-14 12:38:47 +00:00
|
|
|
|
{
|
2014-04-04 19:14:11 +00:00
|
|
|
|
string attr = state.forceStringNoCtx(*args[0], pos);
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(*args[1], pos);
|
2010-04-13 12:25:42 +00:00
|
|
|
|
// !!! Should we create a symbol here or just do a lookup?
|
|
|
|
|
Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
|
2010-03-30 18:05:54 +00:00
|
|
|
|
if (i == args[1]->attrs->end())
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw EvalError(format("attribute `%1%' missing, at %2%") % attr % pos);
|
2010-05-07 12:11:05 +00:00
|
|
|
|
// !!! add to stack trace?
|
2012-08-13 03:29:28 +00:00
|
|
|
|
if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
|
2010-10-24 00:41:29 +00:00
|
|
|
|
state.forceValue(*i->value);
|
|
|
|
|
v = *i->value;
|
2007-01-29 15:11:32 +00:00
|
|
|
|
}
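Illustrative use; the second call takes the error path above:
      builtins.getAttr "x" { x = 1; }
      => 1
      builtins.getAttr "y" { x = 1; }
      => an evaluation error: attribute `y' missing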
|
2005-08-14 12:38:47 +00:00
|
|
|
|
|
|
|
|
|
|
2013-11-18 21:22:35 +00:00
|
|
|
|
/* Return position information of the specified attribute. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
void prim_unsafeGetAttrPos(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2013-11-18 21:22:35 +00:00
|
|
|
|
{
|
2014-04-04 19:14:11 +00:00
|
|
|
|
string attr = state.forceStringNoCtx(*args[0], pos);
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(*args[1], pos);
|
2013-11-18 21:22:35 +00:00
|
|
|
|
Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
|
|
|
|
|
if (i == args[1]->attrs->end())
|
|
|
|
|
mkNull(v);
|
|
|
|
|
else
|
|
|
|
|
state.mkPos(v, i->pos);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/* Dynamic version of the `?' operator. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_hasAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2007-01-29 15:11:32 +00:00
|
|
|
|
{
|
2014-04-04 19:14:11 +00:00
|
|
|
|
string attr = state.forceStringNoCtx(*args[0], pos);
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(*args[1], pos);
|
2010-04-13 12:25:42 +00:00
|
|
|
|
mkBool(v, args[1]->attrs->find(state.symbols.create(attr)) != args[1]->attrs->end());
|
2007-01-29 15:11:32 +00:00
|
|
|
|
}
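Illustrative use of this dynamic counterpart of the `?' operator:
      builtins.hasAttr "x" { x = 1; }
      => true                       # comparable to: { x = 1; } ? x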
|
2005-08-14 12:38:47 +00:00
|
|
|
|
|
2005-08-14 14:00:39 +00:00
|
|
|
|
|
2013-10-24 14:41:04 +00:00
|
|
|
|
/* Determine whether the argument is a set. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_isAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2010-03-30 22:39:48 +00:00
|
|
|
|
{
|
|
|
|
|
state.forceValue(*args[0]);
|
|
|
|
|
mkBool(v, args[0]->type == tAttrs);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_removeAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2010-03-30 22:39:48 +00:00
|
|
|
|
{
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(*args[0], pos);
|
2014-04-04 17:05:36 +00:00
|
|
|
|
state.forceList(*args[1], pos);
|
2010-03-30 22:39:48 +00:00
|
|
|
|
|
2010-10-24 00:41:29 +00:00
|
|
|
|
/* Get the attribute names to be removed. */
|
|
|
|
|
std::set<Symbol> names;
|
2010-03-30 22:39:48 +00:00
|
|
|
|
for (unsigned int i = 0; i < args[1]->list.length; ++i) {
|
2014-04-04 19:14:11 +00:00
|
|
|
|
state.forceStringNoCtx(*args[1]->list.elems[i], pos);
|
2010-10-24 00:41:29 +00:00
|
|
|
|
names.insert(state.symbols.create(args[1]->list.elems[i]->string.s));
|
|
|
|
|
}
|
|
|
|
|
|
2010-10-24 19:52:33 +00:00
|
|
|
|
/* Copy all attributes not in that set. Note that we don't need
|
|
|
|
|
to sort v.attrs because it's a subset of an already sorted
|
|
|
|
|
vector. */
|
2010-10-24 20:09:37 +00:00
|
|
|
|
state.mkAttrs(v, args[0]->attrs->size());
|
2010-10-24 00:41:29 +00:00
|
|
|
|
foreach (Bindings::iterator, i, *args[0]->attrs) {
|
|
|
|
|
if (names.find(i->name) == names.end())
|
|
|
|
|
v.attrs->push_back(*i);
|
2010-03-30 22:39:48 +00:00
|
|
|
|
}
|
|
|
|
|
}
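Illustrative use (not part of this file):
      removeAttrs { x = 1; y = 2; z = 3; } [ "x" "z" ]
      => { y = 2; }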
|
|
|
|
|
|
|
|
|
|
|
2013-10-24 14:41:04 +00:00
|
|
|
|
/* Builds a set from a list specifying (name, value) pairs. To be
|
|
|
|
|
precise, a list [{name = "name1"; value = value1;} ... {name =
|
|
|
|
|
"nameN"; value = valueN;}] is transformed to {name1 = value1;
|
2013-10-28 06:34:44 +00:00
|
|
|
|
... nameN = valueN;}. In case of duplicate occurrences of the same
|
|
|
|
|
name, the first takes precedence. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_listToAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2007-08-18 22:12:00 +00:00
|
|
|
|
{
|
2014-04-04 17:05:36 +00:00
|
|
|
|
state.forceList(*args[0], pos);
|
2010-03-31 15:38:03 +00:00
|
|
|
|
|
2010-10-24 20:09:37 +00:00
|
|
|
|
state.mkAttrs(v, args[0]->list.length);
|
2010-03-31 15:38:03 +00:00
|
|
|
|
|
2010-10-24 19:52:33 +00:00
|
|
|
|
std::set<Symbol> seen;
|
|
|
|
|
|
2010-03-31 15:38:03 +00:00
|
|
|
|
for (unsigned int i = 0; i < args[0]->list.length; ++i) {
|
2010-04-15 00:37:36 +00:00
|
|
|
|
Value & v2(*args[0]->list.elems[i]);
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(v2, pos);
|
2013-09-02 14:29:15 +00:00
|
|
|
|
|
2010-04-13 12:25:42 +00:00
|
|
|
|
Bindings::iterator j = v2.attrs->find(state.sName);
|
2010-03-31 15:38:03 +00:00
|
|
|
|
if (j == v2.attrs->end())
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw TypeError(format("`name' attribute missing in a call to `listToAttrs', at %1%") % pos);
|
2014-04-04 19:14:11 +00:00
|
|
|
|
string name = state.forceStringNoCtx(*j->value, pos);
|
2013-09-02 14:29:15 +00:00
|
|
|
|
|
2010-10-24 19:52:33 +00:00
|
|
|
|
Symbol sym = state.symbols.create(name);
|
|
|
|
|
if (seen.find(sym) == seen.end()) {
|
2013-10-28 06:34:44 +00:00
|
|
|
|
Bindings::iterator j2 = v2.attrs->find(state.symbols.create(state.sValue));
|
|
|
|
|
if (j2 == v2.attrs->end())
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw TypeError(format("`value' attribute missing in a call to `listToAttrs', at %1%") % pos);
|
2013-10-28 06:34:44 +00:00
|
|
|
|
|
2010-10-24 19:52:33 +00:00
|
|
|
|
v.attrs->push_back(Attr(sym, j2->value, j2->pos));
|
|
|
|
|
seen.insert(sym);
|
|
|
|
|
}
|
2007-10-09 12:51:25 +00:00
|
|
|
|
}
|
2010-10-24 19:52:33 +00:00
|
|
|
|
|
|
|
|
|
v.attrs->sort();
|
2007-08-18 22:12:00 +00:00
|
|
|
|
}
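Illustrative use; the duplicate `a' is dropped per the `seen' check above:
      builtins.listToAttrs [
        { name = "a"; value = 1; }
        { name = "b"; value = 2; }
        { name = "a"; value = 3; }
      ]
      => { a = 1; b = 2; }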
|
|
|
|
|
|
2007-10-31 18:01:56 +00:00
|
|
|
|
|
2013-10-24 14:41:04 +00:00
|
|
|
|
/* Return the right-biased intersection of two sets as1 and as2,
|
|
|
|
|
i.e. a set that contains every attribute from as2 that is also a
|
|
|
|
|
member of as1. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_intersectAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
* Two primops: builtins.intersectAttrs and builtins.functionArgs.
intersectAttrs returns the (right-biased) intersection between two
attribute sets, e.g. every attribute from the second set that also
exists in the first. functionArgs returns the set of attributes
expected by a function.
The main goal of these is to allow the elimination of most of
all-packages.nix. Most package instantiations in all-packages.nix
have this form:
foo = import ./foo.nix {
inherit a b c;
};
With intersectAttrs and functionArgs, this can be written as:
foo = callPackage (import ./foo.nix) { };
where
callPackage = f: args:
f ((builtins.intersectAttrs (builtins.functionArgs f) pkgs) // args);
I.e., foo.nix is called with all attributes from "pkgs" that it
actually needs (e.g., pkgs.a, pkgs.b and pkgs.c). (callPackage can
do any other generic package-level stuff we might want, such as
applying makeOverridable.) Of course, the automatically supplied
arguments can be overridden if needed, e.g.
foo = callPackage (import ./foo.nix) {
c = c_version_2;
};
but for the vast majority of packages, this won't be needed.
The advantages are to reduce the amount of typing needed to add a
dependency (from three sites to two), and to reduce the number of
trivial commits to all-packages.nix. For the former, there have
been two previous attempts:
- Use "args: with args;" in the package's function definition.
This however obscures the actual expected arguments of a
function, which is very bad.
- Use "{ arg1, arg2, ... }:" in the package's function definition
(i.e. use the ellipsis "..." to allow arbitrary additional
arguments), and then call the function with all of "pkgs" as an
argument. But this inhibits error detection if you call it with
a misspelled (or obsolete) argument.
2009-09-15 13:01:46 +00:00
|
|
|
|
{
|
2014-04-04 17:11:40 +00:00
|
|
|
|
state.forceAttrs(*args[0], pos);
|
|
|
|
|
state.forceAttrs(*args[1], pos);
|
2013-09-02 14:29:15 +00:00
|
|
|
|
|
2010-10-24 20:09:37 +00:00
|
|
|
|
state.mkAttrs(v, std::min(args[0]->attrs->size(), args[1]->attrs->size()));
|
2010-08-02 11:54:44 +00:00
|
|
|
|
|
|
|
|
|
foreach (Bindings::iterator, i, *args[0]->attrs) {
|
2010-10-24 00:41:29 +00:00
|
|
|
|
Bindings::iterator j = args[1]->attrs->find(i->name);
|
2010-10-22 14:47:42 +00:00
|
|
|
|
if (j != args[1]->attrs->end())
|
2010-10-24 00:41:29 +00:00
|
|
|
|
v.attrs->push_back(*j);
|
2009-09-15 13:01:46 +00:00
|
|
|
|
}
|
|
|
|
|
}
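Illustrative use (names taken from the first set, values from the second):
      builtins.intersectAttrs { a = 1; b = 2; } { b = 10; c = 20; }
      => { b = 10; }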
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Return a set containing the names of the formal arguments expected
|
|
|
|
|
by the function `f'. The value of each attribute is a Boolean
|
|
|
|
|
denoting whether it has a default value. For instance,
|
|
|
|
|
|
|
|
|
|
functionArgs ({ x, y ? 123}: ...)
|
|
|
|
|
=> { x = false; y = true; }
|
|
|
|
|
|
|
|
|
|
"Formal argument" here refers to the attributes pattern-matched by
|
|
|
|
|
the function. Plain lambdas are not included, e.g.
|
|
|
|
|
|
|
|
|
|
functionArgs (x: ...)
|
|
|
|
|
=> { }
|
|
|
|
|
*/
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2009-09-15 13:01:46 +00:00
|
|
|
|
{
|
2010-04-16 15:13:47 +00:00
|
|
|
|
state.forceValue(*args[0]);
|
|
|
|
|
if (args[0]->type != tLambda)
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw TypeError(format("`functionArgs' requires a function, at %1%") % pos);
|
2009-09-15 13:01:46 +00:00
|
|
|
|
|
2010-10-24 20:09:37 +00:00
|
|
|
|
if (!args[0]->lambda.fun->matchAttrs) {
|
|
|
|
|
state.mkAttrs(v, 0);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2010-04-16 15:13:47 +00:00
|
|
|
|
|
2010-10-24 20:09:37 +00:00
|
|
|
|
state.mkAttrs(v, args[0]->lambda.fun->formals->formals.size());
|
2010-04-16 15:13:47 +00:00
|
|
|
|
foreach (Formals::Formals_::iterator, i, args[0]->lambda.fun->formals->formals)
|
2010-10-22 14:47:42 +00:00
|
|
|
|
// !!! should optimise booleans (allocate only once)
|
|
|
|
|
mkBool(*state.allocAttr(v, i->name), i->def);
|
2010-10-24 19:52:33 +00:00
|
|
|
|
v.attrs->sort();
|
2009-09-15 13:01:46 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/*************************************************************
|
|
|
|
|
* Lists
|
|
|
|
|
*************************************************************/
|
2005-08-14 12:38:47 +00:00
|
|
|
|
|
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|
/* Determine whether the argument is a list. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_isList(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2006-08-23 15:46:00 +00:00
|
|
|
|
{
|
2010-03-30 22:39:48 +00:00
|
|
|
|
state.forceValue(*args[0]);
|
|
|
|
|
mkBool(v, args[0]->type == tList);
|
2006-08-23 15:46:00 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void elemAt(EvalState & state, const Pos & pos, Value & list, int n, Value & v)
|
2012-08-13 17:46:42 +00:00
|
|
|
|
{
|
2014-04-04 17:05:36 +00:00
|
|
|
|
state.forceList(list, pos);
|
2014-05-26 10:34:15 +00:00
|
|
|
|
if (n < 0 || (unsigned int) n >= list.list.length)
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw Error(format("list index %1% is out of bounds, at %2%") % n % pos);
|
2012-08-13 17:46:42 +00:00
|
|
|
|
state.forceValue(*list.list.elems[n]);
|
|
|
|
|
v = *list.list.elems[n];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Return the element of a list at index n (counting from 0). */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_elemAt(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2012-08-13 17:46:42 +00:00
|
|
|
|
{
|
2014-04-04 16:58:15 +00:00
|
|
|
|
elemAt(state, pos, *args[0], state.forceInt(*args[1], pos), v);
|
2012-08-13 17:46:42 +00:00
|
|
|
|
}
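Illustrative use with zero-based indexing (sketch only):
      builtins.elemAt [ "a" "b" "c" ] 1
      => "b"
      builtins.elemAt [ "a" ] 5
      => an evaluation error: list index 5 is out of bounds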
|
|
|
|
|
|
|
|
|
|
|
2006-09-22 14:46:36 +00:00
|
|
|
|
/* Return the first element of a list. */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_head(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2006-09-22 14:46:36 +00:00
|
|
|
|
{
|
2014-04-04 16:51:01 +00:00
|
|
|
|
elemAt(state, pos, *args[0], 0, v);
|
2006-09-22 14:46:36 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Return a list consisting of everything but the first element of
|
2012-08-13 05:05:35 +00:00
|
|
|
|
a list. Warning: this function takes O(n) time, so you probably
|
|
|
|
|
don't want to use it! */
|
2014-04-04 16:51:01 +00:00
|
|
|
|
static void prim_tail(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
2006-09-22 14:46:36 +00:00
|
|
|
|
{
|
2014-04-04 17:05:36 +00:00
|
|
|
|
state.forceList(*args[0], pos);
|
2010-03-30 14:39:27 +00:00
|
|
|
|
if (args[0]->list.length == 0)
|
2014-04-04 16:51:01 +00:00
|
|
|
|
throw Error(format("`tail' called on an empty list, at %1%") % pos);
|
2010-03-30 14:39:27 +00:00
|
|
|
|
state.mkList(v, args[0]->list.length - 1);
|
|
|
|
|
for (unsigned int n = 0; n < v.list.length; ++n)
|
|
|
|
|
v.list.elems[n] = args[0]->list.elems[n + 1];
|
2006-09-22 14:46:36 +00:00
|
|
|
|
}
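Illustrative use of both list primops:
      builtins.head [ 1 2 3 ]
      => 1
      builtins.tail [ 1 2 3 ]
      => [ 2 3 ]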
|
|
|
|
|
|
|
|
|
|
|


/* Apply a function to every element of a list. */
static void prim_map(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceFunction(*args[0], pos);
    state.forceList(*args[1], pos);

    state.mkList(v, args[1]->list.length);

    for (unsigned int n = 0; n < v.list.length; ++n)
        mkApp(*(v.list.elems[n] = state.allocValue()),
            *args[0], *args[1]->list.elems[n]);
}
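
/* Example (Nix-level sketch): registered below under the global name
   `map' (no `builtins.' prefix required); the result list is built as
   unevaluated applications, so the elements are forced lazily, e.g.

     map (x: x * 2) [ 1 2 3 ]   =>  [ 2 4 6 ]
*/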


/* Filter a list using a predicate; that is, return a list containing
   every element from the list for which the predicate function
   returns true. */
static void prim_filter(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceFunction(*args[0], pos);
    state.forceList(*args[1], pos);

    // FIXME: putting this on the stack is risky.
    Value * vs[args[1]->list.length];
    unsigned int k = 0;

    bool same = true;
    for (unsigned int n = 0; n < args[1]->list.length; ++n) {
        Value res;
        state.callFunction(*args[0], *args[1]->list.elems[n], res, noPos);
        if (state.forceBool(res))
            vs[k++] = args[1]->list.elems[n];
        else
            same = false;
    }

    if (same)
        v = *args[1];
    else {
        state.mkList(v, k);
        for (unsigned int n = 0; n < k; ++n) v.list.elems[n] = vs[n];
    }
}
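
/* Example (Nix-level sketch): exposed as `builtins.filter'; if every
   element satisfies the predicate, the input list is returned as-is
   (the `same' shortcut above), e.g.

     builtins.filter (x: x != 2) [ 1 2 3 2 ]   =>  [ 1 3 ]
*/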


/* Return true if a list contains a given element. */
static void prim_elem(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    bool res = false;
    state.forceList(*args[1], pos);
    for (unsigned int n = 0; n < args[1]->list.length; ++n)
        if (state.eqValues(*args[0], *args[1]->list.elems[n])) {
            res = true;
            break;
        }
    mkBool(v, res);
}
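
/* Example (Nix-level sketch): exposed as `builtins.elem', e.g.

     builtins.elem 2 [ 1 2 3 ]   =>  true
     builtins.elem 5 [ 1 2 3 ]   =>  false
*/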


/* Concatenate a list of lists. */
static void prim_concatLists(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceList(*args[0], pos);
    state.concatLists(v, args[0]->list.length, args[0]->list.elems, pos);
}
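
/* Example (Nix-level sketch): exposed as `builtins.concatLists', e.g.

     builtins.concatLists [ [ 1 2 ] [ 3 ] [ ] ]   =>  [ 1 2 3 ]
*/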


/* Return the length of a list. This is an O(1) time operation. */
static void prim_length(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceList(*args[0], pos);
    mkInt(v, args[0]->list.length);
}
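
/* Example (Nix-level sketch): exposed as `builtins.length', e.g.

     builtins.length [ 1 2 3 ]   =>  3
     builtins.length [ ]         =>  0
*/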


/*************************************************************
 * Integer arithmetic
 *************************************************************/


static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    mkInt(v, state.forceInt(*args[0], pos) + state.forceInt(*args[1], pos));
}


static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    mkInt(v, state.forceInt(*args[0], pos) - state.forceInt(*args[1], pos));
}


static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    mkInt(v, state.forceInt(*args[0], pos) * state.forceInt(*args[1], pos));
}


static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    NixInt i2 = state.forceInt(*args[1], pos);
    if (i2 == 0) throw EvalError(format("division by zero, at %1%") % pos);
    mkInt(v, state.forceInt(*args[0], pos) / i2);
}


static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceValue(*args[0]);
    state.forceValue(*args[1]);
    CompareValues comp;
    mkBool(v, comp(args[0], args[1]));
}
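
/* Example (Nix-level sketch): these are exposed as `builtins.add',
   `builtins.sub', `builtins.mul', `builtins.div' and
   `builtins.lessThan'; integer division truncates and division by
   zero is an error, e.g.

     builtins.add 1 2        =>  3
     builtins.div 7 2        =>  3
     builtins.div 1 0        =>  error: division by zero
     builtins.lessThan 1 2   =>  true
*/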


/*************************************************************
 * String manipulation
 *************************************************************/


/* Convert the argument to a string. Paths are *not* copied to the
   store, so `toString /foo/bar' yields `"/foo/bar"', not
   `"/nix/store/whatever..."'. */
static void prim_toString(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    string s = state.coerceToString(pos, *args[0], context, true, false);
    mkString(v, s, context);
}
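
/* Example (Nix-level sketch): `toString' is registered below as a
   global name, e.g.

     toString /foo/bar   =>  "/foo/bar"
     toString 3          =>  "3"
*/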


/* `substring start len str' returns the substring of `str' starting
   at character position `min(start, stringLength str)' inclusive and
   ending at `min(start + len, stringLength str)'. `start' must be
   non-negative. */
static void prim_substring(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    int start = state.forceInt(*args[0], pos);
    int len = state.forceInt(*args[1], pos);
    PathSet context;
    string s = state.coerceToString(pos, *args[2], context);

    if (start < 0) throw EvalError(format("negative start position in `substring', at %1%") % pos);

    mkString(v, (unsigned int) start >= s.size() ? "" : string(s, start, len), context);
}
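
/* Example (Nix-level sketch): exposed as `builtins.substring'; a start
   position past the end of the string yields "", e.g.

     builtins.substring 2 3 "hello"    =>  "llo"
     builtins.substring 10 3 "hello"   =>  ""
*/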


static void prim_stringLength(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    string s = state.coerceToString(pos, *args[0], context);
    mkInt(v, s.size());
}
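
/* Example (Nix-level sketch): exposed as `builtins.stringLength', e.g.

     builtins.stringLength "hello"   =>  5
*/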


static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    string s = state.coerceToString(pos, *args[0], context);
    mkString(v, s, PathSet());
}


/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a
   builder without causing the derivation to be built (for instance,
   in the derivation that builds NARs in nix-push, when doing
   source-only deployment). This primop marks the string context so
   that builtins.derivation adds the path to drv.inputSrcs rather than
   drv.inputDrvs. */
static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    string s = state.coerceToString(pos, *args[0], context);

    PathSet context2;
    foreach (PathSet::iterator, i, context) {
        Path p = *i;
        if (p.at(0) == '=') p = "~" + string(p, 1);
        context2.insert(p);
    }

    mkString(v, s, context2);
}


/* Return the cryptographic hash of a string in base-16. */
static void prim_hashString(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    string type = state.forceStringNoCtx(*args[0], pos);
    HashType ht = parseHashType(type);
    if (ht == htUnknown)
        throw Error(format("unknown hash type `%1%', at %2%") % type % pos);

    PathSet context; // discarded
    string s = state.forceString(*args[1], context);

    mkString(v, printHash(hashString(ht, s)), context);
}
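
/* Example (Nix-level sketch): exposed as `builtins.hashString'; the
   first argument names the hash algorithm (e.g. "md5", "sha1" or
   "sha256") and the result is a base-16 string, e.g.

     builtins.hashString "sha256" "hello world"
       =>  a 64-character lowercase hex string
*/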


/*************************************************************
 * Versions
 *************************************************************/


static void prim_parseDrvName(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    string name = state.forceStringNoCtx(*args[0], pos);
    DrvName parsed(name);
    state.mkAttrs(v, 2);
    mkString(*state.allocAttr(v, state.sName), parsed.name);
    mkString(*state.allocAttr(v, state.symbols.create("version")), parsed.version);
    v.attrs->sort();
}
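
/* Example (Nix-level sketch): exposed as `builtins.parseDrvName'; the
   name/version split happens at the first dash that is not followed by
   a letter, e.g.

     builtins.parseDrvName "hello-2.1.1"
       =>  { name = "hello"; version = "2.1.1"; }
*/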


static void prim_compareVersions(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    string version1 = state.forceStringNoCtx(*args[0], pos);
    string version2 = state.forceStringNoCtx(*args[1], pos);
    mkInt(v, compareVersions(version1, version2));
}
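
/* Example (Nix-level sketch): exposed as `builtins.compareVersions';
   the result is negative, zero or positive depending on whether the
   first version is older than, equal to or newer than the second, with
   numeric components compared numerically, e.g.

     builtins.compareVersions "2.1" "2.10"   =>  -1
*/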


/*************************************************************
 * Primop registration
 *************************************************************/


void EvalState::createBaseEnv()
{
    baseEnv.up = 0;

    /* Add global constants such as `true' to the base environment. */
    Value v;

    /* `builtins' must be first! */
    mkAttrs(v, 128);
    addConstant("builtins", v);

    mkBool(v, true);
    addConstant("true", v);

    mkBool(v, false);
    addConstant("false", v);

    mkNull(v);
    addConstant("null", v);

    mkInt(v, time(0));
    addConstant("__currentTime", v);

    mkString(v, settings.thisSystem.c_str());
    addConstant("__currentSystem", v);

    mkString(v, nixVersion.c_str());
    addConstant("__nixVersion", v);

    /* Language version. This should be increased every time a new
       language feature gets added. It's not necessary to increase it
       when primops get added, because you can just use `builtins ?
       primOp' to check. */
    mkInt(v, 2);
    addConstant("__langVersion", v);
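
    /* Example (Nix-level sketch): rather than checking __langVersion,
       an expression can usually test for a specific primop directly,
       e.g.

         builtins ? functionArgs   =>  true on any Nix that provides it
    */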

    // Miscellaneous
    addPrimOp("import", 1, prim_import);

    /* ‘scopedImport’ works like ‘import’, except that it takes a set
       of attributes to be added to the lexical scope of the
       expression, essentially extending or overriding the builtin
       variables. For instance, the expression

         scopedImport { x = 1; } ./foo.nix

       where foo.nix contains ‘x’, will evaluate to 1. This has a few
       applications:

       * It allows getting rid of function argument specifications in
         package expressions. For instance, a package expression like

           { stdenv, fetchurl, libfoo }:
           stdenv.mkDerivation { ... buildInputs = [ libfoo ]; }

         can now be written as just

           stdenv.mkDerivation { ... buildInputs = [ libfoo ]; }

         and imported in all-packages.nix as

           bar = scopedImport pkgs ./bar.nix;

         So whereas we once had dependencies listed in three places
         (buildInputs, the function, and the call site), they now only
         need to appear in one place.

       * It allows overriding builtin functions. For instance, to
         trace all calls to ‘map’:

           let
             overrides = {
               map = f: xs: builtins.trace "map called!" (map f xs);
               # Ensure that our override gets propagated by calls to
               # import/scopedImport.
               import = fn: scopedImport overrides fn;
               scopedImport = attrs: fn: scopedImport (overrides // attrs) fn;
               # Also update ‘builtins’.
               builtins = builtins // overrides;
             };
           in scopedImport overrides ./bla.nix

       * Similarly, it allows extending the set of builtin functions.
         For instance, during Nixpkgs/NixOS evaluation, the Nixpkgs
         library functions could be added to the default scope.

       There is a downside: calls to scopedImport are not memoized,
       unlike import. So importing a file multiple times leads to
       multiple parsings/evaluations. It would be possible to construct
       the AST only once, but that would require careful handling of
       variables/environments. */
    addPrimOp("scopedImport", 2, prim_scopedImport);
addPrimOp("__typeOf", 1, prim_typeOf);
|
2007-01-29 15:15:37 +00:00
|
|
|
|
addPrimOp("isNull", 1, prim_isNull);
|
2007-05-16 16:17:04 +00:00
|
|
|
|
addPrimOp("__isFunction", 1, prim_isFunction);
|
2009-02-05 19:35:40 +00:00
|
|
|
|
addPrimOp("__isString", 1, prim_isString);
|
|
|
|
|
addPrimOp("__isInt", 1, prim_isInt);
|
|
|
|
|
addPrimOp("__isBool", 1, prim_isBool);
|
2008-07-11 13:29:04 +00:00
|
|
|
|
addPrimOp("__genericClosure", 1, prim_genericClosure);
|
2007-01-29 15:15:37 +00:00
|
|
|
|
addPrimOp("abort", 1, prim_abort);
|
2007-04-16 15:03:19 +00:00
|
|
|
|
addPrimOp("throw", 1, prim_throw);
|
2009-01-27 14:36:44 +00:00
|
|
|
|
addPrimOp("__addErrorContext", 2, prim_addErrorContext);
|
2009-08-25 16:06:46 +00:00
|
|
|
|
addPrimOp("__tryEval", 1, prim_tryEval);
|
2007-01-29 15:15:37 +00:00
|
|
|
|
addPrimOp("__getEnv", 1, prim_getEnv);
|
2007-08-18 22:12:00 +00:00
|
|
|
|
addPrimOp("__trace", 2, prim_trace);
|
2004-08-04 11:27:53 +00:00
|
|
|
|
|
2007-01-29 15:11:32 +00:00
|
|
|
|

    // Paths
    addPrimOp("__toPath", 1, prim_toPath);
    addPrimOp("__storePath", 1, prim_storePath);
    addPrimOp("__pathExists", 1, prim_pathExists);
    addPrimOp("baseNameOf", 1, prim_baseNameOf);
    addPrimOp("dirOf", 1, prim_dirOf);
    addPrimOp("__readFile", 1, prim_readFile);

    // Creating files
    addPrimOp("__toXML", 1, prim_toXML);
    addPrimOp("__toJSON", 1, prim_toJSON);
    addPrimOp("__toFile", 2, prim_toFile);
    addPrimOp("__filterSource", 2, prim_filterSource);

    // Sets
    addPrimOp("__attrNames", 1, prim_attrNames);
    addPrimOp("__getAttr", 2, prim_getAttr);
    addPrimOp("__unsafeGetAttrPos", 2, prim_unsafeGetAttrPos);
    addPrimOp("__hasAttr", 2, prim_hasAttr);
    addPrimOp("__isAttrs", 1, prim_isAttrs);
    addPrimOp("removeAttrs", 2, prim_removeAttrs);
    addPrimOp("__listToAttrs", 1, prim_listToAttrs);

    /* Two primops: builtins.intersectAttrs and builtins.functionArgs.
       intersectAttrs returns the (right-biased) intersection between
       two attribute sets, i.e. every attribute from the second set
       that also exists in the first. functionArgs returns the set of
       attributes expected by a function.

       The main goal of these is to allow the elimination of most of
       all-packages.nix. Most package instantiations in
       all-packages.nix have this form:

         foo = import ./foo.nix {
           inherit a b c;
         };

       With intersectAttrs and functionArgs, this can be written as:

         foo = callPackage (import ./foo.nix) { };

       where

         callPackage = f: args:
           f ((builtins.intersectAttrs (builtins.functionArgs f) pkgs) // args);

       I.e., foo.nix is called with all attributes from "pkgs" that it
       actually needs (e.g., pkgs.a, pkgs.b and pkgs.c). (callPackage
       can do any other generic package-level stuff we might want,
       such as applying makeOverridable.) Of course, the automatically
       supplied arguments can be overridden if needed, e.g.

         foo = callPackage (import ./foo.nix) {
           c = c_version_2;
         };

       but for the vast majority of packages, this won't be needed.

       The advantages are to reduce the amount of typing needed to add
       a dependency (from three sites to two), and to reduce the
       number of trivial commits to all-packages.nix. For the former,
       there have been two previous attempts:

       - Use "args: with args;" in the package's function definition.
         This however obscures the actual expected arguments of a
         function, which is very bad.

       - Use "{ arg1, arg2, ... }:" in the package's function
         definition (i.e. use the ellipsis "..." to allow arbitrary
         additional arguments), and then call the function with all of
         "pkgs" as an argument. But this inhibits error detection if
         you call it with a misspelled (or obsolete) argument. */
    addPrimOp("__intersectAttrs", 2, prim_intersectAttrs);
    addPrimOp("__functionArgs", 1, prim_functionArgs);

    // Lists
    addPrimOp("__isList", 1, prim_isList);
    addPrimOp("__elemAt", 2, prim_elemAt);
    addPrimOp("__head", 1, prim_head);
    addPrimOp("__tail", 1, prim_tail);
    addPrimOp("map", 2, prim_map);
    addPrimOp("__filter", 2, prim_filter);
    addPrimOp("__elem", 2, prim_elem);
    addPrimOp("__concatLists", 1, prim_concatLists);
    addPrimOp("__length", 1, prim_length);

    // Integer arithmetic
    addPrimOp("__add", 2, prim_add);
    addPrimOp("__sub", 2, prim_sub);
    addPrimOp("__mul", 2, prim_mul);
    addPrimOp("__div", 2, prim_div);
    addPrimOp("__lessThan", 2, prim_lessThan);

    // String manipulation
    addPrimOp("toString", 1, prim_toString);
    addPrimOp("__substring", 3, prim_substring);
    addPrimOp("__stringLength", 1, prim_stringLength);
    addPrimOp("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
    addPrimOp("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
    addPrimOp("__hashString", 2, prim_hashString);

    // Versions
    addPrimOp("__parseDrvName", 1, prim_parseDrvName);
    addPrimOp("__compareVersions", 2, prim_compareVersions);

    // Derivations
    addPrimOp("derivationStrict", 1, prim_derivationStrict);

    /* Add a wrapper around the derivation primop that computes the
       `drvPath' and `outPath' attributes lazily. */
    string path = findFile("nix/derivation.nix");
    assert(!path.empty());
    sDerivationNix = symbols.create(path);
    evalFile(path, v);
    addConstant("derivation", v);

    /* Add a value containing the current Nix expression search path. */
    mkList(v, searchPath.size());
    int n = 0;
    for (auto & i : searchPath) {
        Value * v2 = v.list.elems[n++] = allocValue();
        mkAttrs(*v2, 2);
        mkString(*allocAttr(*v2, symbols.create("path")), i.second);
        mkString(*allocAttr(*v2, symbols.create("prefix")), i.first);
    }
    addConstant("nixPath", v);
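
    /* Example (Nix-level sketch): with NIX_PATH=nixpkgs=/data/nixpkgs
       (a hypothetical setting), `builtins.nixPath' would contain an
       element like { path = "/data/nixpkgs"; prefix = "nixpkgs"; }. */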

    /* Now that we've added all primops, sort the `builtins' set,
       because attribute lookups expect it to be sorted. */
    baseEnv.values[0]->attrs->sort();
}
|
2006-09-04 21:06:23 +00:00
|
|
|
|
|
2007-01-29 14:23:09 +00:00
|
|
|
|
|
2006-09-04 21:06:23 +00:00
|
|
|
|
}
|