2006-09-04 21:06:23 +00:00
#include "references.hh"
#include "pathlocks.hh"
#include "globals.hh"
2006-11-30 17:43:04 +00:00
#include "local-store.hh"
2006-09-04 21:06:23 +00:00
#include "util.hh"
2009-10-22 08:28:33 +00:00
#include "archive.hh"
2013-08-07 11:51:55 +00:00
#include "affinity.hh"
2015-07-20 02:30:16 +00:00
#include "builtins.hh"
2019-10-21 15:17:15 +00:00
#include "builtins/buildenv.hh"
2020-04-06 21:57:28 +00:00
#include "filetransfer.hh"
2016-04-29 11:57:08 +00:00
#include "finally.hh"
2016-05-04 13:46:25 +00:00
#include "compression.hh"
exportReferencesGraph: Export more complete info in JSON format
This writes info about every path in the closure in the same format as
‘nix path-info --json’. Thus it also includes NAR hashes and sizes.
Example:
  [
    {
      "path": "/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10",
      "narHash": "sha256:0ckdc4z20kkmpqdilx0wl6cricxv90lh85xpv2qljppcmz6vzcxl",
      "narSize": 197648,
      "references": [
        "/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10",
        "/nix/store/27binbdy296qvjycdgr1535v8872vz3z-glibc-2.24"
      ],
      "closureSize": 20939776
    },
    {
      "path": "/nix/store/27binbdy296qvjycdgr1535v8872vz3z-glibc-2.24",
      "narHash": "sha256:1nfn3m3p98y1c0kd0brp80dn9n5mycwgrk183j17rajya0h7gax3",
      "narSize": 20742128,
      "references": [
        "/nix/store/27binbdy296qvjycdgr1535v8872vz3z-glibc-2.24"
      ],
      "closureSize": 20742128
    }
  ]
Fixes #1134.
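/* Illustrative sketch (not part of the original source): roughly how one
   entry of the JSON format above could be produced with the JSONObject /
   JSONList helpers from json.hh, assuming a Store reference 'store' and a
   ValidPathInfo 'info'. The function name and parameters are hypothetical. */
#if 0
static void emitClosureEntry(JSONList & list, Store & store, const ValidPathInfo & info)
{
    auto obj = list.object();
    obj.attr("path", store.printStorePath(info.path));
    obj.attr("narHash", info.narHash.to_string(Base32, true));
    obj.attr("narSize", info.narSize);
    auto refs = obj.list("references");
    for (auto & ref : info.references)
        refs.elem(store.printStorePath(ref));
}
#endif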
2017-01-26 19:36:20 +00:00
#include "json.hh"
2017-08-14 18:14:55 +00:00
#include "nar-info.hh"
2018-09-28 12:31:16 +00:00
#include "parsed-derivations.hh"
2019-04-01 19:09:49 +00:00
#include "machines.hh"
Recursive Nix support
This allows Nix builders to call Nix to build derivations, with some
limitations.
Example:
  let nixpkgs = fetchTarball channel:nixos-18.03; in
  with import <nixpkgs> {};
  runCommand "foo"
    {
      buildInputs = [ nix jq ];
      NIX_PATH = "nixpkgs=${nixpkgs}";
    }
    ''
      hello=$(nix-build -E '(import <nixpkgs> {}).hello.overrideDerivation (args: { name = "hello-3.5"; })')
      $hello/bin/hello
      mkdir -p $out/bin
      ln -s $hello/bin/hello $out/bin/hello
      nix path-info -r --json $hello | jq .
    ''
This derivation makes a recursive Nix call to build GNU Hello and
symlinks it from its $out, i.e.
  # ll ./result/bin/
  lrwxrwxrwx 1 root root 63 Jan 1 1970 hello -> /nix/store/s0awxrs71gickhaqdwxl506hzccb30y5-hello-3.5/bin/hello
  # nix-store -qR ./result
  /nix/store/hwwqshlmazzjzj7yhrkyjydxamvvkfd3-glibc-2.26-131
  /nix/store/s0awxrs71gickhaqdwxl506hzccb30y5-hello-3.5
  /nix/store/sgmvvyw8vhfqdqb619bxkcpfn9lvd8ss-foo
This is implemented as follows:
* Before running the outer builder, Nix creates a Unix domain socket
'.nix-socket' in the builder's temporary directory and sets
$NIX_REMOTE to point to it. It starts a thread to process
connections to this socket. (Thus you don't need to have nix-daemon
running.)
* The daemon thread uses a wrapper store (RestrictedStore) to keep
track of paths added through recursive Nix calls, to implement some
restrictions (see below), and to do some censorship (e.g. for
purity, queryPathInfo() won't return impure information such as
signatures and timestamps).
* After the build finishes, the output paths are scanned for
references to the paths added through recursive Nix calls (in
addition to the inputs closure). Thus, in the example above, $out
has a reference to $hello.
The main restriction on recursive Nix calls is that they cannot do
arbitrary substitutions. For example, doing
nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10
is forbidden unless /nix/store/kmwd... is in the inputs closure or
previously built by a recursive Nix call. This is to prevent
irreproducible derivations that have hidden dependencies on
substituters or the current store contents. Building a derivation is
fine, however, and Nix will use substitutes if available. In other
words, the builder has to present proof that it knows how to build a
desired store path from scratch by constructing a derivation graph for
that path.
Probably we should also disallow instantiating/building fixed-output
derivations (specifically, those that access the network, but
currently we have no way to mark fixed-output derivations that don't
access the network). Otherwise sandboxed derivations can bypass
sandbox restrictions and access the network.
When sandboxing is enabled, we make paths appear in the sandbox of the
builder by entering the mount namespace of the builder and
bind-mounting each path. This is tricky because we do a pivot_root()
in the builder to change the root directory of its mount namespace,
and thus the host /nix/store is not visible in the mount namespace of
the builder. To get around this, just before doing pivot_root(), we
branch a second mount namespace that shares its /nix/store mountpoint
with the parent.
Recursive Nix currently doesn't work on macOS in sandboxed mode
(because we can't change the sandbox policy of a running build) and in
non-root mode (because setns() barfs).
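/* Illustrative sketch (not part of the original source): the core of the
   restriction described above. A wrapper store only answers queries for
   paths in the inputs closure or paths produced by earlier recursive Nix
   calls; all other paths appear to be absent. The type and member names
   below are hypothetical. */
#if 0
struct RecursiveNixPathFilter
{
    StorePathSet allowedPaths; // inputs closure + outputs of recursive builds

    bool isAllowed(const StorePath & path) const
    {
        return allowedPaths.count(path) != 0;
    }

    /* Called when a recursive Nix call finishes building a path, so that
       the outer builder (and the final reference scan) can see it. */
    void addAllowed(const StorePath & path)
    {
        allowedPaths.insert(path);
    }
};
#endif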
2018-10-02 14:01:26 +00:00
#include "daemon.hh"
2019-12-05 18:11:09 +00:00
#include "worker-protocol.hh"
2020-08-07 19:09:26 +00:00
#include "topo-sort.hh"
2020-09-21 16:40:11 +00:00
#include "callback.hh"
2006-09-04 21:06:23 +00:00
2015-08-04 09:12:31 +00:00
#include <algorithm>
#include <iostream>
2003-07-20 19:29:38 +00:00
#include <map>
2005-10-17 15:33:24 +00:00
#include <sstream>
2016-04-29 11:57:08 +00:00
#include <thread>
#include <future>
2016-12-06 20:58:04 +00:00
#include <chrono>
2017-10-25 11:01:50 +00:00
#include <regex>
2018-10-22 19:49:56 +00:00
#include <queue>
2019-11-06 15:53:02 +00:00
#include <climits>
2003-07-20 19:29:38 +00:00
2006-05-30 11:37:21 +00:00
#include <sys/time.h>
2009-09-23 12:57:15 +00:00
#include <sys/wait.h>
2004-05-11 18:05:44 +00:00
#include <sys/types.h>
#include <sys/stat.h>
2013-08-22 15:57:39 +00:00
#include <sys/utsname.h>
2016-04-14 11:39:14 +00:00
#include <sys/resource.h>
2018-06-12 11:05:14 +00:00
#include <sys/socket.h>
2018-10-02 14:01:26 +00:00
#include <sys/un.h>
2004-05-11 18:05:44 +00:00
#include <fcntl.h>
2018-06-12 11:05:14 +00:00
#include <netdb.h>
2004-05-11 18:05:44 +00:00
#include <unistd.h>
2004-07-01 11:11:16 +00:00
#include <errno.h>
2010-06-24 17:51:04 +00:00
#include <cstring>
2019-05-17 20:29:15 +00:00
#include <termios.h>
2020-04-21 00:32:50 +00:00
#include <poll.h>
2004-05-11 18:05:44 +00:00
2005-10-17 15:33:24 +00:00
#include <pwd.h>
#include <grp.h>
2015-12-03 15:30:19 +00:00
/* Includes required for chroot support. */
#if __linux__
2012-06-23 04:28:35 +00:00
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
2009-01-12 16:30:32 +00:00
#include <sys/personality.h>
2015-10-21 12:45:56 +00:00
#include <sys/mman.h>
2015-12-03 15:30:19 +00:00
#include <sched.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/syscall.h>
2018-02-18 07:35:01 +00:00
#if HAVE_SECCOMP
2017-05-29 09:34:24 +00:00
#include <seccomp.h>
2018-02-18 07:35:01 +00:00
#endif
2015-12-03 15:30:19 +00:00
#define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old))
2009-01-12 16:30:32 +00:00
#endif
2014-02-17 13:15:56 +00:00
#if HAVE_STATVFS
#include <sys/statvfs.h>
#endif
2017-10-25 11:01:50 +00:00
#include <nlohmann/json.hpp>
2009-01-12 16:30:32 +00:00
2006-09-04 21:06:23 +00:00
namespace nix {

using std::map;
2012-07-27 13:59:18 +00:00
2003-07-20 19:29:38 +00:00
2004-05-11 18:05:44 +00:00
static string pathNullDevice = "/dev/null";
2004-06-18 18:09:32 +00:00
/* Forward definition. */
class Worker;
2014-01-21 17:29:55 +00:00
struct HookInstance;
2004-06-18 18:09:32 +00:00
/* A pointer to a goal. */
2020-06-17 17:26:37 +00:00
struct Goal;
2015-07-20 01:15:45 +00:00
class DerivationGoal;
2014-03-29 23:49:23 +00:00
typedef std::shared_ptr<Goal> GoalPtr;
typedef std::weak_ptr<Goal> WeakGoalPtr;
2004-06-18 18:09:32 +00:00
2014-11-24 15:48:04 +00:00
struct CompareGoalPtrs {
2017-12-11 18:05:14 +00:00
    bool operator () (const GoalPtr & a, const GoalPtr & b) const;
2014-11-24 15:48:04 +00:00
};
2004-06-25 15:36:09 +00:00
/* Set of goals. */
2014-11-24 15:48:04 +00:00
typedef set<GoalPtr, CompareGoalPtrs> Goals;
2014-03-29 23:49:23 +00:00
typedef list<WeakGoalPtr> WeakGoals;
2004-06-18 18:09:32 +00:00
/* A map of paths to goals (and the other way around). */
2019-12-05 18:11:09 +00:00
typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;
2004-06-18 18:09:32 +00:00
2020-06-15 17:25:35 +00:00
struct Goal : public std::enable_shared_from_this<Goal>
2004-06-18 18:09:32 +00:00
{
2013-01-02 11:38:28 +00:00
    typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
2012-07-27 13:59:18 +00:00
2004-06-18 18:09:32 +00:00
    /* Backlink to the worker. */
    Worker & worker;
2004-06-25 15:36:09 +00:00
    /* Goals that this goal is waiting for. */
    Goals waitees;

    /* Goals waiting for this one to finish.  Must use weak pointers
       here to prevent cycles. */
    WeakGoals waiters;
2004-06-18 18:09:32 +00:00
2004-08-30 11:51:36 +00:00
    /* Number of goals we are/were waiting for that have failed. */
2004-06-25 10:21:44 +00:00
    unsigned int nrFailed;
2012-07-08 22:39:24 +00:00
    /* Number of substitution goals we are/were waiting for that
       failed because there are no substituters. */
    unsigned int nrNoSubstituters;
2013-01-02 11:38:28 +00:00
    /* Number of substitution goals we are/were waiting for that
       failed because they had unsubstitutable references. */
    unsigned int nrIncompleteClosure;
2005-02-18 09:50:20 +00:00
    /* Name of this goal for debugging purposes. */
    string name;
2005-02-23 11:19:27 +00:00
    /* Whether the goal is finished. */
    ExitCode exitCode;
2020-06-15 17:25:35 +00:00
    /* Exception containing an error message, if any. */
    std::optional<Error> ex;
2005-01-19 11:16:11 +00:00
    Goal(Worker & worker) : worker(worker)
2004-06-18 18:09:32 +00:00
    {
2013-01-02 11:38:28 +00:00
        nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
2005-02-23 11:19:27 +00:00
        exitCode = ecBusy;
2004-06-18 18:09:32 +00:00
    }

    virtual ~Goal()
    {
2005-02-18 09:50:20 +00:00
        trace("goal destroyed");
2004-06-18 18:09:32 +00:00
    }

    virtual void work() = 0;
2004-06-25 15:36:09 +00:00
    void addWaitee(GoalPtr waitee);
2004-06-18 18:09:32 +00:00
2006-12-08 17:26:21 +00:00
    virtual void waiteeDone(GoalPtr waitee, ExitCode result);
2004-06-25 10:21:44 +00:00
2005-10-17 15:33:24 +00:00
    virtual void handleChildOutput(int fd, const string & data)
    {
        abort();
    }

    virtual void handleEOF(int fd)
2004-06-29 09:41:50 +00:00
    {
        abort();
    }
2018-03-14 18:01:22 +00:00
    void trace(const FormatOrString & fs);
2005-02-18 09:50:20 +00:00
    string getName()
    {
        return name;
    }
2012-07-27 13:59:18 +00:00
2015-07-20 01:15:45 +00:00
    /* Callback in case of a timeout.  It should wake up its waiters,
       get rid of any running child processes that are being monitored
       by the worker (important!), etc. */
2020-06-15 17:25:35 +00:00
    virtual void timedOut(Error && ex) = 0;
2006-12-08 17:26:21 +00:00
2014-11-24 15:48:04 +00:00
    virtual string key() = 0;
2020-06-15 17:25:35 +00:00
    void amDone(ExitCode result, std::optional<Error> ex = {});
2004-06-18 18:09:32 +00:00
};
2017-12-11 18:05:14 +00:00
bool CompareGoalPtrs::operator () (const GoalPtr & a, const GoalPtr & b) const {
2014-11-24 15:48:04 +00:00
    string s1 = a->key();
    string s2 = b->key();
    return s1 < s2;
}
2016-12-06 20:58:04 +00:00
typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
2004-06-18 18:09:32 +00:00
/* A mapping used to remember for each child process to what goal it
2005-10-17 15:33:24 +00:00
   belongs, and file descriptors for receiving log data and output
   path creation commands. */
2004-06-19 21:45:04 +00:00
struct Child
{
2004-06-25 15:36:09 +00:00
    WeakGoalPtr goal;
2016-08-30 13:45:39 +00:00
    Goal * goal2; // ugly hackery
2005-10-17 15:33:24 +00:00
    set<int> fds;
2013-04-23 16:04:59 +00:00
    bool respectTimeouts;
2004-06-19 21:45:04 +00:00
    bool inBuildSlot;
2016-12-06 20:58:04 +00:00
    steady_time_point lastOutput; /* time we last got output on stdout/stderr */
    steady_time_point timeStarted;
2004-06-19 21:45:04 +00:00
};
2004-06-18 18:09:32 +00:00
/* The worker class. */
class Worker
{
private:
2004-06-25 15:36:09 +00:00
    /* Note: the worker should only have strong pointers to the
       top-level goals. */

    /* The top-level goals of the worker. */
    Goals topGoals;
2004-06-18 18:09:32 +00:00
    /* Goals that are ready to do some work. */
2004-06-25 15:36:09 +00:00
    WeakGoals awake;
2004-06-18 18:09:32 +00:00
    /* Goals waiting for a build slot. */
2004-06-25 15:36:09 +00:00
    WeakGoals wantingToBuild;
2004-06-18 18:09:32 +00:00
    /* Child processes currently running. */
2016-04-29 11:57:08 +00:00
    std::list<Child> children;
2004-06-18 18:09:32 +00:00
2009-03-31 21:14:07 +00:00
    /* Number of build slots occupied.  This includes local builds and
       substitutions but not remote builds via the build hook. */
    unsigned int nrLocalBuilds;
2004-06-19 21:45:04 +00:00
2005-01-19 11:16:11 +00:00
    /* Maps used to prevent multiple instantiations of a goal for the
2005-01-20 16:01:07 +00:00
       same derivation / path. */
2005-01-19 11:16:11 +00:00
    WeakGoalMap derivationGoals;
2004-06-25 15:36:09 +00:00
    WeakGoalMap substitutionGoals;
2004-06-18 18:09:32 +00:00
2007-08-28 11:36:17 +00:00
    /* Goals waiting for busy paths to be unlocked. */
    WeakGoals waitingForAnyGoal;
2012-07-27 13:59:18 +00:00
2009-03-23 01:05:54 +00:00
    /* Goals sleeping for a few seconds (polling a lock). */
    WeakGoals waitingForAWhile;

    /* Last time the goals in `waitingForAWhile' were woken up. */
2016-12-06 20:58:04 +00:00
    steady_time_point lastWokenUp;
2011-06-30 15:19:13 +00:00
2016-04-08 16:07:13 +00:00
    /* Cache for pathContentsGood(). */
2019-12-05 18:11:09 +00:00
    std::map<StorePath, bool> pathContentsGoodCache;
2016-04-08 16:07:13 +00:00
2004-06-18 18:09:32 +00:00
public:
2017-08-14 18:14:55 +00:00
    const Activity act;
2017-08-15 13:31:59 +00:00
    const Activity actDerivations;
2017-08-14 20:12:36 +00:00
    const Activity actSubstitutions;
2017-08-14 18:14:55 +00:00
2010-12-13 16:53:23 +00:00
    /* Set if at least one derivation had a BuildError (i.e. permanent
       failure). */
    bool permanentFailure;
2014-08-17 17:09:03 +00:00
    /* Set if at least one derivation had a timeout. */
    bool timedOut;
2019-07-01 22:12:12 +00:00
    /* Set if at least one derivation fails with a hash mismatch. */
    bool hashMismatch;

    /* Set if at least one derivation is not deterministic in check mode. */
    bool checkMismatch;
2008-06-09 13:52:45 +00:00
    LocalStore & store;
2017-01-19 14:15:09 +00:00
    std::unique_ptr<HookInstance> hook;
2012-07-27 13:59:18 +00:00
2017-08-15 13:31:59 +00:00
    uint64_t expectedBuilds = 0;
    uint64_t doneBuilds = 0;
    uint64_t failedBuilds = 0;
    uint64_t runningBuilds = 0;
2017-08-14 20:12:36 +00:00
    uint64_t expectedSubstitutions = 0;
    uint64_t doneSubstitutions = 0;
2017-08-15 13:31:59 +00:00
    uint64_t failedSubstitutions = 0;
2017-08-14 20:42:17 +00:00
    uint64_t runningSubstitutions = 0;
2017-08-14 18:14:55 +00:00
    uint64_t expectedDownloadSize = 0;
    uint64_t doneDownloadSize = 0;
    uint64_t expectedNarSize = 0;
    uint64_t doneNarSize = 0;
2017-10-24 09:00:16 +00:00
    /* Whether to ask the build hook if it can build a derivation.  If
       it answers with "decline-permanently", we don't try again. */
    bool tryBuildHook = true;
2008-06-09 13:52:45 +00:00
    Worker(LocalStore & store);
2004-06-18 18:09:32 +00:00
    ~Worker();
2004-06-25 15:36:09 +00:00
    /* Make a goal (with caching). */
2020-08-22 20:44:47 +00:00
    /* derivation goal */
private:
    std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
        const StorePath & drvPath, const StringSet & wantedOutputs,
        std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);
public:
    std::shared_ptr<DerivationGoal> makeDerivationGoal(
        const StorePath & drvPath,
        const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
    std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(
        const StorePath & drvPath, const BasicDerivation & drv,
        const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);

    /* substitution goal */
2020-06-22 17:08:11 +00:00
    GoalPtr makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
2004-06-18 18:09:32 +00:00
2004-06-25 15:36:09 +00:00
    /* Remove a dead goal. */
2004-06-18 18:09:32 +00:00
    void removeGoal(GoalPtr goal);

    /* Wake up a goal (i.e., there is something for it to do). */
    void wakeUp(GoalPtr goal);
2009-03-31 21:14:07 +00:00
    /* Return the number of local build and substitution processes
       currently running (but not remote builds via the build
       hook). */
    unsigned int getNrLocalBuilds();
2004-06-18 18:09:32 +00:00
2006-12-08 17:26:21 +00:00
    /* Registers a running child process.  `inBuildSlot' means that
       the process counts towards the jobs limit. */
2016-04-29 11:57:08 +00:00
    void childStarted(GoalPtr goal, const set<int> & fds,
        bool inBuildSlot, bool respectTimeouts);
2006-12-08 17:26:21 +00:00
    /* Unregisters a running child process.  `wakeSleepers' should be
       false if there is no sense in waking up goals that are sleeping
       because they can't run yet (e.g., there is no free build slot,
       or the hook would still say `postpone'). */
2016-08-30 13:45:39 +00:00
    void childTerminated(Goal * goal, bool wakeSleepers = true);
2004-06-18 18:09:32 +00:00
2006-12-08 17:26:21 +00:00
    /* Put `goal' to sleep until a build slot becomes available (which
       might be right away). */
    void waitForBuildSlot(GoalPtr goal);
2007-08-28 11:36:17 +00:00
    /* Wait for any goal to finish.  Pretty indiscriminate way to
       wait for some resource that some other goal is holding. */
    void waitForAnyGoal(GoalPtr goal);
2012-07-27 13:59:18 +00:00
2009-03-23 01:05:54 +00:00
    /* Wait for a few seconds and then retry this goal.  Used when
       waiting for a lock held by another process.  This kind of
       polling is inefficient, but POSIX doesn't really provide a way
       to wait for multiple locks in the main select() loop. */
    void waitForAWhile(GoalPtr goal);
2012-07-27 13:59:18 +00:00
2005-02-23 11:19:27 +00:00
    /* Loop until the specified top-level goals have finished. */
    void run(const Goals & topGoals);
2004-06-18 18:09:32 +00:00
    /* Wait for input to become available. */
    void waitForInput();
2010-12-13 16:53:23 +00:00
    unsigned int exitStatus();
2016-04-08 16:07:13 +00:00
    /* Check whether the given valid path exists and has the right
       contents. */
2019-12-05 18:11:09 +00:00
    bool pathContentsGood(const StorePath & path);
2016-04-08 16:07:13 +00:00
2020-06-16 20:20:18 +00:00
    void markContentsGood(const StorePath & path);
2017-08-14 18:14:55 +00:00
    void updateProgress()
    {
2017-08-15 13:31:59 +00:00
        actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds);
        actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
2020-04-06 21:43:43 +00:00
        act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize);
2017-08-16 14:38:23 +00:00
        act.setExpected(actCopyPath, expectedNarSize + doneNarSize);
2017-08-14 18:14:55 +00:00
    }
2004-06-18 18:09:32 +00:00
};
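/* Illustrative sketch (not part of the original source): how a top-level
   entry point typically drives the Worker declared above -- create it, turn
   the requested derivations into goals, then run the event loop. The
   surrounding function and variable names are hypothetical. */
#if 0
static void buildTopLevel(LocalStore & store, const std::vector<StorePath> & drvPaths)
{
    Worker worker(store);

    Goals goals;
    for (auto & drvPath : drvPaths)
        goals.insert(worker.makeDerivationGoal(drvPath, StringSet(), bmNormal));

    worker.run(goals);

    if (worker.exitStatus() != 0)
        throw Error("some builds failed");
}
#endif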
//////////////////////////////////////////////////////////////////////
2014-03-29 23:49:23 +00:00
void addToWeakGoals(WeakGoals & goals, GoalPtr p)
{
    // FIXME: necessary?
2014-11-24 15:48:04 +00:00
    // FIXME: O(n)
2015-07-17 17:24:28 +00:00
    for (auto & i : goals)
        if (i.lock() == p) return;
2014-03-29 23:49:23 +00:00
    goals.push_back(p);
}
2004-06-25 15:36:09 +00:00
void Goal::addWaitee(GoalPtr waitee)
2004-06-18 18:09:32 +00:00
{
2004-06-25 15:36:09 +00:00
    waitees.insert(waitee);
2014-03-29 23:49:23 +00:00
    addToWeakGoals(waitee->waiters, shared_from_this());
2004-06-18 18:09:32 +00:00
}
2006-12-08 17:26:21 +00:00
void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
2004-06-18 18:09:32 +00:00
{
2004-06-25 15:36:09 +00:00
    assert(waitees.find(waitee) != waitees.end());
    waitees.erase(waitee);
2005-02-18 09:50:20 +00:00
2020-06-15 17:25:35 +00:00
    trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size()));
2012-07-27 13:59:18 +00:00
2013-01-02 11:38:28 +00:00
    if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
2012-07-08 22:39:24 +00:00
    if (result == ecNoSubstituters) ++nrNoSubstituters;
2012-07-27 13:59:18 +00:00
2013-01-02 11:38:28 +00:00
    if (result == ecIncompleteClosure) ++nrIncompleteClosure;
2012-07-30 23:55:41 +00:00
    if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
2004-06-28 10:42:57 +00:00
        /* If we failed and keepGoing is not set, we remove all
           remaining waitees. */
2015-07-17 17:24:28 +00:00
        for (auto & goal : waitees) {
2004-06-28 10:42:57 +00:00
            WeakGoals waiters2;
2015-07-17 17:24:28 +00:00
            for (auto & j : goal->waiters)
                if (j.lock() != shared_from_this()) waiters2.push_back(j);
2004-06-28 10:42:57 +00:00
            goal->waiters = waiters2;
        }
        waitees.clear();
2004-07-01 16:24:35 +00:00
2004-06-25 15:36:09 +00:00
        worker.wakeUp(shared_from_this());
2004-06-28 10:42:57 +00:00
    }
2004-06-18 18:09:32 +00:00
}
2020-06-15 17:25:35 +00:00
void Goal::amDone(ExitCode result, std::optional<Error> ex)
2004-06-18 18:09:32 +00:00
{
2004-06-25 15:36:09 +00:00
    trace("done");
2005-02-23 11:19:27 +00:00
    assert(exitCode == ecBusy);
2013-01-02 11:38:28 +00:00
    assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
2006-12-08 17:26:21 +00:00
    exitCode = result;
2020-06-15 17:25:35 +00:00
    if (ex) {
        if (!waiters.empty())
            logError(ex->info());
        else
            this->ex = std::move(*ex);
    }
2015-07-17 17:24:28 +00:00
    for (auto & i : waiters) {
        GoalPtr goal = i.lock();
2006-12-08 17:26:21 +00:00
        if (goal) goal->waiteeDone(shared_from_this(), result);
2004-06-25 15:36:09 +00:00
    }
    waiters.clear();
2004-06-18 18:09:32 +00:00
    worker.removeGoal(shared_from_this());
}
2018-03-14 18:01:22 +00:00
void Goal::trace(const FormatOrString & fs)
2004-06-25 15:36:09 +00:00
{
2018-03-14 18:01:22 +00:00
    debug("%1%: %2%", name, fs.s);
2004-06-25 15:36:09 +00:00
}
2004-06-18 18:09:32 +00:00
2004-06-22 10:21:44 +00:00
//////////////////////////////////////////////////////////////////////
/* Common initialisation performed in child processes. */
2012-11-15 18:35:18 +00:00
static void commonChildInit(Pipe & logPipe)
2004-06-22 10:21:44 +00:00
{
2017-02-01 12:00:21 +00:00
    restoreSignals();
2006-02-03 14:20:59 +00:00
    /* Put the child in a separate session (and thus a separate
       process group) so that it has no controlling terminal (meaning
       that e.g. ssh cannot open /dev/tty) and it doesn't receive
       terminal signals. */
    if (setsid() == -1)
2020-04-21 23:07:07 +00:00
        throw SysError("creating a new session");
2012-07-27 13:59:18 +00:00
2004-06-22 10:21:44 +00:00
    /* Dup the write side of the logger pipe into stderr. */
2016-07-11 19:44:44 +00:00
    if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1)
2004-06-22 10:21:44 +00:00
        throw SysError("cannot pipe standard error into log file");
2012-07-27 13:59:18 +00:00
2006-02-03 14:20:59 +00:00
    /* Dup stderr to stdout. */
2004-06-22 10:21:44 +00:00
    if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1)
        throw SysError("cannot dup stderr into stdout");

    /* Reroute stdin to /dev/null. */
    int fdDevNull = open(pathNullDevice.c_str(), O_RDWR);
    if (fdDevNull == -1)
2020-04-21 23:07:07 +00:00
        throw SysError("cannot open '%1%'", pathNullDevice);
2004-06-22 10:21:44 +00:00
    if (dup2(fdDevNull, STDIN_FILENO) == -1)
        throw SysError("cannot dup null device into stdin");
2012-03-05 19:28:09 +00:00
    close(fdDevNull);
2004-06-22 10:21:44 +00:00
}
2019-12-05 18:11:09 +00:00
void handleDiffHook(
    uid_t uid, uid_t gid,
    const Path & tryA, const Path & tryB,
    const Path & drvPath, const Path & tmpDir)
2019-05-10 20:39:31 +00:00
{
    auto diffHook = settings.diffHook;
    if (diffHook != "" && settings.runDiffHook) {
2019-05-11 20:35:53 +00:00
        try {
            RunOptions diffHookOptions(diffHook, {tryA, tryB, drvPath, tmpDir});
            diffHookOptions.searchPath = true;
            diffHookOptions.uid = uid;
            diffHookOptions.gid = gid;
            diffHookOptions.chdir = "/";

            auto diffRes = runProgram(diffHookOptions);
            if (!statusOk(diffRes.first))
2020-05-04 22:19:57 +00:00
                throw ExecError(diffRes.first,
                    "diff-hook program '%1%' %2%",
                    diffHook,
                    statusToString(diffRes.first));
2019-05-11 20:35:53 +00:00
            if (diffRes.second != "")
                printError(chomp(diffRes.second));
        } catch (Error & error) {
2020-05-04 22:19:57 +00:00
            ErrorInfo ei = error.info();
2020-05-13 21:56:39 +00:00
            ei.hint = hintfmt("diff hook execution failed: %s",
                (error.info().hint.has_value() ? error.info().hint->str() : ""));
2020-05-04 22:19:57 +00:00
            logError(ei);
2019-05-11 20:35:53 +00:00
        }
2019-05-10 20:39:31 +00:00
    }
}
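/* Illustrative sketch (not part of the original source): how handleDiffHook()
   above might be invoked after a non-deterministic rebuild, comparing the
   previous output with the freshly built one. The variables 'prevPath',
   'curPath', 'drvPathStr' and 'tmpDir' are hypothetical and stand for paths
   known at that point in the build. */
#if 0
handleDiffHook(
    buildUser ? buildUser->getUID() : getuid(),
    buildUser ? buildUser->getGID() : getgid(),
    prevPath, curPath, drvPathStr, tmpDir);
#endif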
2004-06-22 10:21:44 +00:00
2005-10-20 16:58:34 +00:00
//////////////////////////////////////////////////////////////////////
class UserLock
{
private:
    Path fnUserLock;
    AutoCloseFD fdUserLock;
2020-05-18 13:50:29 +00:00
    bool isEnabled = false;
2006-12-07 11:27:32 +00:00
    string user;
2020-05-18 13:50:29 +00:00
    uid_t uid = 0;
    gid_t gid = 0;
2015-07-01 12:56:34 +00:00
    std::vector<gid_t> supplementaryGIDs;
2012-07-27 13:59:18 +00:00
2005-10-20 16:58:34 +00:00
public:
2017-01-25 11:45:38 +00:00
    UserLock();
2005-10-20 16:58:34 +00:00
2006-12-07 16:33:31 +00:00
    void kill();
2006-12-07 11:27:32 +00:00
    string getUser() { return user; }
2016-06-03 13:45:11 +00:00
    uid_t getUID() { assert(uid); return uid; }
    uid_t getGID() { assert(gid); return gid; }
2015-07-01 12:56:34 +00:00
    std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
2006-12-07 00:16:07 +00:00
2020-05-08 09:22:39 +00:00
    bool findFreeUser();
2020-05-14 14:00:54 +00:00
    bool enabled() { return isEnabled; }
2012-07-27 13:59:18 +00:00
2005-10-20 16:58:34 +00:00
};
2017-01-25 11:45:38 +00:00
UserLock::UserLock()
2005-10-20 16:58:34 +00:00
{
2012-07-30 23:55:41 +00:00
    assert(settings.buildUsersGroup != "");
2020-05-05 10:04:36 +00:00
    createDirs(settings.nixStateDir + "/userpool");
}

bool UserLock::findFreeUser() {
2020-05-14 14:00:54 +00:00
    if (enabled()) return true;
2006-12-06 20:00:15 +00:00
    /* Get the members of the build-users-group. */
2017-04-13 18:53:23 +00:00
    struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
2006-12-06 20:00:15 +00:00
    if (!gr)
2020-04-21 23:07:07 +00:00
        throw Error("the group '%1%' specified in 'build-users-group' does not exist",
            settings.buildUsersGroup);
2006-12-06 20:00:15 +00:00
    gid = gr->gr_gid;

    /* Copy the result of getgrnam. */
    Strings users;
    for (char * * p = gr->gr_mem; *p; ++p) {
2020-05-11 21:52:15 +00:00
        debug("found build user '%1%'", *p);
2006-12-06 20:00:15 +00:00
        users.push_back(*p);
    }

    if (users.empty())
2020-04-21 23:07:07 +00:00
        throw Error("the build users group '%1%' has no members",
            settings.buildUsersGroup);
2005-10-20 16:58:34 +00:00
2006-12-06 20:00:15 +00:00
    /* Find a user account that isn't currently in use for another
       build. */
2015-07-17 17:24:28 +00:00
    for (auto & i : users) {
2020-05-11 21:52:15 +00:00
        debug("trying user '%1%'", i);
2005-10-20 16:58:34 +00:00
2015-07-17 17:24:28 +00:00
        struct passwd * pw = getpwnam(i.c_str());
2005-10-20 16:58:34 +00:00
        if (!pw)
2020-04-21 23:07:07 +00:00
            throw Error("the user '%1%' in the group '%2%' does not exist",
                i, settings.buildUsersGroup);
2009-09-23 17:05:51 +00:00
2012-07-27 13:59:18 +00:00
2012-07-30 23:55:41 +00:00
        fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
2005-10-20 16:58:34 +00:00
2019-12-09 22:57:33 +00:00
        AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
        if (!fd)
2020-04-21 23:07:07 +00:00
            throw SysError("opening user lock '%1%'", fnUserLock);
2005-10-20 16:58:34 +00:00
2019-12-09 22:57:33 +00:00
        if (lockFile(fd.get(), ltWrite, false)) {
            fdUserLock = std::move(fd);
            user = i;
            uid = pw->pw_uid;
2006-12-07 00:19:27 +00:00
2019-12-09 22:57:33 +00:00
            /* Sanity check... */
            if (uid == getuid() || uid == geteuid())
2020-04-21 23:07:07 +00:00
                throw Error("the Nix user should not be a member of '%1%'",
                    settings.buildUsersGroup);
2012-07-27 13:59:18 +00:00
2015-07-21 12:45:24 +00:00
#if __linux__
2019-12-09 22:57:33 +00:00
            /* Get the list of supplementary groups of this build user.  This
               is usually either empty or contains a group such as "kvm". */
            supplementaryGIDs.resize(10);
            int ngroups = supplementaryGIDs.size();
            int err = getgrouplist(pw->pw_name, pw->pw_gid,
                supplementaryGIDs.data(), &ngroups);
            if (err == -1)
2020-04-21 23:07:07 +00:00
                throw Error("failed to get list of supplementary groups for '%1%'", pw->pw_name);
2019-12-09 22:57:33 +00:00
            supplementaryGIDs.resize(ngroups);
2015-07-21 12:45:24 +00:00
#endif
2015-07-01 12:56:34 +00:00
2020-05-14 14:00:54 +00:00
            isEnabled = true;
2020-05-05 10:04:36 +00:00
            return true;
2005-10-20 16:58:34 +00:00
        }
    }
2020-05-05 10:04:36 +00:00
    return false;
2005-10-20 16:58:34 +00:00
}
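/* Illustrative sketch (not part of the original source): the usual way a
   build goal acquires a build user with the class above -- keep trying until
   findFreeUser() succeeds, and kill any leftover processes of that uid
   before reusing it. The surrounding goal context (buildUser, worker,
   shared_from_this()) is assumed. */
#if 0
if (!buildUser) buildUser = std::make_unique<UserLock>();

if (buildUser->findFreeUser()) {
    /* Make sure that no other processes are executing under this uid. */
    buildUser->kill();
} else {
    /* All build users are currently in use; poll again in a while. */
    worker.waitForAWhile(shared_from_this());
    return;
}
#endif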
2006-12-07 16:33:31 +00:00
void UserLock::kill()
{
2013-11-14 10:57:37 +00:00
    killUser(uid);
2006-12-07 14:14:35 +00:00
}
2004-06-18 18:09:32 +00:00
//////////////////////////////////////////////////////////////////////
2010-08-25 20:44:28 +00:00
struct HookInstance
{
    /* Pipes for talking to the build hook. */
    Pipe toHook;

    /* Pipe for the hook's standard output/error. */
    Pipe fromHook;
2010-08-30 14:53:03 +00:00
    /* Pipe for the builder's standard output/error. */
    Pipe builderOut;
2012-07-27 13:59:18 +00:00
2010-08-25 20:44:28 +00:00
    /* The process ID of the hook. */
    Pid pid;
2017-10-23 18:43:04 +00:00
    FdSink sink;
2017-10-24 11:41:52 +00:00
    std::map<ActivityId, Activity> activities;
2010-08-25 20:44:28 +00:00
    HookInstance();

    ~HookInstance();
};
HookInstance::HookInstance()
{
2017-07-30 11:27:57 +00:00
    debug("starting build hook '%s'", settings.buildHook);
2012-07-27 13:59:18 +00:00
2010-08-25 20:44:28 +00:00
    /* Create a pipe to get the output of the child. */
    fromHook.create();
2012-07-27 13:59:18 +00:00
2010-08-25 20:44:28 +00:00
    /* Create the communication pipes. */
    toHook.create();
2010-08-30 14:53:03 +00:00
    /* Create a pipe to get the output of the builder. */
    builderOut.create();
2010-08-25 20:44:28 +00:00
    /* Fork the hook. */
2014-07-10 14:50:51 +00:00
    pid = startProcess([&]() {
2012-07-27 13:59:18 +00:00
2014-07-10 14:50:51 +00:00
        commonChildInit(fromHook);
2010-08-25 20:44:28 +00:00
2014-08-20 15:00:17 +00:00
        if (chdir("/") == -1) throw SysError("changing into /");
2010-08-25 20:44:28 +00:00
2014-07-10 14:50:51 +00:00
        /* Dup the communication pipes. */
2016-07-11 19:44:44 +00:00
        if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1)
2014-07-10 14:50:51 +00:00
            throw SysError("dupping to-hook read side");
2010-08-25 20:44:28 +00:00
2014-07-10 14:50:51 +00:00
        /* Use fd 4 for the builder's stdout/stderr. */
2016-07-11 19:44:44 +00:00
        if (dup2(builderOut.writeSide.get(), 4) == -1)
2014-07-10 14:50:51 +00:00
            throw SysError("dupping builder's stdout/stderr");
2012-07-27 13:59:18 +00:00
2018-03-20 14:17:59 +00:00
        /* Hack: pass the read side of that fd to allow build-remote
           to read SSH error messages. */
        if (dup2(builderOut.readSide.get(), 5) == -1)
            throw SysError("dupping builder's stdout/stderr");
2016-01-07 14:10:14 +00:00
        Strings args = {
2019-12-05 18:11:09 +00:00
            std::string(baseNameOf(settings.buildHook.get())),
2017-05-02 11:44:10 +00:00
            std::to_string(verbosity),
2016-01-07 14:10:14 +00:00
        };
2017-05-01 13:46:47 +00:00
        execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data());
2010-08-25 20:44:28 +00:00
2017-07-30 11:27:57 +00:00
        throw SysError("executing '%s'", settings.buildHook);
2014-07-10 14:50:51 +00:00
    });
2011-06-30 15:19:13 +00:00
2010-08-25 20:44:28 +00:00
    pid.setSeparatePG(true);
2016-07-11 19:44:44 +00:00
    fromHook.writeSide = -1;
    toHook.readSide = -1;
2017-10-23 18:43:04 +00:00
    sink = FdSink(toHook.writeSide.get());
2018-03-27 16:41:31 +00:00
    std::map<std::string, Config::SettingInfo> settings;
    globalConfig.getSettings(settings);
    for (auto & setting : settings)
        sink << 1 << setting.first << setting.second.value;
2017-10-23 18:43:04 +00:00
    sink << 0;
2010-08-25 20:44:28 +00:00
}
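/* Illustrative sketch (not part of the original source): one plausible way
   the hook process could consume the settings stream written above (a '1'
   tag before each name/value pair, terminated by '0'), assuming the
   readInt()/readString() helpers from serialise.hh on a Source 'in'. The
   'overrides' map is hypothetical. */
#if 0
while (readInt(in) == 1) {
    auto name = readString(in);
    auto value = readString(in);
    overrides[name] = value;
}
#endif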
HookInstance::~HookInstance()
{
    try {
2016-07-11 19:44:44 +00:00
        toHook.writeSide = -1;
2017-03-16 09:52:28 +00:00
        if (pid != -1) pid.kill();
2010-08-25 20:44:28 +00:00
    } catch (...) {
        ignoreException();
    }
}
//////////////////////////////////////////////////////////////////////
typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
2012-07-08 22:39:24 +00:00
class SubstitutionGoal;
2020-09-15 15:19:45 +00:00
/* Unless we are repairing, we don't bother to test validity and just assume it,
   so the choices are `Absent` or `Valid`. */
enum struct PathStatus {
    Corrupt,
    Absent,
    Valid,
};
2020-09-04 15:15:51 +00:00
struct InitialOutputStatus {
2020-08-07 19:09:26 +00:00
    StorePath path;
2020-09-15 15:19:45 +00:00
    PathStatus status;
2020-08-07 19:09:26 +00:00
    /* Valid in the store, and additionally non-corrupt if we are repairing */
    bool isValid() const {
2020-09-15 15:19:45 +00:00
        return status == PathStatus::Valid;
    }
    /* Merely present, allowed to be corrupt */
    bool isPresent() const {
        return status == PathStatus::Corrupt
            || status == PathStatus::Valid;
2020-08-07 19:09:26 +00:00
    }
};
2020-09-04 15:15:51 +00:00
struct InitialOutput {
2020-08-07 19:09:26 +00:00
    bool wanted;
2020-09-04 15:15:51 +00:00
    std::optional<InitialOutputStatus> known;
2020-08-07 19:09:26 +00:00
};
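/* Illustrative sketch (not part of the original source): how the status of
   an initial output might be consulted, consistent with the comments above.
   The helper name is hypothetical. */
#if 0
static bool outputAlreadyGood(const InitialOutput & initial)
{
    /* The output is already in place if we know its status and the path is
       valid (and, when repairing, not corrupt -- see isValid() above). */
    return initial.known && initial.known->isValid();
}
#endif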
2005-01-19 11:16:11 +00:00
class DerivationGoal : public Goal
2004-05-11 18:05:44 +00:00
{
2004-06-18 18:09:32 +00:00
private:
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from an on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
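/* Illustrative sketch (not part of the original source): what a client-side
   use of the buildDerivation() operation described above could look like,
   assuming a Store reference 'store', the derivation's StorePath 'drvPath'
   and its in-memory BasicDerivation 'drv'. */
#if 0
auto result = store.buildDerivation(drvPath, drv, bmNormal);
if (!result.success())
    throw Error("build of '%s' failed: %s",
        store.printStorePath(drvPath), result.errorMsg);
#endif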
2015-07-17 15:57:40 +00:00
    /* Whether to use an on-disk .drv file. */
    bool useDerivation;
2005-01-20 16:01:07 +00:00
    /* The path of the derivation. */
2019-12-05 18:11:09 +00:00
    StorePath drvPath;
2004-05-11 18:05:44 +00:00
2012-11-26 16:15:09 +00:00
    /* The specific outputs that we need to build.  Empty means all of
       them. */
    StringSet wantedOutputs;

    /* Whether additional wanted outputs have been added. */
2015-07-17 15:57:40 +00:00
    bool needRestart = false;
2012-11-26 16:15:09 +00:00
2013-01-02 11:38:28 +00:00
    /* Whether to retry substituting the outputs after building the
       inputs. */
2018-06-05 14:04:41 +00:00
    bool retrySubstitution;
2013-01-02 11:38:28 +00:00
2005-01-20 16:01:07 +00:00
/* The derivation stored at drvPath. */
2015-07-17 15:57:40 +00:00
    std::unique_ptr<BasicDerivation> drv;
2012-07-27 13:59:18 +00:00
2018-09-28 12:31:16 +00:00
    std::unique_ptr<ParsedDerivation> parsedDrv;
2018-09-28 10:43:01 +00:00
2004-05-11 18:05:44 +00:00
/* The remainder is state held during the build. */
2020-08-07 19:09:26 +00:00
/* Locks on (fixed) output paths. */
2004-05-11 18:05:44 +00:00
    PathLocks outputLocks;
2005-01-19 11:16:11 +00:00
    /* All input paths (that is, the union of FS closures of the
       immediate input paths). */
2019-12-05 18:11:09 +00:00
    StorePathSet inputPaths;
2004-05-11 18:05:44 +00:00
2020-09-04 15:15:51 +00:00
    std::map<std::string, InitialOutput> initialOutputs;
2013-06-13 14:43:20 +00:00
2005-10-17 15:33:24 +00:00
    /* User selected for running the builder. */
2017-01-25 11:45:38 +00:00
    std::unique_ptr<UserLock> buildUser;
2005-10-17 15:33:24 +00:00
2004-05-11 18:05:44 +00:00
    /* The process ID of the builder. */
2004-06-22 09:51:44 +00:00
    Pid pid;
2004-05-11 18:05:44 +00:00
    /* The temporary directory. */
    Path tmpDir;
2015-12-02 13:59:07 +00:00
    /* The path of the temporary directory in the sandbox. */
    Path tmpDirInSandbox;
2004-05-11 18:05:44 +00:00
    /* File descriptor for the log file. */
2012-07-17 13:40:12 +00:00
    AutoCloseFD fdLogFile;
2016-05-04 13:46:25 +00:00
    std::shared_ptr<BufferedSink> logFileSink, logSink;
2004-05-11 18:05:44 +00:00
2013-09-02 09:58:18 +00:00
    /* Number of bytes received from the builder's stdout/stderr. */
    unsigned long logSize;
2016-04-25 14:47:46 +00:00
    /* The most recent log lines. */
    std::list<std::string> logTail;

    std::string currentLogLine;
2016-04-28 12:27:00 +00:00
    size_t currentLogLinePos = 0; // to handle carriage return
2016-04-25 14:47:46 +00:00
2017-10-24 11:41:52 +00:00
    std::string currentHookLine;
2004-05-11 18:05:44 +00:00
    /* Pipe for the builder's standard output/error. */
2010-08-30 14:53:03 +00:00
    Pipe builderOut;
2004-05-13 19:14:49 +00:00
2018-10-02 14:01:26 +00:00
/* Pipe for synchronising updates to the builder namespaces. */
2016-06-09 16:27:39 +00:00
    Pipe userNamespaceSync;
2018-10-02 14:01:26 +00:00
    /* The mount namespace of the builder, used to add additional
       paths to the sandbox as a result of recursive Nix calls. */
    AutoCloseFD sandboxMountNamespace;
2010-08-25 20:44:28 +00:00
/* The build hook. */
2017-01-19 14:15:09 +00:00
    std::unique_ptr<HookInstance> hook;
2012-07-27 13:59:18 +00:00
2007-10-27 00:46:59 +00:00
/* Whether we're currently doing a chroot build. */
2015-07-17 15:57:40 +00:00
    bool useChroot = false;
2012-07-27 13:59:18 +00:00
2008-12-11 18:57:10 +00:00
    Path chrootRootDir;
2007-10-27 00:46:59 +00:00
2008-12-11 17:00:12 +00:00
    /* RAII object to delete the chroot directory. */
2014-03-29 23:49:23 +00:00
    std::shared_ptr<AutoDelete> autoDelChroot;
2009-03-25 21:05:42 +00:00
2020-03-15 06:23:17 +00:00
    /* The sort of derivation we are building. */
    DerivationType derivationType;
2012-07-27 13:59:18 +00:00
2016-06-03 13:45:11 +00:00
    /* Whether to run the build in a private network namespace. */
    bool privateNetwork = false;
2005-01-19 11:16:11 +00:00
    typedef void (DerivationGoal::*GoalState)();
2004-06-18 18:09:32 +00:00
    GoalState state;
2012-07-27 13:59:18 +00:00
2012-06-25 19:45:16 +00:00
    /* Stuff we need to pass to initChild(). */
2016-10-31 16:09:52 +00:00
    struct ChrootPath {
        Path source;
        bool optional;
        ChrootPath(Path source = "", bool optional = false)
            : source(source), optional(optional)
        { }
    };

    typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
2012-12-29 22:04:02 +00:00
    DirsInChroot dirsInChroot;
2017-01-25 11:00:28 +00:00
2012-06-25 19:45:16 +00:00
    typedef map<string, string> Environment;
    Environment env;
2015-12-03 15:30:19 +00:00
#if __APPLE__
2015-11-13 03:00:16 +00:00
    typedef string SandboxProfile;
    SandboxProfile additionalSandboxProfile;
2015-11-15 11:08:50 +00:00
#endif
2012-06-25 19:45:16 +00:00
2012-09-11 22:39:22 +00:00
    /* Hash rewriting. */
2018-03-29 22:56:13 +00:00
    StringMap inputRewrites, outputRewrites;
2019-12-05 18:11:09 +00:00
    typedef map<StorePath, StorePath> RedirectedOutputs;
2014-02-18 00:01:14 +00:00
    RedirectedOutputs redirectedOutputs;
2012-09-11 22:39:22 +00:00
2020-08-07 19:09:26 +00:00
    /* The output paths used during the build.

       - Input-addressed derivations or fixed content-addressed outputs are
         sometimes built when some of their outputs already exist, and can not
         be hidden via sandboxing. We use temporary locations instead and
         rewrite after the build. Otherwise the regular predetermined paths are
         put here.

       - Floating content-addressed derivations do not know their final build
         output paths until the outputs are hashed, so random locations are
         used, and then renamed. The randomness helps guard against hidden
         self-references.
     */
    OutputPathMap scratchOutputs;

    /* The final output paths of the build.

       - For input-addressed derivations, always the precomputed paths.

       - For content-addressed derivations, calculated from whatever the hash
         ends up being. (Note that fixed-output derivations that produce the
         "wrong" output still install that data under its true content-address.)
     */
2020-08-11 20:49:10 +00:00
    OutputPathMap finalOutputs;
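    /* Illustrative sketch (not part of the original source): the rewriting
       idea described above, assuming the rewriteStrings() helper from
       util.hh -- references to a scratch path are textually rewritten to the
       corresponding final path once it is known. The variable names are
       hypothetical. */
#if 0
    StringMap rewrites;
    rewrites.insert_or_assign(
        std::string(scratchPath.hashPart()),
        std::string(finalPath.hashPart()));
    std::string fixed = rewriteStrings(contents, rewrites);
#endif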
2020-08-07 19:09:26 +00:00
2014-02-17 22:04:52 +00:00
    BuildMode buildMode;

    /* If we're repairing without a chroot, there may be outputs that
       are valid but corrupt.  So we redirect these outputs to
       temporary paths. */
2019-12-05 18:11:09 +00:00
    StorePathSet redirectedBadOutputs;
2012-10-02 21:13:46 +00:00
2015-07-20 01:15:45 +00:00
    BuildResult result;
2015-11-09 22:16:24 +00:00
    /* The current round, if we're building multiple times. */
2018-05-02 11:56:34 +00:00
    size_t curRound = 1;
2015-11-09 22:16:24 +00:00
2018-05-02 11:56:34 +00:00
    size_t nrRounds;
2015-11-09 22:16:24 +00:00
    /* Path registration info from the previous round, if we're
       building multiple times.  Since this contains the hash, it
       allows us to compare whether two rounds produced the same
       result. */
2018-10-22 19:49:56 +00:00
    std::map<Path, ValidPathInfo> prevInfos;
2015-11-09 22:16:24 +00:00
2016-12-19 10:52:57 +00:00
    const uid_t sandboxUid = 1000;
    const gid_t sandboxGid = 100;
2017-01-25 11:00:28 +00:00
    const static Path homeDir;
2017-08-15 13:31:59 +00:00
    std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;

    std::unique_ptr<Activity> act;
2020-06-15 14:03:29 +00:00
    /* Activity that denotes waiting for a lock. */
    std::unique_ptr<Activity> actLock;
2017-08-21 10:01:21 +00:00
    std::map<ActivityId, Activity> builderActivities;
2017-10-24 12:24:57 +00:00
    /* The remote machine on which we're building. */
    std::string machineName;
Recursive Nix support
This allows Nix builders to call Nix to build derivations, with some
limitations.
Example:
let nixpkgs = fetchTarball channel:nixos-18.03; in
with import <nixpkgs> {};
runCommand "foo"
{
buildInputs = [ nix jq ];
NIX_PATH = "nixpkgs=${nixpkgs}";
}
''
hello=$(nix-build -E '(import <nixpkgs> {}).hello.overrideDerivation (args: { name = "hello-3.5"; })')
$hello/bin/hello
mkdir -p $out/bin
ln -s $hello/bin/hello $out/bin/hello
nix path-info -r --json $hello | jq .
''
This derivation makes a recursive Nix call to build GNU Hello and
symlinks it from its $out, i.e.
# ll ./result/bin/
lrwxrwxrwx 1 root root 63 Jan 1 1970 hello -> /nix/store/s0awxrs71gickhaqdwxl506hzccb30y5-hello-3.5/bin/hello
# nix-store -qR ./result
/nix/store/hwwqshlmazzjzj7yhrkyjydxamvvkfd3-glibc-2.26-131
/nix/store/s0awxrs71gickhaqdwxl506hzccb30y5-hello-3.5
/nix/store/sgmvvyw8vhfqdqb619bxkcpfn9lvd8ss-foo
This is implemented as follows:
* Before running the outer builder, Nix creates a Unix domain socket
'.nix-socket' in the builder's temporary directory and sets
$NIX_REMOTE to point to it. It starts a thread to process
connections to this socket. (Thus you don't need to have nix-daemon
running.)
* The daemon thread uses a wrapper store (RestrictedStore) to keep
track of paths added through recursive Nix calls, to implement some
restrictions (see below), and to do some censorship (e.g. for
purity, queryPathInfo() won't return impure information such as
signatures and timestamps).
* After the build finishes, the output paths are scanned for
references to the paths added through recursive Nix calls (in
addition to the inputs closure). Thus, in the example above, $out
has a reference to $hello.
The main restriction on recursive Nix calls is that they cannot do
arbitrary substitutions. For example, doing
nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10
is forbidden unless /nix/store/kmwd... is in the inputs closure or
previously built by a recursive Nix call. This is to prevent
irreproducible derivations that have hidden dependencies on
substituters or the current store contents. Building a derivation is
fine, however, and Nix will use substitutes if available. In other
words, the builder has to present proof that it knows how to build a
desired store path from scratch by constructing a derivation graph for
that path.
Probably we should also disallow instantiating/building fixed-output
derivations (specifically, those that access the network, but
currently we have no way to mark fixed-output derivations that don't
access the network). Otherwise sandboxed derivations can bypass
sandbox restrictions and access the network.
When sandboxing is enabled, we make paths appear in the sandbox of the
builder by entering the mount namespace of the builder and
bind-mounting each path. This is tricky because we do a pivot_root()
in the builder to change the root directory of its mount namespace,
and thus the host /nix/store is not visible in the mount namespace of
the builder. To get around this, just before doing pivot_root(), we
branch a second mount namespace that shares its /nix/store mountpoint
with the parent.
Recursive Nix currently doesn't work on macOS in sandboxed mode
(because we can't change the sandbox policy of a running build) and in
non-root mode (because setns() barfs).
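As a rough sketch of the socket mechanism described above (all names and error handling here are illustrative assumptions, not the actual Nix implementation), the parent process could create the '.nix-socket' Unix domain socket in the build's temporary directory and accept connections on a dedicated thread, while the builder is pointed at it via NIX_REMOTE:

#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstring>
#include <stdexcept>
#include <string>
#include <thread>

// Create and bind a listening Unix domain socket at 'socketPath'.
int createDaemonSocket(const std::string & socketPath)
{
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd == -1) throw std::runtime_error("cannot create Unix domain socket");

    sockaddr_un addr{};
    addr.sun_family = AF_UNIX;
    if (socketPath.size() >= sizeof(addr.sun_path))
        throw std::runtime_error("socket path is too long");
    std::strcpy(addr.sun_path, socketPath.c_str());

    if (bind(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) == -1 || listen(fd, 5) == -1) {
        close(fd);
        throw std::runtime_error("cannot bind/listen on " + socketPath);
    }
    return fd;
}

// Accept loop run on a separate thread; each accepted connection would be
// handed to a worker that speaks the daemon protocol against a restricted
// view of the store (RestrictedStore in the description above).
void acceptLoop(int daemonFd)
{
    while (true) {
        int client = accept(daemonFd, nullptr, nullptr);
        if (client == -1) break; // socket closed: shut the daemon down
        close(client);           // placeholder: a real worker would serve the connection
    }
}

// Usage sketch: std::thread t(acceptLoop, createDaemonSocket(tmpDir + "/.nix-socket"));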
2018-10-02 14:01:26 +00:00
/* The recursive Nix daemon socket. */
AutoCloseFD daemonSocket;
/* The daemon main thread. */
std::thread daemonThread;
2019-11-04 13:27:28 +00:00
/* The daemon worker threads. */
std::vector<std::thread> daemonWorkerThreads;
2018-10-02 14:01:26 +00:00
/* Paths that were added via recursive Nix calls. */
2019-12-05 18:11:09 +00:00
StorePathSet addedPaths;
2018-10-02 14:01:26 +00:00
/* Recursive Nix calls are only allowed to build or realize paths
   in the original input closure or added via a recursive Nix call
   (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
   /nix/store/<bla> is some arbitrary path in a binary cache). */
2019-12-05 18:11:09 +00:00
bool isAllowed(const StorePath & path)
2018-10-02 14:01:26 +00:00
{
    return inputPaths.count(path) || addedPaths.count(path);
}
2019-12-05 18:11:09 +00:00
friend struct RestrictedStore;
2018-10-02 14:01:26 +00:00
2004-06-18 18:09:32 +00:00
public:
2020-08-22 20:44:47 +00:00
DerivationGoal(const StorePath & drvPath,
    const StringSet & wantedOutputs, Worker & worker,
    BuildMode buildMode = bmNormal);
2020-06-16 20:20:18 +00:00
DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
2020-08-22 20:44:47 +00:00
    const StringSet & wantedOutputs, Worker & worker,
    BuildMode buildMode = bmNormal);
2005-01-19 11:16:11 +00:00
~DerivationGoal();
2004-05-11 18:05:44 +00:00
2019-05-12 20:47:41 +00:00
/* Whether we need to perform hash rewriting if there are valid output paths. */
bool needsHashRewrite();
2020-06-15 17:25:35 +00:00
void timedOut(Error && ex) override;
2012-07-27 13:59:18 +00:00
2015-09-17 23:22:06 +00:00
string key() override
2014-11-24 15:48:04 +00:00
{
    /* Ensure that derivations get built in order of their name,
       i.e. a derivation named "aardvark" always comes before
       "baboon". And substitution goals always happen before
       derivation goals (due to "b$"). */
2019-12-05 18:11:09 +00:00
    return "b$" + std::string(drvPath.name()) + "$" + worker.store.printStorePath(drvPath);
2014-11-24 15:48:04 +00:00
}
2015-09-17 23:22:06 +00:00
void work() override;
2004-06-19 21:45:04 +00:00
2019-12-05 18:11:09 +00:00
StorePath getDrvPath()
2005-02-23 11:19:27 +00:00
{
2020-06-16 20:20:18 +00:00
    return drvPath;
2005-02-23 11:19:27 +00:00
}
2008-01-15 04:32:08 +00:00
2012-11-26 16:15:09 +00:00
/* Add wanted outputs to an already existing derivation goal. */
void addWantedOutputs(const StringSet & outputs);
2015-07-20 01:15:45 +00:00
BuildResult getResult() { return result; }
2004-06-19 21:45:04 +00:00
private:
2004-06-18 18:09:32 +00:00
/* The states. */
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from an on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
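To make the in-memory representation concrete, here is a simplified sketch of the information such a derivation has to carry (field names are illustrative and do not exactly match Nix's BasicDerivation):

#include <map>
#include <set>
#include <string>
#include <vector>

// Roughly what a remote builder needs when no .drv closure is shipped.
struct SimpleDerivation {
    std::map<std::string, std::string> outputs;  // output name -> store path
    std::set<std::string> inputSrcs;             // source store paths that must be present
    std::string platform;                        // e.g. "x86_64-linux"
    std::string builder;                         // path of the builder executable
    std::vector<std::string> args;               // arguments passed to the builder
    std::map<std::string, std::string> env;      // environment of the builder
};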
2015-07-17 15:57:40 +00:00
void getDerivation();
void loadDerivation();
2006-12-07 23:58:36 +00:00
void haveDerivation();
2020-08-07 19:09:26 +00:00
void outputsSubstitutionTried();
void gaveUpOnSubstitution();
2012-10-03 14:38:09 +00:00
void closureRepaired();
2005-01-19 11:16:11 +00:00
void inputsRealised();
2004-06-18 18:09:32 +00:00
void tryToBuild();
2020-05-14 14:00:54 +00:00
void tryLocalBuild();
2004-06-18 18:09:32 +00:00
void buildDone();
2020-08-22 20:44:47 +00:00
void resolvedFinished();
2004-06-19 21:45:04 +00:00
/* Is the build hook willing to perform the build? */
HookReply tryBuildHook();
2004-06-18 18:09:32 +00:00
/* Start building a derivation. */
void startBuilder();
2017-01-25 11:00:28 +00:00
/* Fill in the environment for the builder. */
void initEnv();
2019-10-12 23:02:57 +00:00
/* Setup tmp dir location. */
void initTmpDir();
Add support for passing structured data to builders
Previously, all derivation attributes had to be coerced into strings
so that they could be passed via the environment. This is lossy
(e.g. lists get flattened, necessitating configureFlags
vs. configureFlagsArray, of which the latter cannot be specified as an
attribute), doesn't support attribute sets at all, and has size
limitations (necessitating hacks like passAsFile).
This patch adds a new mode for passing attributes to builders, namely
encoded as a JSON file ".attrs.json" in the current directory of the
builder. This mode is activated via the special attribute
__structuredAttrs = true;
(The idea is that one day we can set this in stdenv.mkDerivation.)
For example,
stdenv.mkDerivation {
__structuredAttrs = true;
name = "foo";
buildInputs = [ pkgs.hello pkgs.cowsay ];
doCheck = true;
hardening.format = false;
}
results in a ".attrs.json" file containing (sans the indentation):
{
"buildInputs": [],
"builder": "/nix/store/ygl61ycpr2vjqrx775l1r2mw1g2rb754-bash-4.3-p48/bin/bash",
"configureFlags": [
"--with-foo",
"--with-bar=1 2"
],
"doCheck": true,
"hardening": {
"format": false
},
"name": "foo",
"nativeBuildInputs": [
"/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10",
"/nix/store/4jnvjin0r6wp6cv1hdm5jbkx3vinlcvk-cowsay-3.03"
],
"propagatedBuildInputs": [],
"propagatedNativeBuildInputs": [],
"stdenv": "/nix/store/f3hw3p8armnzy6xhd4h8s7anfjrs15n2-stdenv",
"system": "x86_64-linux"
}
"passAsFile" is ignored in this mode because it's not needed - large
strings are included directly in the JSON representation.
It is up to the builder to do something with the JSON
representation. For example, in bash-based builders, lists/attrsets of
string values could be mapped to bash (associative) arrays.
2017-01-25 15:42:07 +00:00
/* Write a JSON file containing the derivation attributes. */
void writeStructuredAttrs();
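A minimal sketch of what writeStructuredAttrs() boils down to, reusing attribute values from the commit message above; it uses the third-party nlohmann/json library purely for brevity (an assumption for this sketch; the real code presumably goes through the JSON helpers included at the top of this file):

#include <fstream>
#include <nlohmann/json.hpp>
#include <string>

// Serialize structured attributes as ".attrs.json" in the build's temp dir.
void writeAttrsJson(const std::string & tmpDir)
{
    nlohmann::json attrs;
    attrs["name"] = "foo";
    attrs["doCheck"] = true;
    attrs["configureFlags"] = { "--with-foo", "--with-bar=1 2" }; // lists are not flattened
    attrs["hardening"]["format"] = false;                         // nested attribute sets survive
    attrs["buildInputs"] = nlohmann::json::array();

    std::ofstream(tmpDir + "/.attrs.json") << attrs.dump(2) << "\n";
}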
2018-10-02 14:01:26 +00:00
void startDaemon();
void stopDaemon();
/* Add 'path' to the set of paths that may be referenced by the
   outputs, and make it appear in the sandbox. */
2019-12-05 18:11:09 +00:00
void addDependency(const StorePath & path);
2018-10-02 14:01:26 +00:00
2017-01-25 11:00:28 +00:00
/* Make a file owned by the builder. */
void chownToBuilder(const Path & path);
2014-12-10 16:25:12 +00:00
/* Run the builder's process. */
void runChild();
2012-06-25 19:45:16 +00:00
friend int childEntry(void *);
2014-02-17 21:25:15 +00:00
/* Check that the derivation outputs all exist and register them
   as valid. */
void registerOutputs();
2004-06-18 18:09:32 +00:00
2018-10-22 19:49:56 +00:00
/* Check that an output meets the requirements specified by the
   'outputChecks' attribute (or the legacy
   '{allowed,disallowed}{References,Requisites}' attributes). */
void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
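At its core, the legacy allowedReferences check is a set-membership test over the references found by scanning the output. A hypothetical, stripped-down sketch of just that test (the real checkOutputs() also handles disallowedReferences, the *Requisites variants and the structured 'outputChecks' attribute):

#include <set>
#include <stdexcept>
#include <string>

// Fail if the output refers to anything outside the allowed set.
void enforceAllowedReferences(
    const std::string & outputName,
    const std::set<std::string> & references,        // store paths the output refers to
    const std::set<std::string> & allowedReferences) // paths it is allowed to refer to
{
    for (auto & ref : references)
        if (!allowedReferences.count(ref))
            throw std::runtime_error(
                "output '" + outputName + "' is not allowed to refer to path '" + ref + "'");
}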
2004-06-18 18:09:32 +00:00
/* Open a log file and a pipe to it. */
2008-11-12 11:08:27 +00:00
Path openLogFile();
2004-06-18 18:09:32 +00:00
2012-05-30 14:12:29 +00:00
/* Close the log file. */
void closeLogFile();
2004-06-18 18:09:32 +00:00
/* Delete the temporary directory, if we have one. */
2004-05-11 18:05:44 +00:00
void deleteTmpDir(bool force);
2004-06-18 18:09:32 +00:00
2004-06-29 09:41:50 +00:00
/* Callback used by the worker to write to the log. */
2015-09-17 23:22:06 +00:00
void handleChildOutput(int fd, const string & data) override;
void handleEOF(int fd) override;
2016-04-25 14:47:46 +00:00
void flushLine();
2004-06-29 09:41:50 +00:00
2020-08-07 19:09:26 +00:00
/* Wrappers around the corresponding Store methods that first consult the
   derivation. This is currently needed because when there is no drv file
   there also is no DB entry. */
2020-08-20 18:14:12 +00:00
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
OutputPathMap queryDerivationOutputMap();
2020-08-07 19:09:26 +00:00
2005-01-25 10:55:33 +00:00
/* Return the set of (in)valid paths. */
2020-08-07 19:09:26 +00:00
void checkPathValidity();
2009-03-25 21:05:42 +00:00
2006-12-08 18:41:48 +00:00
/* Forcibly kill the child process, if any. */
void killChild();
2012-10-02 21:13:46 +00:00
2020-09-04 15:15:51 +00:00
/* Create an alternative path calculated from, but distinct from, the
   input, so we can avoid overwriting outputs (or other store paths)
2020-08-07 19:09:26 +00:00
   that already exist. */
StorePath makeFallbackPath(const StorePath & path);
2020-09-04 15:15:51 +00:00
/* Make a fallback path based on the output name and the derivation
   hash. */
/* FIXME: add an option to randomize, so we can audit whether our
   rewrites caught everything. */
2020-08-11 20:49:10 +00:00
StorePath makeFallbackPath(std::string_view outputName);
2012-10-03 14:38:09 +00:00
void repairClosure();
2015-07-20 01:15:45 +00:00
2020-05-14 14:00:54 +00:00
void started();
2020-06-15 17:25:35 +00:00
void done(
    BuildResult::Status status,
    std::optional<Error> ex = {});
2018-04-17 10:03:27 +00:00
2019-12-05 18:11:09 +00:00
StorePathSet exportReferences(const StorePathSet & storePaths);
2004-05-11 18:05:44 +00:00
};
2017-01-25 11:00:28 +00:00
const Path DerivationGoal::homeDir = "/homeless-shelter";
2020-08-22 20:44:47 +00:00
DerivationGoal::DerivationGoal(const StorePath & drvPath,
    const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
2005-01-19 11:16:11 +00:00
    : Goal(worker)
2015-07-17 15:57:40 +00:00
    , useDerivation(true)
2020-06-16 20:20:18 +00:00
    , drvPath(drvPath)
2012-11-26 16:15:09 +00:00
    , wantedOutputs(wantedOutputs)
2014-02-17 22:04:52 +00:00
    , buildMode(buildMode)
2004-05-11 18:05:44 +00:00
{
2015-07-17 15:57:40 +00:00
    state = &DerivationGoal::getDerivation;
2020-08-22 20:44:47 +00:00
    name = fmt(
        "building of '%s' from .drv file",
        StorePathWithOutputs { drvPath, wantedOutputs }.to_string(worker.store));
2005-02-18 09:50:20 +00:00
    trace("created");
2017-05-16 14:09:57 +00:00
2017-08-15 13:31:59 +00:00
    mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
    worker.updateProgress();
2004-05-11 18:05:44 +00:00
}
2010-08-25 20:44:28 +00:00
2020-06-16 20:20:18 +00:00
DerivationGoal::DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
2020-08-22 20:44:47 +00:00
    const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
2015-07-17 15:57:40 +00:00
    : Goal(worker)
    , useDerivation(false)
2020-06-16 20:20:18 +00:00
    , drvPath(drvPath)
2020-08-22 20:44:47 +00:00
    , wantedOutputs(wantedOutputs)
2015-07-17 15:57:40 +00:00
    , buildMode(buildMode)
{
2019-12-05 18:11:09 +00:00
    this->drv = std::make_unique<BasicDerivation>(BasicDerivation(drv));
2015-07-17 15:57:40 +00:00
    state = &DerivationGoal::haveDerivation;
2020-08-22 20:44:47 +00:00
    name = fmt(
        "building of '%s' from in-memory derivation",
        StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store));
2015-07-17 15:57:40 +00:00
    trace("created");
2015-08-24 09:13:31 +00:00
2017-08-15 13:31:59 +00:00
    mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
    worker.updateProgress();
2017-05-16 14:09:57 +00:00
2015-08-24 09:13:31 +00:00
    /* Prevent the .chroot directory from being
       garbage-collected. (See isActiveTempFile() in gc.cc.) */
2020-01-06 21:18:00 +00:00
    worker.store.addTempRoot(this->drvPath);
2015-07-17 15:57:40 +00:00
}
2005-01-19 11:16:11 +00:00
DerivationGoal::~DerivationGoal()
2004-05-11 18:05:44 +00:00
{
    /* Careful: we should never ever throw an exception from a
       destructor. */
2014-11-12 10:35:53 +00:00
    try { killChild(); } catch (...) { ignoreException(); }
2018-10-02 14:01:26 +00:00
    try { stopDaemon(); } catch (...) { ignoreException(); }
2014-11-12 10:35:53 +00:00
    try { deleteTmpDir(false); } catch (...) { ignoreException(); }
    try { closeLogFile(); } catch (...) { ignoreException(); }
2004-05-11 18:05:44 +00:00
}
2010-08-25 20:44:28 +00:00
2019-05-12 20:47:41 +00:00
inline bool DerivationGoal::needsHashRewrite()
{
#if __linux__
    return !useChroot;
#else
    /* Darwin requires hash rewriting even when sandboxing is enabled. */
    return true;
#endif
}
2006-12-08 18:41:48 +00:00
void DerivationGoal::killChild()
{
    if (pid != -1) {
2016-08-30 13:45:39 +00:00
        worker.childTerminated(this);
2006-12-08 18:41:48 +00:00
2017-01-25 11:45:38 +00:00
        if (buildUser) {
2013-11-14 10:57:37 +00:00
            /* If we're using a build user, then there is a tricky
               race condition: if we kill the build user before the
               child has done its setuid() to the build user uid, then
               it won't be killed, and we'll potentially lock up in
               pid.wait(). So also send a conventional kill to the
2006-12-08 18:41:48 +00:00
               child. */
            ::kill(-pid, SIGKILL); /* ignore the result */
2017-01-25 11:45:38 +00:00
            buildUser->kill();
2017-01-19 15:58:39 +00:00
            pid.wait();
2006-12-08 18:41:48 +00:00
        } else
            pid.kill();
2012-07-27 13:59:18 +00:00
2006-12-08 18:41:48 +00:00
        assert(pid == -1);
    }
2010-08-25 20:44:28 +00:00
    hook.reset();
2006-12-08 18:41:48 +00:00
}
2020-06-15 17:25:35 +00:00
void DerivationGoal::timedOut(Error && ex)
2006-12-08 18:41:48 +00:00
{
    killChild();
2020-06-15 17:25:35 +00:00
    done(BuildResult::TimedOut, ex);
2006-12-08 18:41:48 +00:00
}
2005-01-19 11:16:11 +00:00
void DerivationGoal::work()
2004-05-11 18:05:44 +00:00
{
2004-06-18 18:09:32 +00:00
    (this->*state)();
2004-05-11 18:05:44 +00:00
}
2012-11-26 16:15:09 +00:00
void DerivationGoal::addWantedOutputs(const StringSet & outputs)
{
    /* If we already want all outputs, there is nothing to do. */
    if (wantedOutputs.empty()) return;
    if (outputs.empty()) {
        wantedOutputs.clear();
        needRestart = true;
    } else
2015-07-17 17:24:28 +00:00
        for (auto & i : outputs)
2019-10-09 13:51:52 +00:00
            if (wantedOutputs.insert(i).second)
2012-11-26 16:15:09 +00:00
                needRestart = true;
}
2015-07-17 15:57:40 +00:00
void DerivationGoal::getDerivation()
2004-05-11 18:05:44 +00:00
{
2004-06-22 14:48:59 +00:00
    trace("init");
2004-05-11 18:05:44 +00:00
2005-01-20 16:01:07 +00:00
    /* The first thing to do is to make sure that the derivation
2004-06-18 18:09:32 +00:00
       exists. If it doesn't, it may be created through a
       substitute. */
2014-11-24 15:44:35 +00:00
    if (buildMode == bmNormal && worker.store.isValidPath(drvPath)) {
2015-07-17 15:57:40 +00:00
        loadDerivation();
2014-11-24 15:44:35 +00:00
        return;
    }
2005-01-19 11:16:11 +00:00
    addWaitee(worker.makeSubstitutionGoal(drvPath));
2003-08-01 14:11:19 +00:00
2015-07-17 15:57:40 +00:00
    state = &DerivationGoal::loadDerivation;
2004-05-11 18:05:44 +00:00
}
2015-07-17 15:57:40 +00:00
void DerivationGoal::loadDerivation()
2004-05-11 18:05:44 +00:00
{
2005-01-19 11:16:11 +00:00
    trace("loading derivation");
2004-05-11 18:05:44 +00:00
2004-06-25 10:21:44 +00:00
    if (nrFailed != 0) {
2020-06-15 17:25:35 +00:00
        done(BuildResult::MiscFailure, Error("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath)));
2004-06-25 10:21:44 +00:00
        return;
    }
2008-12-12 17:14:57 +00:00
    /* `drvPath' should already be a root, but let's be on the safe
       side: if the user forgot to make it a root, we wouldn't want
       things being garbage collected while we're busy. */
    worker.store.addTempRoot(drvPath);
2012-07-27 13:59:18 +00:00
2008-06-09 13:52:45 +00:00
    assert(worker.store.isValidPath(drvPath));
2003-07-20 19:29:38 +00:00
2005-01-19 11:16:11 +00:00
    /* Get the derivation. */
Eliminate the "store" global variable
Also, move a few free-standing functions into StoreAPI and Derivation.
Also, introduce a non-nullable smart pointer, ref<T>, which is just a
wrapper around std::shared_ptr ensuring that the pointer is never
null. (For reference-counted values, this is better than passing a
"T&", because the latter doesn't maintain the refcount. Usually, the
caller will have a shared_ptr keeping the value alive, but that's not
always the case, e.g., when passing a reference to a std::thread via
std::bind.)
2016-02-04 13:28:26 +00:00
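As a rough illustration of the ref<T> idea described in the annotation above (a simplified sketch only, not the actual ref.hh shipped with Nix):

#include <memory>
#include <stdexcept>

/* Simplified sketch of a non-nullable shared pointer: constructing it
   from a null std::shared_ptr throws instead of yielding a null ref. */
template<typename T>
class ref
{
    std::shared_ptr<T> p;
public:
    explicit ref(const std::shared_ptr<T> & p) : p(p)
    {
        if (!p) throw std::invalid_argument("null pointer cast to ref");
    }
    T * operator ->() const { return p.get(); }
    T & operator *() const { return *p; }
};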
    drv = std::unique_ptr<BasicDerivation>(new Derivation(worker.store.derivationFromPath(drvPath)));
2015-07-17 15:57:40 +00:00
    haveDerivation();
}
2004-05-11 18:05:44 +00:00
2015-07-17 15:57:40 +00:00
void DerivationGoal::haveDerivation()
{
    trace("have derivation");
2020-08-07 19:09:26 +00:00
    if (drv->type() == DerivationType::CAFloating)
        settings.requireExperimentalFeature("ca-derivations");
2018-06-05 14:04:41 +00:00
    retrySubstitution = false;
2020-08-14 17:00:13 +00:00
    for (auto & i : drv->outputsAndOptPaths(worker.store))
        if (i.second.second)
            worker.store.addTempRoot(*i.second.second);
2005-01-31 10:27:25 +00:00
2005-01-25 10:55:33 +00:00
    /* Check what outputs paths are not already valid. */
2020-08-07 19:09:26 +00:00
    checkPathValidity();
    bool allValid = true;
    for (auto & [_, status] : initialOutputs) {
        if (!status.wanted) continue;
        if (!status.known || !status.known->isValid()) {
            allValid = false;
            break;
        }
    }
2005-01-25 10:55:33 +00:00
    /* If they are all valid, then we're done. */
2020-08-07 19:09:26 +00:00
    if (allValid && buildMode == bmNormal) {
2015-07-20 01:15:45 +00:00
        done(BuildResult::AlreadyValid);
2009-03-18 14:48:42 +00:00
        return;
2005-01-25 10:55:33 +00:00
    }
2020-06-16 20:20:18 +00:00
    parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
2018-09-28 10:43:01 +00:00
2005-01-25 10:55:33 +00:00
    /* We are first going to try to create the invalid output paths
       through substitutes.  If that doesn't work, we'll build
       them. */
2019-09-03 14:02:12 +00:00
    if (settings.useSubstitutes && parsedDrv->substitutesAllowed())
2020-08-07 19:09:26 +00:00
        for (auto & [_, status] : initialOutputs) {
            if (!status.wanted) continue;
            if (!status.known) {
2020-09-04 14:48:50 +00:00
                warn("do not know how to query for unknown floating content-addressed derivation output yet");
2020-08-07 19:09:26 +00:00
                /* Nothing to wait for; tail call */
                return DerivationGoal::gaveUpOnSubstitution();
            }
            addWaitee(worker.makeSubstitutionGoal(
                status.known->path,
                buildMode == bmRepair ? Repair : NoRepair,
                getDerivationCA(*drv)));
        }
2012-07-27 13:59:18 +00:00
2005-01-25 10:55:33 +00:00
    if (waitees.empty()) /* to prevent hang (no wake-up event) */
2020-08-07 19:09:26 +00:00
        outputsSubstitutionTried();
2005-01-25 10:55:33 +00:00
    else
2020-08-07 19:09:26 +00:00
        state = &DerivationGoal::outputsSubstitutionTried;
2005-01-25 10:55:33 +00:00
}
2020-08-07 19:09:26 +00:00
void DerivationGoal::outputsSubstitutionTried()
2005-01-25 10:55:33 +00:00
{
    trace("all outputs substituted (maybe)");
2016-07-23 13:16:08 +00:00
    if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && !settings.tryFallback) {
2019-12-05 18:11:09 +00:00
        done(BuildResult::TransientFailure,
            fmt("some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source",
                worker.store.printStorePath(drvPath)));
2016-07-23 13:16:08 +00:00
        return;
    }
2005-01-25 13:00:12 +00:00
2013-01-02 11:38:28 +00:00
    /* If the substitutes form an incomplete closure, then we should
       build the dependencies of this derivation, but after that, we
       can still use the substitutes for this derivation itself. */
2018-06-05 14:04:41 +00:00
    if (nrIncompleteClosure > 0) retrySubstitution = true;
2013-01-02 11:38:28 +00:00
    nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
2005-01-25 13:00:12 +00:00
2012-11-26 16:15:09 +00:00
    if (needRestart) {
        needRestart = false;
        haveDerivation();
        return;
    }
2020-08-07 19:09:26 +00:00
    checkPathValidity();
    size_t nrInvalid = 0;
    for (auto & [_, status] : initialOutputs) {
        if (!status.wanted) continue;
        if (!status.known || !status.known->isValid())
            nrInvalid++;
    }
2014-02-18 00:01:14 +00:00
    if (buildMode == bmNormal && nrInvalid == 0) {
2015-07-20 01:15:45 +00:00
        done(BuildResult::Substituted);
2014-02-18 00:01:14 +00:00
        return;
    }
    if (buildMode == bmRepair && nrInvalid == 0) {
        repairClosure();
2009-03-18 14:48:42 +00:00
        return;
2004-05-14 12:24:29 +00:00
    }
2014-02-18 00:01:14 +00:00
    if (buildMode == bmCheck && nrInvalid > 0)
2019-12-05 18:11:09 +00:00
        throw Error("some outputs of '%s' are not valid, so checking is not possible",
            worker.store.printStorePath(drvPath));
2003-08-01 14:11:19 +00:00
2020-08-07 19:09:26 +00:00
    /* Nothing to wait for; tail call */
    gaveUpOnSubstitution();
}
2020-08-11 21:16:14 +00:00
/* At least one of the output paths could not be
   produced using a substitute.  So we have to build instead. */
2020-08-07 19:09:26 +00:00
void DerivationGoal::gaveUpOnSubstitution()
{
2012-11-26 16:15:09 +00:00
    /* Make sure checkPathValidity() from now on checks all
       outputs. */
2019-12-05 18:11:09 +00:00
    wantedOutputs.clear();
2012-11-26 16:15:09 +00:00
2005-01-25 10:55:33 +00:00
    /* The inputs must be built before we can build this goal. */
2015-07-17 15:57:40 +00:00
    if (useDerivation)
        for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs)
            addWaitee(worker.makeDerivationGoal(i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal));
2003-08-01 14:11:19 +00:00
2015-07-17 15:57:40 +00:00
    for (auto & i : drv->inputSrcs) {
        if (worker.store.isValidPath(i)) continue;
        if (!settings.useSubstitutes)
2019-12-05 18:11:09 +00:00
            throw Error("dependency '%s' of '%s' does not exist, and substitution is disabled",
                worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
2015-07-17 15:57:40 +00:00
        addWaitee(worker.makeSubstitutionGoal(i));
    }
2005-01-19 11:16:11 +00:00
2010-08-24 11:45:44 +00:00
    if (waitees.empty()) /* to prevent hang (no wake-up event) */
        inputsRealised();
    else
        state = &DerivationGoal::inputsRealised;
2004-05-13 19:14:49 +00:00
}
2012-10-03 14:38:09 +00:00
void DerivationGoal::repairClosure()
{
    /* If we're repairing, we now know that our own outputs are valid.
       Now check whether the other paths in the outputs closure are
       good.  If not, then start derivation goals for the derivations
       that produced those outputs. */

    /* Get the output closure. */
2020-08-20 18:14:12 +00:00
    auto outputs = queryDerivationOutputMap();
2019-12-05 18:11:09 +00:00
    StorePathSet outputClosure;
2020-08-07 19:09:26 +00:00
    for (auto & i : outputs) {
2016-01-06 20:49:32 +00:00
        if (!wantOutput(i.first, wantedOutputs)) continue;
2020-08-07 19:09:26 +00:00
        worker.store.computeFSClosure(i.second, outputClosure);
2016-01-06 20:49:32 +00:00
    }
2012-10-03 14:38:09 +00:00
    /* Filter out our own outputs (which we have already checked). */
2020-08-07 19:09:26 +00:00
    for (auto & i : outputs)
        outputClosure.erase(i.second);
2012-10-03 14:38:09 +00:00
    /* Get all dependencies of this derivation so that we know which
       derivation is responsible for which path in the output
       closure. */
2019-12-05 18:11:09 +00:00
    StorePathSet inputClosure;
2016-02-04 13:28:26 +00:00
    if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure);
2019-12-05 18:11:09 +00:00
    std::map<StorePath, StorePath> outputsToDrv;
2015-07-17 15:57:40 +00:00
    for (auto & i : inputClosure)
2019-12-05 18:11:09 +00:00
        if (i.isDerivation()) {
2020-08-20 18:14:12 +00:00
            auto depOutputs = worker.store.queryPartialDerivationOutputMap(i);
2020-08-07 19:09:26 +00:00
            for (auto & j : depOutputs)
2020-08-11 23:07:50 +00:00
                if (j.second)
                    outputsToDrv.insert_or_assign(*j.second, i);
2012-10-03 14:38:09 +00:00
        }

    /* Check each path (slow!). */
2015-07-17 15:57:40 +00:00
    for (auto & i : outputClosure) {
2016-04-08 16:07:13 +00:00
        if (worker.pathContentsGood(i)) continue;
2020-06-02 14:22:24 +00:00
        logError({
            .name = "Corrupt path in closure",
            .hint = hintfmt(
                "found corrupted or missing path '%s' in the output closure of '%s'",
                worker.store.printStorePath(i), worker.store.printStorePath(drvPath))
2020-05-03 14:01:25 +00:00
        });
2019-12-05 18:11:09 +00:00
        auto drvPath2 = outputsToDrv.find(i);
        if (drvPath2 == outputsToDrv.end())
2017-06-28 16:11:01 +00:00
            addWaitee(worker.makeSubstitutionGoal(i, Repair));
2012-10-03 19:35:42 +00:00
        else
2019-12-05 18:11:09 +00:00
            addWaitee(worker.makeDerivationGoal(drvPath2->second, StringSet(), bmRepair));
2012-10-03 14:38:09 +00:00
    }

    if (waitees.empty()) {
2015-07-20 01:15:45 +00:00
        done(BuildResult::AlreadyValid);
2012-10-03 14:38:09 +00:00
        return;
    }

    state = &DerivationGoal::closureRepaired;
}

void DerivationGoal::closureRepaired()
{
    trace("closure repaired");
    if (nrFailed > 0)
2019-12-05 18:11:09 +00:00
        throw Error("some paths in the output closure of derivation '%s' could not be repaired",
            worker.store.printStorePath(drvPath));
2015-07-20 01:15:45 +00:00
    done(BuildResult::AlreadyValid);
2012-10-03 14:38:09 +00:00
}
2005-01-19 11:16:11 +00:00
void DerivationGoal::inputsRealised()
2004-05-11 18:05:44 +00:00
{
2004-06-22 14:48:59 +00:00
    trace("all inputs realised");
2004-05-13 19:14:49 +00:00
2004-06-25 10:21:44 +00:00
    if (nrFailed != 0) {
2015-07-17 15:57:40 +00:00
        if (!useDerivation)
2019-12-05 18:11:09 +00:00
            throw Error("some dependencies of '%s' are missing", worker.store.printStorePath(drvPath));
2020-06-15 17:25:35 +00:00
        done(BuildResult::DependencyFailed, Error(
            "%s dependencies of derivation '%s' failed to build",
            nrFailed, worker.store.printStorePath(drvPath)));
2004-06-25 10:21:44 +00:00
        return;
    }
2013-01-02 11:38:28 +00:00
    if (retrySubstitution) {
        haveDerivation();
        return;
    }
2009-03-18 14:48:42 +00:00
    /* Gather information necessary for computing the closure and/or
       running the build hook. */
2012-07-27 13:59:18 +00:00
2009-03-18 14:48:42 +00:00
    /* Determine the full set of input paths. */

    /* First, the input derivations. */
2020-08-22 20:44:47 +00:00
    if (useDerivation) {
        auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
        if (!fullDrv.inputDrvs.empty() && fullDrv.type() == DerivationType::CAFloating) {
            /* We may be able to resolve this derivation based on the
               now-known results of dependencies.  If so, we become a stub
               goal aliasing that resolved derivation goal. */
2020-09-04 01:17:38 +00:00
            std::optional attempt = fullDrv.tryResolve(worker.store);
            assert(attempt);
            Derivation drvResolved { *std::move(attempt) };
2020-08-22 20:44:47 +00:00
            auto pathResolved = writeDerivation(worker.store, drvResolved);
            /* Add to memotable to speed up downstream goal's queries with the
               original derivation. */
            drvPathResolutions.lock()->insert_or_assign(drvPath, pathResolved);

            auto msg = fmt("Resolved derivation: '%s' -> '%s'",
                worker.store.printStorePath(drvPath),
                worker.store.printStorePath(pathResolved));
            act = std::make_unique<Activity>(*logger, lvlInfo, actBuildWaiting, msg,
                Logger::Fields {
                    worker.store.printStorePath(drvPath),
                    worker.store.printStorePath(pathResolved),
                });

            auto resolvedGoal = worker.makeDerivationGoal(
2020-08-28 21:22:57 +00:00
                pathResolved, wantedOutputs, buildMode);
2020-08-22 20:44:47 +00:00
            addWaitee(resolvedGoal);

            state = &DerivationGoal::resolvedFinished;
            return;
        }

        for (auto & [depDrvPath, wantedDepOutputs] : fullDrv.inputDrvs) {
2015-07-17 15:57:40 +00:00
            /* Add the relevant output closures of the input derivation
               `i' as input paths.  Only add the closures of output paths
               that are specified as inputs. */
2020-08-07 19:09:26 +00:00
            assert(worker.store.isValidPath(drvPath));
2020-08-20 18:14:12 +00:00
            auto outputs = worker.store.queryPartialDerivationOutputMap(depDrvPath);
2020-08-07 19:09:26 +00:00
            for (auto & j : wantedDepOutputs) {
2020-08-11 23:07:50 +00:00
                if (outputs.count(j) > 0) {
                    auto optRealizedInput = outputs.at(j);
                    if (!optRealizedInput)
                        throw Error(
2020-09-04 14:48:50 +00:00
                            "derivation '%s' requires output '%s' from input derivation '%s', which is supposedly realized already, yet we still don't know what path corresponds to that output",
2020-08-11 23:07:50 +00:00
                            worker.store.printStorePath(drvPath), j, worker.store.printStorePath(drvPath));
                    worker.store.computeFSClosure(*optRealizedInput, inputPaths);
                } else
2015-07-17 15:57:40 +00:00
                    throw Error(
2019-12-05 18:11:09 +00:00
                        "derivation '%s' requires non-existent output '%s' from input derivation '%s'",
2020-08-07 19:09:26 +00:00
                        worker.store.printStorePath(drvPath), j, worker.store.printStorePath(drvPath));
2019-12-05 18:11:09 +00:00
            }
2015-07-17 15:57:40 +00:00
        }
2020-08-22 20:44:47 +00:00
    }
2009-03-18 14:48:42 +00:00
    /* Second, the input sources. */
2016-11-10 16:45:04 +00:00
    worker.store.computeFSClosure(drv->inputSrcs, inputPaths);
2009-03-18 14:48:42 +00:00
2019-12-05 18:11:09 +00:00
    debug("added input paths %s", worker.store.showPaths(inputPaths));
2009-03-18 14:48:42 +00:00
2020-03-15 06:23:17 +00:00
    /* What type of derivation are we building? */
    derivationType = drv->type();
2012-07-27 13:59:18 +00:00
2015-11-09 22:16:24 +00:00
    /* Don't repeat fixed-output derivations since they're already
       verified by their output hash. */
2020-06-03 17:38:54 +00:00
    nrRounds = derivationIsFixed(derivationType) ? 1 : settings.buildRepeat + 1;
2015-11-09 22:16:24 +00:00
2004-06-19 21:45:04 +00:00
    /* Okay, try to build.  Note that here we don't wait for a build
       slot to become available, since we don't need one if there is a
       build hook. */
2005-01-19 11:16:11 +00:00
    state = &DerivationGoal::tryToBuild;
2004-06-19 21:45:04 +00:00
    worker.wakeUp(shared_from_this());
2016-12-07 12:16:06 +00:00
    result = BuildResult();
2004-05-13 19:14:49 +00:00
}
2020-05-14 14:00:54 +00:00
void DerivationGoal::started() {
    auto msg = fmt(
        buildMode == bmRepair ? "repairing outputs of '%s'" :
        buildMode == bmCheck ? "checking outputs of '%s'" :
        nrRounds > 1 ? "building '%s' (round %d/%d)" :
        "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
    fmt("building '%s'", worker.store.printStorePath(drvPath));
    if (hook) msg += fmt(" on '%s'", machineName);
    act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
        Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
    mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
    worker.updateProgress();
}
2004-05-13 19:14:49 +00:00
2005-01-19 11:16:11 +00:00
void DerivationGoal::tryToBuild()
2004-05-13 19:14:49 +00:00
{
2004-06-22 14:48:59 +00:00
    trace("trying to build");
2004-06-18 18:09:32 +00:00
2020-08-07 19:09:26 +00:00
    /* Obtain locks on all output paths, if the paths are known a priori.
       The locks are automatically released when we exit this function or Nix
       crashes.  If we can't acquire the lock, then continue; hopefully some
       other goal can start a build, and if not, the main loop will sleep a few
       seconds and then retry this goal. */
2016-06-03 13:45:11 +00:00
    PathSet lockFiles;
2020-08-07 19:09:26 +00:00
    /* FIXME: Should lock something like the drv itself so we don't build same
       CA drv concurrently */
2020-08-14 17:00:13 +00:00
    for (auto & i : drv->outputsAndOptPaths(worker.store))
        if (i.second.second)
            lockFiles.insert(worker.store.Store::toRealPath(*i.second.second));
2016-06-03 13:45:11 +00:00
    if (!outputLocks.lockPaths(lockFiles, "", false)) {
2020-06-15 14:03:29 +00:00
        if (!actLock)
            actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
                fmt("waiting for lock on %s", yellowtxt(showPaths(lockFiles))));
2009-03-23 01:05:54 +00:00
        worker.waitForAWhile(shared_from_this());
        return;
    }
2009-03-22 23:53:05 +00:00
2020-06-15 14:03:29 +00:00
    actLock.reset();
2009-03-22 23:53:05 +00:00
    /* Now check again whether the outputs are valid.  This is because
       another process may have started building in parallel.  After
       it has finished and released the locks, we can (and should)
       reuse its results.  (Strictly speaking the first check can be
       omitted, but that would be less efficient.)  Note that since we
       now hold the locks on the output paths, no other process can
       build this derivation, so no further checks are necessary. */
2020-08-11 23:44:02 +00:00
    checkPathValidity();
2020-08-07 19:09:26 +00:00
    bool allValid = true;
    for (auto & [_, status] : initialOutputs) {
        if (!status.wanted) continue;
        if (!status.known || !status.known->isValid()) {
            allValid = false;
            break;
        }
    }
    if (buildMode != bmCheck && allValid) {
2019-12-05 18:11:09 +00:00
        debug("skipping build of derivation '%s', someone beat us to it", worker.store.printStorePath(drvPath));
2009-03-22 23:53:05 +00:00
        outputLocks.setDeletion(true);
2015-07-20 01:15:45 +00:00
        done(BuildResult::AlreadyValid);
2009-03-22 23:53:05 +00:00
        return;
2009-03-18 14:48:42 +00:00
    }
2004-06-25 10:21:44 +00:00
2009-03-22 23:53:05 +00:00
    /* If any of the outputs already exist but are not valid, delete
       them. */
2020-08-07 19:09:26 +00:00
    for (auto & [_, status] : initialOutputs) {
        if (!status.known || status.known->isValid()) continue;
        auto storePath = status.known->path;
        debug("removing invalid path '%s'", worker.store.printStorePath(status.known->path));
        deletePath(worker.store.Store::toRealPath(storePath));
2009-03-22 23:53:05 +00:00
    }
2009-03-18 14:48:42 +00:00
2010-08-04 12:13:58 +00:00
    /* Don't do a remote build if the derivation has the attribute
2014-02-18 00:01:14 +00:00
       `preferLocalBuild' set.  Also, check and repair modes are only
       supported for local builds. */
2020-08-12 16:32:36 +00:00
    bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store);
2010-08-04 12:13:58 +00:00
2013-06-20 17:26:31 +00:00
    if (!buildLocally) {
2010-08-04 12:13:58 +00:00
        switch (tryBuildHook()) {
            case rpAccept:
                /* Yes, it has started doing so.  Wait until we get
                   EOF from the hook. */
2020-06-15 14:03:29 +00:00
                actLock.reset();
2016-12-07 15:09:38 +00:00
                result.startTime = time(0); // inexact
2010-08-04 12:13:58 +00:00
                state = &DerivationGoal::buildDone;
2017-08-15 13:31:59 +00:00
                started();
2010-08-04 12:13:58 +00:00
                return;
            case rpPostpone:
2010-08-25 20:44:28 +00:00
                /* Not now; wait until at least one child finishes or
                   the wake-up timeout expires. */
2020-06-15 14:03:29 +00:00
                if (!actLock)
                    actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
                        fmt("waiting for a machine to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
2010-08-04 12:13:58 +00:00
                worker.waitForAWhile(shared_from_this());
                outputLocks.unlock();
                return;
            case rpDecline:
                /* We should do it ourselves. */
                break;
        }
2009-03-22 23:53:05 +00:00
    }
2012-07-27 13:59:18 +00:00
2020-06-15 14:03:29 +00:00
    actLock.reset();
2020-09-24 21:07:42 +00:00
    state = &DerivationGoal::tryLocalBuild;
    worker.wakeUp(shared_from_this());
}
void DerivationGoal::tryLocalBuild() {
    bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store);
2010-08-04 12:13:58 +00:00
    /* Make sure that we are allowed to start a build.  If this
       derivation prefers to be done locally, do it even if
       maxBuildJobs is 0. */
    unsigned int curBuilds = worker.getNrLocalBuilds();
2013-06-20 17:26:31 +00:00
    if (curBuilds >= settings.maxBuildJobs && !(buildLocally && curBuilds == 0)) {
2009-03-22 23:53:05 +00:00
        worker.waitForBuildSlot(shared_from_this());
        outputLocks.unlock();
        return;
    }
2012-07-27 13:59:18 +00:00
2020-05-14 14:00:54 +00:00
    /* If `build-users-group' is not empty, then we have to build as
       one of the members of that group. */
    if (settings.buildUsersGroup != "" && getuid() == 0) {
#if defined(__linux__) || defined(__APPLE__)
        if (!buildUser) buildUser = std::make_unique<UserLock>();
        if (buildUser->findFreeUser()) {
            /* Make sure that no other processes are executing under this
               uid. */
            buildUser->kill();
        } else {
2020-06-15 14:03:29 +00:00
            if (!actLock)
                actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
                    fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
2020-05-14 14:00:54 +00:00
            worker.waitForAWhile(shared_from_this());
            return;
        }
#else
        /* Don't know how to block the creation of setuid/setgid
           binaries on this platform. */
        throw Error("build users are not supported on this platform for security reasons");
#endif
    }
2020-06-15 14:03:29 +00:00
    actLock.reset();
2009-03-22 23:53:05 +00:00
    try {
2004-06-19 21:45:04 +00:00
2004-06-25 10:21:44 +00:00
        /* Okay, we have to build. */
        startBuilder();
2003-08-01 14:11:19 +00:00
2004-06-25 10:21:44 +00:00
    } catch (BuildError & e) {
2009-02-16 09:24:20 +00:00
        outputLocks.unlock();
2017-01-25 11:45:38 +00:00
        buildUser.reset();
2010-12-13 16:53:23 +00:00
        worker.permanentFailure = true;
2020-06-15 17:25:35 +00:00
        done(BuildResult::InputRejected, e);
2004-06-19 21:45:04 +00:00
        return;
    }

    /* This state will be reached when we get EOF on the child's
       log pipe. */
2005-01-19 11:16:11 +00:00
    state = &DerivationGoal::buildDone;
2017-08-15 13:31:59 +00:00
    started();
2004-06-19 21:45:04 +00:00
}
2020-09-23 19:29:10 +00:00
static void chmod_(const Path & path, mode_t mode)
{
    if (chmod(path.c_str(), mode) == -1)
        throw SysError("setting permissions on '%s'", path);
}

/* Move/rename path 'src' to 'dst'. Temporarily make 'src' writable if
   it's a directory and we're not root (to be able to update the
   directory's parent link ".."). */
static void movePath(const Path & src, const Path & dst)
{
    auto st = lstat(src);

    bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR));

    if (changePerm)
        chmod_(src, st.st_mode | S_IWUSR);

    if (rename(src.c_str(), dst.c_str()))
        throw SysError("renaming '%1%' to '%2%'", src, dst);

    if (changePerm)
        chmod_(dst, st.st_mode);
}
2020-09-23 18:21:08 +00:00
void replaceValidPath(const Path & storePath, const Path & tmpPath)
2012-10-02 21:13:46 +00:00
{
    /* We can't atomically replace storePath (the original) with
       tmpPath (the replacement), so we have to move it out of the
       way first.  We'd better not be interrupted here, because if
       we're repairing (say) Glibc, we end up with a broken system. */
2018-03-06 23:34:44 +00:00
    Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str();
2013-01-03 11:59:23 +00:00
    if (pathExists(storePath))
2020-09-23 19:29:10 +00:00
        movePath(storePath, oldPath);

    try {
        movePath(tmpPath, storePath);
    } catch (...) {
        try {
            // attempt to recover
            movePath(oldPath, storePath);
        } catch (...) {
            ignoreException();
        }
        throw;
2020-01-04 16:47:52 +00:00
    }
2020-09-23 19:29:10 +00:00
2016-02-24 16:44:12 +00:00
    deletePath(oldPath);
2012-10-02 21:13:46 +00:00
}
2019-11-10 16:14:26 +00:00
MakeError(NotDeterministic, BuildError);
2015-11-09 22:16:24 +00:00
2005-01-19 11:16:11 +00:00
void DerivationGoal::buildDone()
2004-06-19 21:45:04 +00:00
{
2004-06-22 14:48:59 +00:00
    trace("build done");
2004-06-19 21:45:04 +00:00
2017-01-25 11:45:38 +00:00
    /* Release the build user at the end of this function. We don't do
       it right away because we don't want another build grabbing this
       uid and then messing around with our output. */
    Finally releaseBuildUser([&]() { buildUser.reset(); });
2018-10-02 14:01:26 +00:00
    sandboxMountNamespace = -1;
2004-06-19 21:45:04 +00:00
    /* Since we got an EOF on the logger pipe, the builder is presumed
       to have terminated.  In fact, the builder could also have
2017-01-19 15:58:39 +00:00
       simply closed its end of the pipe, so just to be sure,
       kill it. */
2017-03-16 09:52:28 +00:00
    int status = hook ? hook->pid.kill() : pid.kill();
2004-06-19 21:45:04 +00:00
2019-12-05 18:11:09 +00:00
    debug("builder process for '%s' finished", worker.store.printStorePath(drvPath));
2006-12-08 00:19:50 +00:00
2016-12-07 12:16:06 +00:00
    result.timesBuilt++;
2016-12-07 15:09:38 +00:00
    result.stopTime = time(0);
2016-12-07 12:16:06 +00:00
2004-06-19 21:45:04 +00:00
    /* So the child is gone now. */
2016-08-30 13:45:39 +00:00
    worker.childTerminated(this);
2012-07-27 13:59:18 +00:00
2006-12-08 00:19:50 +00:00
    /* Close the read side of the logger pipe. */
2010-08-30 14:53:03 +00:00
    if (hook) {
2016-07-11 19:44:44 +00:00
        hook->builderOut.readSide = -1;
        hook->fromHook.readSide = -1;
2019-05-17 20:29:15 +00:00
    } else
        builderOut.readSide = -1;
2006-12-08 00:19:50 +00:00
    /* Close the log file. */
2012-05-30 14:12:29 +00:00
    closeLogFile();
2006-12-08 00:19:50 +00:00
2005-10-17 17:43:21 +00:00
    /* When running under a build user, make sure that all processes
       running under that uid are gone.  This is to prevent a
       malicious user from leaving behind a process that keeps files
       open and modifies them after they have been chown'ed to
       root. */
2017-01-25 11:45:38 +00:00
    if (buildUser) buildUser->kill();
2005-10-17 17:43:21 +00:00
2018-10-02 14:01:26 +00:00
    /* Terminate the recursive Nix daemon. */
    stopDaemon();
2014-02-17 13:15:56 +00:00
    bool diskFull = false;
2006-12-08 17:26:21 +00:00
    try {
2004-06-19 21:45:04 +00:00
2014-02-17 21:25:15 +00:00
        /* Check the exit status. */
        if (!statusOk(status)) {
2012-07-27 13:59:18 +00:00
2014-02-17 21:25:15 +00:00
            /* Heuristically check whether the build failure may have
               been caused by a disk full condition.  We have no way
               of knowing whether the build actually got an ENOSPC.
               So instead, check if the disk is (nearly) full now.  If
               so, we don't mark this build as a permanent failure. */
#if HAVE_STATVFS
2020-07-30 11:10:49 +00:00
            uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable
2014-02-17 21:25:15 +00:00
            struct statvfs st;
2016-06-03 13:45:11 +00:00
            if (statvfs(worker.store.realStoreDir.c_str(), &st) == 0 &&
2020-07-30 11:10:49 +00:00
                (uint64_t) st.f_bavail * st.f_bsize < required)
2014-02-17 21:25:15 +00:00
                diskFull = true;
            if (statvfs(tmpDir.c_str(), &st) == 0 &&
2020-07-30 11:10:49 +00:00
                (uint64_t) st.f_bavail * st.f_bsize < required)
2014-02-17 21:25:15 +00:00
                diskFull = true;
2006-12-08 00:19:50 +00:00
#endif
2004-06-19 21:45:04 +00:00
2014-02-17 21:25:15 +00:00
            deleteTmpDir(false);
2013-06-13 15:12:06 +00:00
2014-02-17 21:25:15 +00:00
            /* Move paths out of the chroot for easier debugging of
               build failures. */
2014-02-18 00:01:14 +00:00
            if (useChroot && buildMode == bmNormal)
2020-08-07 19:09:26 +00:00
                for (auto & [_, status] : initialOutputs) {
                    if (!status.known) continue;
                    if (buildMode != bmCheck && status.known->isValid()) continue;
                    auto p = worker.store.printStorePath(status.known->path);
2019-12-05 18:11:09 +00:00
                    if (pathExists(chrootRootDir + p))
                        rename((chrootRootDir + p).c_str(), p.c_str());
                }
2012-07-27 13:59:18 +00:00
2019-12-05 18:11:09 +00:00
            auto msg = fmt("builder for '%s' %s",
2020-06-15 17:25:35 +00:00
                yellowtxt(worker.store.printStorePath(drvPath)),
2019-12-05 18:11:09 +00:00
                statusToString(status));
2016-04-25 14:47:46 +00:00
2020-06-05 16:20:11 +00:00
            if (!logger->isVerbose() && !logTail.empty()) {
2016-04-25 14:47:46 +00:00
                msg += (format("; last %d log lines:") % logTail.size()).str();
                for (auto & line : logTail)
                    msg += "\n  " + line;
            }
2014-02-17 13:15:56 +00:00
            if (diskFull)
2016-04-25 14:47:46 +00:00
                msg += "\nnote: build failure may have been caused by lack of free disk space";
2014-02-18 00:01:14 +00:00
2016-04-25 14:47:46 +00:00
            throw BuildError(msg);
2006-12-08 17:26:21 +00:00
        }
2012-07-27 13:59:18 +00:00
2006-12-08 17:26:21 +00:00
        /* Compute the FS closure of the outputs and register them as
           being valid. */
2014-02-17 21:25:15 +00:00
        registerOutputs();
2019-07-11 18:23:03 +00:00
        if (settings.postBuildHook != "") {
            Activity act(*logger, lvlInfo, actPostBuildHook,
                fmt("running post-build-hook '%s'", settings.postBuildHook),
2019-12-05 18:11:09 +00:00
                Logger::Fields{worker.store.printStorePath(drvPath)});
2019-07-11 18:23:03 +00:00
            PushActivity pact(act.id);
2020-08-07 19:09:26 +00:00
            StorePathSet outputPaths;
            for (auto i : drv->outputs) {
                outputPaths.insert(finalOutputs.at(i.first));
            }
2019-07-11 18:23:03 +00:00
            std::map<std::string, std::string> hookEnvironment = getEnv();
2019-12-05 18:11:09 +00:00
            hookEnvironment.emplace("DRV_PATH", worker.store.printStorePath(drvPath));
            hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", worker.store.printStorePathSet(outputPaths))));
2019-07-11 18:23:03 +00:00
            RunOptions opts(settings.postBuildHook, {});
            opts.environment = hookEnvironment;

            struct LogSink : Sink {
                Activity & act;
                std::string currentLine;

                LogSink(Activity & act) : act(act) { }

                void operator() (const unsigned char * data, size_t len) override {
                    for (size_t i = 0; i < len; i++) {
                        auto c = data[i];
                        if (c == '\n') {
                            flushLine();
                        } else {
                            currentLine += c;
                        }
                    }
                }

                void flushLine() {
2020-06-05 16:20:11 +00:00
                    act.result(resPostBuildLogLine, currentLine);
2019-07-11 18:23:03 +00:00
                    currentLine.clear();
                }

                ~LogSink() {
                    if (currentLine != "") {
                        currentLine += '\n';
                        flushLine();
                    }
                }
            };
            LogSink sink(act);

            opts.standardOut = &sink;
            opts.mergeStderrToStdout = true;
            runProgram2(opts);
        }
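The hook invoked above is just an external program that is run with DRV_PATH and OUT_PATHS in its environment, with its output captured line by line into the build log. As an illustration only (any executable would do; this is not shipped with Nix), a minimal stand-in hook written as a standalone C++ program could be:

#include <cstdlib>
#include <iostream>

/* Illustrative post-build hook: print the derivation and its outputs.
   A real hook might instead copy the paths in $OUT_PATHS to a binary cache. */
int main()
{
    const char * drv = std::getenv("DRV_PATH");
    const char * outs = std::getenv("OUT_PATHS");
    std::cout << "post-build: " << (drv ? drv : "?") << "\n";
    std::cout << "outputs: " << (outs ? outs : "") << "\n";
    return 0;
}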
2014-02-18 00:01:14 +00:00
        if (buildMode == bmCheck) {
2019-02-17 21:26:49 +00:00
            deleteTmpDir(true);
2015-07-20 01:15:45 +00:00
            done(BuildResult::Built);
2014-02-18 00:01:14 +00:00
            return;
        }

        /* Delete unused redirected outputs (when doing hash rewriting). */
2015-07-17 17:24:28 +00:00
        for (auto & i : redirectedOutputs)
2020-03-24 13:26:13 +00:00
            deletePath(worker.store.Store::toRealPath(i.second));
2014-02-18 00:01:14 +00:00
2014-02-17 21:25:15 +00:00
        /* Delete the chroot (if we were using one). */
        autoDelChroot.reset(); /* this runs the destructor */
2008-11-12 11:08:27 +00:00
2012-09-18 14:11:42 +00:00
        deleteTmpDir(true);
2015-11-09 22:16:24 +00:00
        /* Repeat the build if necessary. */
        if (curRound++ < nrRounds) {
            outputLocks.unlock();
            state = &DerivationGoal::tryToBuild;
            worker.wakeUp(shared_from_this());
            return;
        }
2011-12-25 16:38:37 +00:00
        /* It is now safe to delete the lock files, since all future
           lockers will see that the output paths are valid; they will
           not create new lock files with the same names as the old
           (unlinked) lock files. */
        outputLocks.setDeletion(true);
        outputLocks.unlock();
2004-06-25 10:21:44 +00:00
    } catch (BuildError & e) {
2009-02-16 09:24:20 +00:00
        outputLocks.unlock();
2009-04-15 06:25:02 +00:00
2015-07-20 01:15:45 +00:00
        BuildResult::Status st = BuildResult::MiscFailure;
2016-04-25 14:35:28 +00:00
        if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101)
2015-07-20 01:15:45 +00:00
            st = BuildResult::TimedOut;
2012-07-27 13:59:18 +00:00
2014-08-17 17:09:03 +00:00
        else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) {
        }
        else {
2015-07-20 01:15:45 +00:00
            st =
2015-11-09 22:16:24 +00:00
                dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic :
2015-07-20 01:15:45 +00:00
                statusOk(status) ? BuildResult::OutputRejected :
2020-06-03 17:38:54 +00:00
                derivationIsImpure(derivationType) || diskFull ? BuildResult::TransientFailure :
2015-07-20 01:15:45 +00:00
                BuildResult::PermanentFailure;
2008-11-12 11:08:27 +00:00
        }
2009-03-25 21:05:42 +00:00
2020-06-15 17:25:35 +00:00
        done(st, e);
2004-06-25 10:21:44 +00:00
        return;
    }
2004-06-19 21:45:04 +00:00
2015-07-20 01:15:45 +00:00
    done(BuildResult::Built);
2004-06-19 21:45:04 +00:00
}
2020-08-22 20:44:47 +00:00
void DerivationGoal::resolvedFinished() {
    done(BuildResult::Built);
}
2004-06-19 21:45:04 +00:00
2010-08-25 20:44:28 +00:00
HookReply DerivationGoal::tryBuildHook()
2004-06-19 21:45:04 +00:00
{
2017-10-24 09:00:16 +00:00
    if (!worker.tryBuildHook || !useDerivation) return rpDecline;
2004-06-19 21:45:04 +00:00
2010-08-25 20:44:28 +00:00
    if (!worker.hook)
2017-01-19 14:15:09 +00:00
        worker.hook = std::make_unique<HookInstance>();
2004-06-19 21:45:04 +00:00
2017-03-03 14:40:06 +00:00
    try {

        /* Send the request to the hook. */
2017-10-23 18:43:04 +00:00
        worker.hook->sink
            << "try"
            << (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0)
            << drv->platform
2019-12-05 18:11:09 +00:00
            << worker.store.printStorePath(drvPath)
2018-09-28 13:57:27 +00:00
            << parsedDrv->getRequiredSystemFeatures();
2017-10-23 18:43:04 +00:00
        worker.hook->sink.flush();
2017-03-03 14:40:06 +00:00
        /* Read the first line of input, which should be a word indicating
           whether the hook wishes to perform the build. */
        string reply;
        while (true) {
            string s = readLine(worker.hook->fromHook.readSide.get());
2017-10-24 12:47:23 +00:00
            if (handleJSONLogMessage(s, worker.act, worker.hook->activities, true))
2017-10-24 11:41:52 +00:00
                ;
            else if (string(s, 0, 2) == "# ") {
2017-03-03 14:40:06 +00:00
                reply = string(s, 2);
                break;
            }
2017-10-24 11:41:52 +00:00
            else {
                s += "\n";
                writeToStderr(s);
            }
2009-03-28 19:29:55 +00:00
        }
2004-06-19 21:45:04 +00:00
2020-05-11 21:52:15 +00:00
debug ( " hook reply is '%1%' " , reply ) ;
2017-03-03 14:40:06 +00:00
2017-05-02 10:16:29 +00:00
if ( reply = = " decline " )
return rpDecline ;
else if ( reply = = " decline-permanently " ) {
2017-10-24 09:00:16 +00:00
worker . tryBuildHook = false ;
2017-05-02 10:16:29 +00:00
worker . hook = 0 ;
return rpDecline ;
}
else if ( reply = = " postpone " )
return rpPostpone ;
2017-03-03 14:40:06 +00:00
else if ( reply ! = " accept " )
2019-12-05 18:11:09 +00:00
throw Error ( " bad hook reply '%s' " , reply ) ;
2004-06-19 21:45:04 +00:00
2017-03-03 14:40:06 +00:00
    } catch (SysError & e) {
        if (e.errNo == EPIPE) {
2020-06-02 14:22:24 +00:00
            logError({
                .name = "Build hook died",
                .hint = hintfmt(
                    "build hook died unexpectedly: %s",
                    chomp(drainFD(worker.hook->fromHook.readSide.get())))
2020-05-03 14:01:25 +00:00
            });
2017-03-03 14:40:06 +00:00
            worker.hook = 0;
            return rpDecline;
        } else
            throw;
    }
2004-06-19 21:45:04 +00:00
2017-01-19 14:15:09 +00:00
    hook = std::move(worker.hook);
2012-07-27 13:59:18 +00:00
2017-10-24 12:24:57 +00:00
    machineName = readLine(hook->fromHook.readSide.get());
2010-08-25 20:44:28 +00:00
    /* Tell the hook all the inputs that have to be copied to the
2017-05-01 13:00:39 +00:00
       remote system. */
2019-12-05 18:11:09 +00:00
    writeStorePaths(worker.store, hook->sink, inputPaths);
2012-07-27 13:59:18 +00:00
2014-02-17 21:58:21 +00:00
    /* Tell the hook the missing outputs that have to be copied back
       from the remote system. */
2020-08-07 19:09:26 +00:00
    {
        StorePathSet missingPaths;
        for (auto & [_, status] : initialOutputs) {
            if (!status.known) continue;
            if (buildMode != bmCheck && status.known->isValid()) continue;
            missingPaths.insert(status.known->path);
        }
        writeStorePaths(worker.store, hook->sink, missingPaths);
    }
2012-07-27 13:59:18 +00:00
2017-10-23 18:43:04 +00:00
    hook->sink = FdSink();
2016-07-11 19:44:44 +00:00
    hook->toHook.writeSide = -1;
2004-06-19 21:45:04 +00:00
2010-08-25 20:44:28 +00:00
    /* Create the log file and pipe. */
    Path logFile = openLogFile();
2010-08-30 14:53:03 +00:00
    set<int> fds;
2016-07-11 19:44:44 +00:00
    fds.insert(hook->fromHook.readSide.get());
    fds.insert(hook->builderOut.readSide.get());
2016-04-29 11:57:08 +00:00
    worker.childStarted(shared_from_this(), fds, false, false);
2012-07-27 13:59:18 +00:00
    return rpAccept;
2004-06-19 21:45:04 +00:00
}
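/* Illustrative sketch (not part of the build): the hook reply protocol
   distilled into a small standalone helper. It assumes only what the code
   above shows: the hook emits log lines (JSON or plain, handled elsewhere)
   and signals its decision on a line starting with "# ". The helper name and
   the use of std::istream are hypothetical simplifications. */
#if 0
#include <istream>
#include <optional>
#include <string>

static std::optional<std::string> readHookReply(std::istream & fromHook)
{
    std::string line;
    while (std::getline(fromHook, line)) {
        if (line.rfind("# ", 0) == 0)
            return line.substr(2);   // e.g. "accept", "decline", "postpone"
        // Anything else is log output; a real implementation would forward it.
    }
    return std::nullopt;             // hook closed the stream without replying
}
#endif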
2014-12-23 16:25:06 +00:00
int childEntry(void * arg)
{
    ((DerivationGoal *) arg)->runChild();
    return 1;
}
2019-12-05 18:11:09 +00:00
StorePathSet DerivationGoal::exportReferences(const StorePathSet & storePaths)
2017-10-25 12:08:29 +00:00
{
2019-12-05 18:11:09 +00:00
    StorePathSet paths;
2018-04-17 10:03:27 +00:00
2019-12-05 18:11:09 +00:00
    for (auto & storePath : storePaths) {
2018-04-17 10:03:27 +00:00
        if (!inputPaths.count(storePath))
2019-12-05 18:11:09 +00:00
            throw BuildError("cannot export references of path '%s' because it is not in the input closure of the derivation", worker.store.printStorePath(storePath));
2018-04-17 10:03:27 +00:00
2020-06-16 20:20:18 +00:00
        worker.store.computeFSClosure({storePath}, paths);
2017-10-25 13:18:49 +00:00
    }
2017-10-25 12:08:29 +00:00
    /* If there are derivations in the graph, then include their
       outputs as well. This is useful if you want to do things
       like passing all build-time dependencies of some path to a
       derivation that builds a NixOS DVD image. */
2020-06-16 20:20:18 +00:00
    auto paths2 = paths;
2017-10-25 12:08:29 +00:00
    for (auto & j : paths2) {
2019-12-05 18:11:09 +00:00
        if (j.isDerivation()) {
2018-04-17 10:03:27 +00:00
            Derivation drv = worker.store.derivationFromPath(j);
2020-08-14 17:00:13 +00:00
            for (auto & k : drv.outputsAndOptPaths(worker.store)) {
                if (!k.second.second)
2020-08-07 19:09:26 +00:00
                    /* FIXME: I am confused why we are calling
                       `computeFSClosure` on the output path, rather than the
                       derivation itself. That doesn't seem right to me, so I
                       won't try to implement this for CA derivations. */
2020-09-04 14:48:50 +00:00
                    throw UnimplementedError("exportReferences on CA derivations is not yet implemented");
2020-08-14 17:00:13 +00:00
                worker.store.computeFSClosure(*k.second.second, paths);
2020-08-07 19:09:26 +00:00
            }
2017-10-25 12:08:29 +00:00
        }
    }

    return paths;
}
2018-01-23 19:13:30 +00:00
static std::once_flag dns_resolve_flag;

static void preloadNSS() {
    /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
       one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
2018-06-12 11:05:14 +00:00
       been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
2018-01-23 19:13:30 +00:00
       load its lookup libraries in the parent before any child gets a chance to. */
    std::call_once(dns_resolve_flag, []() {
2018-06-12 11:05:14 +00:00
        struct addrinfo * res = NULL;
        if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
            if (res) freeaddrinfo(res);
        }
2018-01-23 19:13:30 +00:00
    });
}
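/* Illustrative sketch (not part of the build): the same NSS warm-up pattern
   in isolation. A parent process resolves a throwaway name once, so that
   glibc loads its NSS plugins before any sandboxed child (which cannot load
   shared libraries from the host) needs name resolution. All names below are
   hypothetical. */
#if 0
#include <netdb.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

static void warmUpNSS()
{
    struct addrinfo * res = nullptr;
    /* The lookup is expected to fail; its only purpose is the side effect of
       loading the NSS libraries into this process. */
    if (getaddrinfo("nss-warmup.invalid.", "http", nullptr, &res) != 0) {
        if (res) freeaddrinfo(res);
    }
}

int main()
{
    warmUpNSS();
    if (fork() == 0) {
        /* ... set up the sandbox and run the builder; name lookups no longer
           require loading additional shared libraries ... */
        _exit(0);
    }
    return 0;
}
#endif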
2017-10-25 12:08:29 +00:00
2018-10-02 09:22:13 +00:00
void linkOrCopy(const Path & from, const Path & to)
{
    if (link(from.c_str(), to.c_str()) == -1) {
        /* Hard-linking fails if we exceed the maximum link count on a
           file (e.g. 32000 of ext3), which is quite possible after a
           'nix-store --optimise'. FIXME: actually, why don't we just
2020-06-25 10:03:26 +00:00
           bind-mount in this case?
2020-08-22 20:44:47 +00:00
2020-06-25 10:03:26 +00:00
           It can also fail with EPERM in BeegFS v7 and earlier versions
           which don't allow hard-links to other directories. */
        if (errno != EMLINK && errno != EPERM)
2018-10-02 09:22:13 +00:00
            throw SysError("linking '%s' to '%s'", to, from);
        copyPath(from, to);
    }
}
2005-01-19 11:16:11 +00:00
void DerivationGoal::startBuilder()
2004-05-11 18:05:44 +00:00
{
2004-05-12 14:20:32 +00:00
    /* Right platform? */
2020-08-12 16:32:36 +00:00
    if (!parsedDrv->canBuildLocally(worker.store))
2018-09-28 13:57:27 +00:00
        throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
            drv->platform,
            concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()),
2019-12-05 18:11:09 +00:00
            worker.store.printStorePath(drvPath),
2018-09-28 13:57:27 +00:00
            settings.thisSystem,
2020-08-12 16:32:36 +00:00
            concatStringsSep<StringSet>(", ", worker.store.systemFeatures));
2004-05-12 14:20:32 +00:00
2018-01-23 19:13:30 +00:00
    if (drv->isBuiltin())
        preloadNSS();
2016-01-04 15:32:16 +00:00
#if __APPLE__
2018-09-28 12:31:16 +00:00
    additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
2016-01-04 15:32:16 +00:00
#endif
2017-01-25 11:00:28 +00:00
    /* Are we doing a chroot build? */
2015-12-02 13:59:07 +00:00
    {
2018-09-28 12:31:16 +00:00
        auto noChroot = parsedDrv->getBoolAttr("__noChroot");
2017-04-13 18:53:23 +00:00
        if (settings.sandboxMode == smEnabled) {
2018-09-28 10:43:01 +00:00
            if (noChroot)
2019-12-05 18:11:09 +00:00
                throw Error("derivation '%s' has '__noChroot' set, "
                    "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
2016-01-05 12:31:15 +00:00
#if __APPLE__
2016-01-04 15:32:16 +00:00
            if (additionalSandboxProfile != "")
2019-12-05 18:11:09 +00:00
                throw Error("derivation '%s' specifies a sandbox profile, "
                    "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
2016-01-05 12:31:15 +00:00
#endif
2015-12-02 13:59:07 +00:00
            useChroot = true;
        }
2017-04-13 18:53:23 +00:00
        else if (settings.sandboxMode == smDisabled)
2015-12-02 13:59:07 +00:00
            useChroot = false;
2017-04-13 18:53:23 +00:00
        else if (settings.sandboxMode == smRelaxed)
2020-06-03 17:38:54 +00:00
            useChroot = !(derivationIsImpure(derivationType)) && !noChroot;
2015-12-02 13:59:07 +00:00
    }
2018-01-15 11:14:43 +00:00
    if (worker.store.storeDir != worker.store.realStoreDir) {
#if __linux__
        useChroot = true;
#else
        throw Error("building using a diverted store is not supported on this platform");
#endif
    }
2016-06-03 13:45:11 +00:00
2004-05-11 18:05:44 +00:00
    /* Create a temporary directory where the build will take
       place. */
2019-12-05 18:11:09 +00:00
    tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
2015-12-02 13:59:07 +00:00
2017-01-25 11:00:28 +00:00
    chownToBuilder(tmpDir);
2004-05-11 18:05:44 +00:00
2020-08-07 19:09:26 +00:00
    for (auto & [outputName, status] : initialOutputs) {
        /* Set the scratch path we'll actually use during the build.

           If we're not doing a chroot build but some output paths are
           already valid, we can't just overwrite or delete them, so we
           have to do hash rewriting: i.e. in the environment/arguments
           passed to the build, we replace the hashes of the valid
           outputs with unique dummy strings; after the build, we
           discard the redirected outputs corresponding to the valid
           outputs, and rewrite the contents of the new outputs to
           replace the dummy strings with the actual hashes. */
        auto scratchPath =
            !status.known
                ? makeFallbackPath(outputName)
            : !needsHashRewrite()
                /* Can always use original path in sandbox */
                ? status.known->path
2020-09-15 15:19:45 +00:00
            : !status.known->isPresent()
2020-08-07 19:09:26 +00:00
                /* If path doesn't yet exist can just use it */
                ? status.known->path
2020-09-15 15:19:45 +00:00
            : buildMode != bmRepair && !status.known->isValid()
2020-08-07 19:09:26 +00:00
                /* If we aren't repairing we'll delete a corrupted path, so we
                   can use original path */
                ? status.known->path
            :   /* If we are repairing or the path is totally valid, we'll need
                   to use a temporary path */
                makeFallbackPath(status.known->path);
        scratchOutputs.insert_or_assign(outputName, scratchPath);

        /* A non-removed corrupted path needs to be stored here, too */
2020-09-15 15:19:45 +00:00
        if (buildMode == bmRepair && !status.known->isValid())
2020-08-07 19:09:26 +00:00
            redirectedBadOutputs.insert(status.known->path);

        /* Substitute output placeholders with the scratch output paths,
           which we'll use during the build. */
        inputRewrites[hashPlaceholder(outputName)] = worker.store.printStorePath(scratchPath);

        /* Additional tasks if we know the final path a priori. */
        if (!status.known) continue;
        auto fixedFinalPath = status.known->path;

        /* Additional tasks if the final and scratch are both known and
           differ. */
        if (fixedFinalPath == scratchPath) continue;

2020-09-04 14:48:50 +00:00
        /* Ensure scratch path is ours to use. */
2020-08-07 19:09:26 +00:00
        deletePath(worker.store.printStorePath(scratchPath));

        /* Rewrite and unrewrite paths */
        {
            std::string h1 { fixedFinalPath.hashPart() };
            std::string h2 { scratchPath.hashPart() };
            inputRewrites[h1] = h2;
        }

        redirectedOutputs.insert_or_assign(std::move(fixedFinalPath), std::move(scratchPath));
    }
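    /* Illustrative sketch (not part of the build): the hash-rewriting trick
       in isolation. Given a rewrite map from the hash part of a final output
       path to the hash part of its scratch path, every occurrence in a string
       (an environment variable, a build argument) is replaced before the
       build and mapped back afterwards. The helper and the scratch hash below
       are hypothetical. */
#if 0
#include <map>
#include <string>

static std::string applyRewrites(std::string s, const std::map<std::string, std::string> & rewrites)
{
    for (auto & [from, to] : rewrites) {
        size_t pos = 0;
        while ((pos = s.find(from, pos)) != std::string::npos) {
            s.replace(pos, from.size(), to);
            pos += to.size();
        }
    }
    return s;
}

// Example: point the builder at the scratch path instead of the (valid) final path.
// std::map<std::string, std::string> rewrites = {
//     { "10h6li26i7g6z3mdpvra09yyf10mmzdr",     // hash part of the final output
//       "9f3z1q0w8d2k5m7r4s6t8v0x2z4b6d8f" } }; // hash part of the scratch output (made up)
// applyRewrites("--prefix=/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10", rewrites);
#endif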
2016-08-17 13:12:54 +00:00
Add support for passing structured data to builders
Previously, all derivation attributes had to be coerced into strings
so that they could be passed via the environment. This is lossy
(e.g. lists get flattened, necessitating configureFlags
vs. configureFlagsArray, of which the latter cannot be specified as an
attribute), doesn't support attribute sets at all, and has size
limitations (necessitating hacks like passAsFile).
This patch adds a new mode for passing attributes to builders, namely
encoded as a JSON file ".attrs.json" in the current directory of the
builder. This mode is activated via the special attribute
__structuredAttrs = true;
(The idea is that one day we can set this in stdenv.mkDerivation.)
For example,
stdenv.mkDerivation {
__structuredAttrs = true;
name = "foo";
buildInputs = [ pkgs.hello pkgs.cowsay ];
doCheck = true;
hardening.format = false;
}
results in a ".attrs.json" file containing (sans the indentation):
{
"buildInputs": [],
"builder": "/nix/store/ygl61ycpr2vjqrx775l1r2mw1g2rb754-bash-4.3-p48/bin/bash",
"configureFlags": [
"--with-foo",
"--with-bar=1 2"
],
"doCheck": true,
"hardening": {
"format": false
},
"name": "foo",
"nativeBuildInputs": [
"/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10",
"/nix/store/4jnvjin0r6wp6cv1hdm5jbkx3vinlcvk-cowsay-3.03"
],
"propagatedBuildInputs": [],
"propagatedNativeBuildInputs": [],
"stdenv": "/nix/store/f3hw3p8armnzy6xhd4h8s7anfjrs15n2-stdenv",
"system": "x86_64-linux"
}
"passAsFile" is ignored in this mode because it's not needed - large
strings are included directly in the JSON representation.
It is up to the builder to do something with the JSON
representation. For example, in bash-based builders, lists/attrsets of
string values could be mapped to bash (associative) arrays.
2017-01-25 15:42:07 +00:00
    /* Construct the environment passed to the builder. */
    initEnv();

    writeStructuredAttrs();
2017-01-24 16:18:11 +00:00
    /* Handle exportReferencesGraph(), if set. */
2018-09-28 12:31:16 +00:00
    if (!parsedDrv->getStructuredAttrs()) {
2017-10-25 12:08:29 +00:00
        /* The `exportReferencesGraph' feature allows the references graph
           to be passed to a builder. This attribute should be a list of
           pairs [name1 path1 name2 path2 ...]. The references graph of
           each `pathN' will be stored in a text file `nameN' in the
           temporary build directory. The text files have the format used
           by `nix-store --register-validity'. However, the deriver
           fields are left empty. */
2019-12-05 18:11:09 +00:00
        string s = get(drv->env, "exportReferencesGraph").value_or("");
2017-10-25 12:08:29 +00:00
        Strings ss = tokenizeString<Strings>(s);
        if (ss.size() % 2 != 0)
2020-04-21 23:07:07 +00:00
            throw BuildError("odd number of tokens in 'exportReferencesGraph': '%1%'", s);
2017-10-25 12:08:29 +00:00
        for (Strings::iterator i = ss.begin(); i != ss.end(); ) {
            string fileName = *i++;
2020-01-02 01:50:40 +00:00
            static std::regex regex("[A-Za-z_][A-Za-z0-9_.-]*");
2019-12-05 18:11:09 +00:00
            if (!std::regex_match(fileName, regex))
                throw Error("invalid file name '%s' in 'exportReferencesGraph'", fileName);
2020-07-07 12:25:43 +00:00
            auto storePathS = *i++;
            if (!worker.store.isInStore(storePathS))
                throw BuildError("'exportReferencesGraph' contains a non-store path '%1%'", storePathS);
2020-07-13 14:19:37 +00:00
            auto storePath = worker.store.toStorePath(storePathS).first;
2017-10-25 12:08:29 +00:00
            /* Write closure info to <fileName>. */
            writeFile(tmpDir + "/" + fileName,
                worker.store.makeValidityRegistration(
2020-06-16 20:20:18 +00:00
                    exportReferences({storePath}), false, false));
2017-10-25 12:08:29 +00:00
        }
    }
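    /* Illustrative sketch (not part of the build): how an
       'exportReferencesGraph' value tokenizes into (file name, store path)
       pairs before each closure file is written. The helper, the attribute
       value and the store path in the usage comment are hypothetical. */
#if 0
#include <sstream>
#include <string>
#include <utility>
#include <vector>

/* Split a whitespace-separated "name1 path1 name2 path2 ..." string into
   pairs, mirroring the tokenization above (the odd-count check is omitted). */
static std::vector<std::pair<std::string, std::string>> parsePairs(const std::string & s)
{
    std::istringstream in(s);
    std::vector<std::pair<std::string, std::string>> pairs;
    std::string name, path;
    while (in >> name >> path)
        pairs.push_back({name, path});
    return pairs;
}

// parsePairs("closure /nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10")
//   -> { { "closure", "/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10" } }
// The builder can then read the file `closure' in its build directory.
#endif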
2009-03-18 17:36:42 +00:00
2007-10-27 00:46:59 +00:00
    if (useChroot) {
2015-03-24 10:57:46 +00:00
2015-01-06 06:27:38 +00:00
        /* Allow a user-configurable set of directories from the
           host file system. */
Explicitly model all settings and fail on unrecognized ones
Previously, the Settings class allowed other code to query for string
properties, which led to a proliferation of code all over the place making
up new options without any sort of central registry of valid options. This
commit pulls all those options back into the central Settings class and
removes the public get() methods, to discourage future abuses like that.
Furthermore, because we know the full set of options ahead of time, we
now fail loudly if someone enters an unrecognized option, thus preventing
subtle typos. With some template fun, we could probably also dump the full
set of options (with documentation, defaults, etc.) to the command line,
but I'm not doing that yet here.
2017-02-22 03:50:18 +00:00
        PathSet dirs = settings.sandboxPaths;
        PathSet dirs2 = settings.extraSandboxPaths;
2015-01-06 06:27:38 +00:00
        dirs.insert(dirs2.begin(), dirs2.end());
2015-11-09 22:16:24 +00:00
        dirsInChroot.clear();
2016-10-31 16:09:52 +00:00
        for (auto i : dirs) {
            if (i.empty()) continue;
            bool optional = false;
            if (i[i.size() - 1] == '?') {
                optional = true;
                i.pop_back();
            }
2015-01-06 06:27:38 +00:00
            size_t p = i.find('=');
            if (p == string::npos)
2016-10-31 16:09:52 +00:00
                dirsInChroot[i] = {i, optional};
2015-01-06 06:27:38 +00:00
            else
2016-10-31 16:09:52 +00:00
                dirsInChroot[string(i, 0, p)] = {string(i, p + 1), optional};
2015-01-06 06:27:38 +00:00
        }
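        /* Illustrative sketch (not part of the build): parsing a single
           sandbox-paths entry of the form "guest[=host]" with an optional
           trailing '?' marking it as optional, mirroring the loop above.
           The struct/function names and example entries are hypothetical. */
#if 0
#include <string>
#include <utility>

struct ChrootDir { std::string source; bool optional; };

static std::pair<std::string, ChrootDir> parseSandboxPathEntry(std::string entry)
{
    bool optional = false;
    if (!entry.empty() && entry.back() == '?') {   // '?' is checked before the '=' split
        optional = true;
        entry.pop_back();
    }
    auto eq = entry.find('=');
    if (eq == std::string::npos)
        return {entry, {entry, optional}};          // same path on host and in sandbox
    return {entry.substr(0, eq), {entry.substr(eq + 1), optional}};
}

// parseSandboxPathEntry("/dev/kvm?")  -> { "/dev/kvm", { "/dev/kvm", true } }
// parseSandboxPathEntry("/bin/sh=/nix/store/...-sh/bin/sh")
//                                     -> { "/bin/sh", { ".../bin/sh", false } }
#endif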
2015-12-02 13:59:07 +00:00
        dirsInChroot[tmpDirInSandbox] = tmpDir;
2015-01-06 06:27:38 +00:00
2015-03-24 10:50:22 +00:00
        /* Add the closure of store paths to the chroot. */
2019-12-05 18:11:09 +00:00
        StorePathSet closure;
2015-03-24 10:50:22 +00:00
        for (auto & i : dirsInChroot)
2016-09-08 15:30:25 +00:00
            try {
2016-10-31 16:09:52 +00:00
                if (worker.store.isInStore(i.second.source))
2020-07-13 14:19:37 +00:00
                    worker.store.computeFSClosure(worker.store.toStorePath(i.second.source).first, closure);
2017-05-15 15:26:20 +00:00
            } catch (InvalidPath & e) {
2016-09-08 15:30:25 +00:00
            } catch (Error & e) {
2020-04-21 23:07:07 +00:00
                throw Error("while processing 'sandbox-paths': %s", e.what());
2016-09-08 15:30:25 +00:00
            }
2019-12-05 18:11:09 +00:00
        for (auto & i : closure) {
            auto p = worker.store.printStorePath(i);
            dirsInChroot.insert_or_assign(p, p);
        }
2015-03-24 10:50:22 +00:00
2017-02-22 03:50:18 +00:00
        PathSet allowedPaths = settings.allowedImpureHostPrefixes;
2015-01-06 06:27:38 +00:00
        /* This works like the above, except on a per-derivation level */
2018-09-28 12:31:16 +00:00
        auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings());
2015-01-06 06:27:38 +00:00
        for (auto & i : impurePaths) {
            bool found = false;
2015-01-13 10:16:32 +00:00
            /* Note: we're not resolving symlinks here to prevent
               giving a non-root user info about inaccessible
               files. */
            Path canonI = canonPath(i);
2015-01-06 06:27:38 +00:00
            /* If only we had a trie to do this more efficiently :) luckily, these are generally going to be pretty small */
            for (auto & a : allowedPaths) {
2015-01-13 10:16:32 +00:00
                Path canonA = canonPath(a);
2015-01-06 06:27:38 +00:00
                if (canonI == canonA || isInDir(canonI, canonA)) {
                    found = true;
                    break;
                }
            }
            if (!found)
2019-12-05 18:11:09 +00:00
                throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps",
                    worker.store.printStorePath(drvPath), i);
2015-01-06 06:27:38 +00:00
            dirsInChroot[i] = i;
        }
2015-12-03 15:30:19 +00:00
#if __linux__
2007-10-27 16:06:38 +00:00
        /* Create a temporary directory in which we set up the chroot
2008-12-12 17:14:57 +00:00
           environment using bind-mounts. We put it in the Nix store
           to ensure that we can create hard-links to non-directory
           inputs in the fake Nix store in the chroot (see below). */
2020-03-24 13:26:13 +00:00
        chrootRootDir = worker.store.Store::toRealPath(drvPath) + ".chroot";
2016-02-24 16:44:12 +00:00
        deletePath(chrootRootDir);
2007-10-27 16:06:38 +00:00
2008-12-11 17:00:12 +00:00
/* Clean up the chroot directory automatically. */
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from a on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
2015-07-17 15:57:40 +00:00
        autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir);
2012-07-27 13:59:18 +00:00
2017-07-30 11:27:57 +00:00
        printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
2007-10-27 00:46:59 +00:00
2015-03-24 10:35:53 +00:00
        if (mkdir(chrootRootDir.c_str(), 0750) == -1)
2020-04-21 23:07:07 +00:00
            throw SysError("cannot create '%1%'", chrootRootDir);
2015-03-24 10:35:53 +00:00
2017-01-25 11:45:38 +00:00
        if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
2020-04-21 23:07:07 +00:00
            throw SysError("cannot change ownership of '%1%'", chrootRootDir);
2015-03-24 10:35:53 +00:00
2008-08-27 16:03:03 +00:00
        /* Create a writable /tmp in the chroot. Many builders need
           this. (Of course they should really respect $TMPDIR
           instead.) */
        Path chrootTmpDir = chrootRootDir + "/tmp";
        createDirs(chrootTmpDir);
2012-08-19 20:32:42 +00:00
        chmod_(chrootTmpDir, 01777);
2008-08-27 16:03:03 +00:00
2010-03-11 10:21:23 +00:00
        /* Create a /etc/passwd with entries for the build user and the
           nobody account. The latter is kind of a hack to support
2010-03-11 15:45:05 +00:00
           Samba-in-QEMU. */
2008-12-11 17:44:02 +00:00
        createDirs(chrootRootDir + "/etc");
2017-05-04 14:57:03 +00:00
        writeFile(chrootRootDir + "/etc/passwd", fmt(
                "root:x:0:0:Nix build user:%3%:/noshell\n"
                "nixbld:x:%1%:%2%:Nix build user:%3%:/noshell\n"
                "nobody:x:65534:65534:Nobody:/:/noshell\n",
2017-05-05 15:45:22 +00:00
                sandboxUid, sandboxGid, settings.sandboxBuildDir));
2008-12-11 17:44:02 +00:00
2012-07-27 13:59:18 +00:00
        /* Declare the build user's group so that programs get a consistent
           view of the system (e.g., "id -gn"). */
2010-03-11 15:45:05 +00:00
        writeFile(chrootRootDir + "/etc/group",
2016-12-19 10:52:57 +00:00
            (format(
                "root:x:0:\n"
                "nixbld:!:%1%:\n"
                "nogroup:x:65534:\n") % sandboxGid).str());
2010-03-11 14:47:04 +00:00
2011-11-21 15:19:51 +00:00
        /* Create /etc/hosts with localhost entry. */
2020-06-03 17:38:54 +00:00
        if (!(derivationIsImpure(derivationType)))
2018-08-31 09:12:51 +00:00
            writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n::1 localhost\n");
2011-11-21 15:19:51 +00:00
2008-12-11 18:57:10 +00:00
        /* Make the closure of the inputs available in the chroot,
           rather than the whole Nix store. This prevents any access
           to undeclared dependencies. Directories are bind-mounted,
           while other inputs are hard-linked (since only directories
           can be bind-mounted). !!! As an extra security
           precaution, make the fake Nix store only writable by the
           build user. */
2016-06-01 12:49:12 +00:00
        Path chrootStoreDir = chrootRootDir + worker.store.storeDir;
2015-03-24 10:35:53 +00:00
        createDirs(chrootStoreDir);
2015-04-07 11:21:26 +00:00
        chmod_(chrootStoreDir, 01775);
2015-03-24 10:35:53 +00:00
2017-01-25 11:45:38 +00:00
        if (buildUser && chown(chrootStoreDir.c_str(), 0, buildUser->getGID()) == -1)
2020-04-21 23:07:07 +00:00
            throw SysError("cannot change ownership of '%1%'", chrootStoreDir);
2008-12-11 18:57:10 +00:00
2015-07-17 17:24:28 +00:00
        for (auto & i : inputPaths) {
2019-12-05 18:11:09 +00:00
            auto p = worker.store.printStorePath(i);
            Path r = worker.store.toRealPath(p);
2020-09-23 17:17:28 +00:00
            if (S_ISDIR(lstat(r).st_mode))
2019-12-05 18:11:09 +00:00
                dirsInChroot.insert_or_assign(p, r);
2018-10-02 09:22:13 +00:00
            else
2019-12-05 18:11:09 +00:00
                linkOrCopy(r, chrootRootDir + p);
2008-12-11 18:57:10 +00:00
        }
2012-07-27 13:59:18 +00:00
2015-09-02 12:54:12 +00:00
        /* If we're repairing, checking or rebuilding part of a
           multiple-outputs derivation, it's possible that we're
2014-02-17 22:04:52 +00:00
           rebuilding a path that is in settings.dirsInChroot
           (typically the dependencies of /bin/sh). Throw them
           out. */
2020-08-14 17:00:13 +00:00
        for (auto & i : drv->outputsAndOptPaths(worker.store)) {
2020-08-11 22:46:05 +00:00
            /* If the name isn't known a priori (i.e. floating
               content-addressed derivation), the temporary location we use
               should be fresh. Freshness means it is impossible that the path
               is already in the sandbox, so we don't need to worry about
               removing it. */
2020-08-14 17:00:13 +00:00
            if (i.second.second)
                dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
2020-08-07 19:09:26 +00:00
        }
2012-10-03 15:20:16 +00:00
2015-12-03 15:30:19 +00:00
#elif __APPLE__
2015-01-06 06:27:38 +00:00
        /* We don't really have any parent prep work to do (yet?)
           All work happens in the child, instead. */
2007-10-27 16:51:55 +00:00
#else
2015-09-29 16:21:10 +00:00
        throw Error("sandboxing builds is not supported on this platform");
2007-10-27 16:51:55 +00:00
#endif
2012-09-11 22:39:22 +00:00
    }

2020-08-07 19:09:26 +00:00
    if (needsHashRewrite() && pathExists(homeDir))
        throw Error("home directory '%1%' exists; please remove it to assure purity of builds without sandboxing", homeDir);
2012-07-27 13:59:18 +00:00
2017-06-12 14:07:34 +00:00
    if (useChroot && settings.preBuildHook != "" && dynamic_cast<Derivation *>(drv.get())) {
2017-07-30 11:27:57 +00:00
        printMsg(lvlChatty, format("executing pre-build hook '%1%'")
2015-04-18 20:56:02 +00:00
            % settings.preBuildHook);
2019-12-05 18:11:09 +00:00
        auto args = useChroot ? Strings({worker.store.printStorePath(drvPath), chrootRootDir}) :
            Strings({ worker.store.printStorePath(drvPath) });
2015-04-18 20:56:02 +00:00
        enum BuildHookState {
            stBegin,
            stExtraChrootDirs
        };
        auto state = stBegin;
        auto lines = runProgram(settings.preBuildHook, false, args);
        auto lastPos = std::string::size_type{0};
        for (auto nlPos = lines.find('\n'); nlPos != string::npos;
                nlPos = lines.find('\n', lastPos)) {
2015-10-09 15:18:48 +00:00
            auto line = std::string{lines, lastPos, nlPos - lastPos};
2015-04-18 20:56:02 +00:00
            lastPos = nlPos + 1;
            if (state == stBegin) {
2015-09-29 16:21:10 +00:00
                if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") {
2015-04-18 20:56:02 +00:00
                    state = stExtraChrootDirs;
                } else {
2020-04-21 23:07:07 +00:00
                    throw Error("unknown pre-build hook command '%1%'", line);
2015-04-18 20:56:02 +00:00
                }
            } else if (state == stExtraChrootDirs) {
                if (line == "") {
                    state = stBegin;
                } else {
                    auto p = line.find('=');
                    if (p == string::npos)
                        dirsInChroot[line] = line;
                    else
                        dirsInChroot[string(line, 0, p)] = string(line, p + 1);
                }
            }
        }
    }
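    /* Illustrative sketch (not part of the build): a minimal pre-build hook.
       It is invoked with the derivation path (and, for chroot builds, the
       chroot root) as arguments, and may print the "extra-sandbox-paths"
       command followed by "guest" or "guest=host" lines, terminated by an
       empty line, exactly as parsed above. The extra path shown is a
       hypothetical example. */
#if 0
#include <cstdio>

int main(int argc, char * * argv)
{
    // argv[1] = derivation path, argv[2] = chroot root dir (if sandboxed)
    std::printf("extra-sandbox-paths\n");
    std::printf("/dev/fuse\n");   // expose an extra host path inside the sandbox
    std::printf("\n");            // empty line ends the list
    return 0;
}
#endif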
2012-07-27 13:59:18 +00:00
Recursive Nix support
This allows Nix builders to call Nix to build derivations, with some
limitations.
Example:
let nixpkgs = fetchTarball channel:nixos-18.03; in
with import <nixpkgs> {};
runCommand "foo"
{
buildInputs = [ nix jq ];
NIX_PATH = "nixpkgs=${nixpkgs}";
}
''
hello=$(nix-build -E '(import <nixpkgs> {}).hello.overrideDerivation (args: { name = "hello-3.5"; })')
$hello/bin/hello
mkdir -p $out/bin
ln -s $hello/bin/hello $out/bin/hello
nix path-info -r --json $hello | jq .
''
This derivation makes a recursive Nix call to build GNU Hello and
symlinks it from its $out, i.e.
# ll ./result/bin/
lrwxrwxrwx 1 root root 63 Jan 1 1970 hello -> /nix/store/s0awxrs71gickhaqdwxl506hzccb30y5-hello-3.5/bin/hello
# nix-store -qR ./result
/nix/store/hwwqshlmazzjzj7yhrkyjydxamvvkfd3-glibc-2.26-131
/nix/store/s0awxrs71gickhaqdwxl506hzccb30y5-hello-3.5
/nix/store/sgmvvyw8vhfqdqb619bxkcpfn9lvd8ss-foo
This is implemented as follows:
* Before running the outer builder, Nix creates a Unix domain socket
'.nix-socket' in the builder's temporary directory and sets
$NIX_REMOTE to point to it. It starts a thread to process
connections to this socket. (Thus you don't need to have nix-daemon
running.)
* The daemon thread uses a wrapper store (RestrictedStore) to keep
track of paths added through recursive Nix calls, to implement some
restrictions (see below), and to do some censorship (e.g. for
purity, queryPathInfo() won't return impure information such as
signatures and timestamps).
* After the build finishes, the output paths are scanned for
references to the paths added through recursive Nix calls (in
addition to the inputs closure). Thus, in the example above, $out
has a reference to $hello.
The main restriction on recursive Nix calls is that they cannot do
arbitrary substitutions. For example, doing
nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10
is forbidden unless /nix/store/kmwd... is in the inputs closure or
previously built by a recursive Nix call. This is to prevent
irreproducible derivations that have hidden dependencies on
substituters or the current store contents. Building a derivation is
fine, however, and Nix will use substitutes if available. In other
words, the builder has to present proof that it knows how to build a
desired store path from scratch by constructing a derivation graph for
that path.
Probably we should also disallow instantiating/building fixed-output
derivations (specifically, those that access the network, but
currently we have no way to mark fixed-output derivations that don't
access the network). Otherwise sandboxed derivations can bypass
sandbox restrictions and access the network.
When sandboxing is enabled, we make paths appear in the sandbox of the
builder by entering the mount namespace of the builder and
bind-mounting each path. This is tricky because we do a pivot_root()
in the builder to change the root directory of its mount namespace,
and thus the host /nix/store is not visible in the mount namespace of
the builder. To get around this, just before doing pivot_root(), we
branch a second mount namespace that shares its /nix/store mountpoint
with the parent.
Recursive Nix currently doesn't work on macOS in sandboxed mode
(because we can't change the sandbox policy of a running build) and in
non-root mode (because setns() barfs).
2018-10-02 14:01:26 +00:00
    /* Fire up a Nix daemon to process recursive Nix calls from the
       builder. */
2019-11-04 12:48:58 +00:00
    if (parsedDrv->getRequiredSystemFeatures().count("recursive-nix"))
        startDaemon();
2018-10-02 14:01:26 +00:00
2004-05-11 18:05:44 +00:00
    /* Run the builder. */
2020-05-11 21:52:15 +00:00
    printMsg(lvlChatty, "executing builder '%1%'", drv->builder);
2004-05-11 18:05:44 +00:00
2010-08-25 20:44:28 +00:00
    /* Create the log file. */
2008-11-12 11:08:27 +00:00
    Path logFile = openLogFile();
2012-07-27 13:59:18 +00:00
2010-08-30 14:53:03 +00:00
    /* Create a pipe to get the output of the builder. */
2019-05-17 20:29:15 +00:00
    //builderOut.create();
    builderOut.readSide = posix_openpt(O_RDWR | O_NOCTTY);
    if (!builderOut.readSide)
        throw SysError("opening pseudoterminal master");

    std::string slaveName(ptsname(builderOut.readSide.get()));
2019-06-16 18:02:40 +00:00
    if (buildUser) {
        if (chmod(slaveName.c_str(), 0600))
            throw SysError("changing mode of pseudoterminal slave");
2019-05-17 20:29:15 +00:00
2019-06-16 18:02:40 +00:00
        if (chown(slaveName.c_str(), buildUser->getUID(), 0))
            throw SysError("changing owner of pseudoterminal slave");
2020-04-29 12:39:37 +00:00
    }
#if __APPLE__
    else {
2019-06-17 06:08:04 +00:00
        if (grantpt(builderOut.readSide.get()))
            throw SysError("granting access to pseudoterminal slave");
2019-06-16 18:02:40 +00:00
    }
2020-04-29 12:39:37 +00:00
#endif
2019-05-17 20:29:15 +00:00
#if 0
    // Mount the pt in the sandbox so that the "tty" command works.
    // FIXME: this doesn't work with the new devpts in the sandbox.
    if (useChroot)
        dirsInChroot[slaveName] = {slaveName, false};
#endif

    if (unlockpt(builderOut.readSide.get()))
        throw SysError("unlocking pseudoterminal");

    builderOut.writeSide = open(slaveName.c_str(), O_RDWR | O_NOCTTY);
    if (!builderOut.writeSide)
        throw SysError("opening pseudoterminal slave");

    // Put the pt into raw mode to prevent \n -> \r\n translation.
    struct termios term;
    if (tcgetattr(builderOut.writeSide.get(), &term))
        throw SysError("getting pseudoterminal attributes");

    cfmakeraw(&term);

    if (tcsetattr(builderOut.writeSide.get(), TCSANOW, &term))
        throw SysError("putting pseudoterminal into raw mode");
2010-08-25 20:44:28 +00:00
2016-12-07 15:09:38 +00:00
    result.startTime = time(0);
2014-08-21 12:08:09 +00:00
    /* Fork a child to build the package. */
2017-01-19 15:58:39 +00:00
    ProcessOptions options;
2015-12-03 15:30:19 +00:00
#if __linux__
2014-12-23 16:25:06 +00:00
    if (useChroot) {
        /* Set up private namespaces for the build:

           - The PID namespace causes the build to start as PID 1.
             Processes outside of the chroot are not visible to those
             on the inside, but processes inside the chroot are
             visible from the outside (though with different PIDs).

           - The private mount namespace ensures that all the bind
             mounts we do will only show up in this process and its
             children, and will disappear automatically when we're
             done.

           - The private network namespace ensures that the builder
             cannot talk to the outside world (or vice versa). It
2015-02-23 14:41:41 +00:00
             only has a private loopback interface. (Fixed-output
             derivations are not run in a private network namespace
             to allow functions like fetchurl to work.)
2014-12-23 16:25:06 +00:00
           - The IPC namespace prevents the builder from communicating
             with outside processes using SysV IPC mechanisms (shared
             memory, message queues, semaphores). It also ensures
             that all IPC objects are destroyed when the builder
             exits.

           - The UTS namespace ensures that builders see a hostname of
             localhost rather than the actual hostname.

           We use a helper process to do the clone() to work around
           clone() being broken in multi-threaded programs due to
           at-fork handlers not being run. Note that we use
           CLONE_PARENT to ensure that the real builder is parented to
           us.
        */
2016-06-03 13:45:11 +00:00
2020-06-03 17:38:54 +00:00
        if (!(derivationIsImpure(derivationType)))
2016-06-03 13:45:11 +00:00
            privateNetwork = true;
2016-06-09 16:27:39 +00:00
        userNamespaceSync.create();
2015-03-04 14:08:53 +00:00
        options.allowVfork = false;
2016-06-09 16:27:39 +00:00
2014-12-23 16:25:06 +00:00
        Pid helper = startProcess([&]() {
2016-06-09 16:27:39 +00:00
            /* Drop additional groups here because we can't do it
2016-12-19 10:52:57 +00:00
               after we've created the new user namespace. FIXME:
               this means that if we're not root in the parent
               namespace, we can't drop additional groups; they will
               be mapped to nogroup in the child namespace. There does
               not seem to be a workaround for this. (But who can tell
2016-12-22 16:38:42 +00:00
               from reading user_namespaces(7)?)
               See also https://lwn.net/Articles/621612/. */
2016-06-09 16:27:39 +00:00
            if (getuid() == 0 && setgroups(0, 0) == -1)
                throw SysError("setgroups failed");
2015-10-21 12:45:56 +00:00
            size_t stackSize = 1 * 1024 * 1024;
            char * stack = (char *) mmap(0, stackSize,
                PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
2015-12-29 22:37:51 +00:00
            if (stack == MAP_FAILED) throw SysError("allocating stack");
2016-06-09 16:27:39 +00:00
            int flags = CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
2016-06-03 13:45:11 +00:00
            if (privateNetwork)
                flags |= CLONE_NEWNET;
2016-06-09 16:27:39 +00:00
2015-10-21 12:45:56 +00:00
            pid_t child = clone(childEntry, stack + stackSize, flags, this);
2019-07-25 13:37:57 +00:00
            if (child == -1 && errno == EINVAL) {
2015-02-23 14:52:04 +00:00
                /* Fallback for Linux < 2.13 where CLONE_NEWPID and
                   CLONE_PARENT are not allowed together. */
2019-07-25 13:37:57 +00:00
                flags &= ~CLONE_NEWPID;
                child = clone(childEntry, stack + stackSize, flags, this);
            }
            if (child == -1 && (errno == EPERM || errno == EINVAL)) {
2019-09-21 22:53:15 +00:00
                /* Some distros patch Linux to not allow unprivileged
2019-07-25 13:37:57 +00:00
                 * user namespaces. If we get EPERM or EINVAL, try
                 * without CLONE_NEWUSER and see if that works.
                 */
                flags &= ~CLONE_NEWUSER;
                child = clone(childEntry, stack + stackSize, flags, this);
            }
2019-07-25 18:29:58 +00:00
            /* Otherwise exit with EPERM so we can handle this in the
               parent. This is only done when sandbox-fallback is set
               to true (the default). */
            if (child == -1 && (errno == EPERM || errno == EINVAL) && settings.sandboxFallback)
2019-07-30 21:52:42 +00:00
                _exit(1);
2015-02-23 14:52:04 +00:00
            if (child == -1) throw SysError("cloning builder process");
2016-06-09 16:27:39 +00:00
2016-07-11 19:44:44 +00:00
            writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n");
2014-12-23 16:25:06 +00:00
            _exit(0);
2015-03-04 14:08:53 +00:00
        }, options);
2016-06-09 16:27:39 +00:00
2019-07-25 18:29:58 +00:00
        int res = helper.wait();
2019-07-30 21:52:42 +00:00
        if (res != 0 && settings.sandboxFallback) {
2019-07-25 18:29:58 +00:00
            useChroot = false;
2019-10-12 23:02:57 +00:00
            initTmpDir();
2019-07-25 18:29:58 +00:00
            goto fallback;
        } else if (res != 0)
2014-12-23 16:25:06 +00:00
            throw Error("unable to start build process");
2016-06-09 16:27:39 +00:00
2016-07-11 19:44:44 +00:00
        userNamespaceSync.readSide = -1;
2016-06-09 16:27:39 +00:00
2014-12-23 16:25:06 +00:00
        pid_t tmp;
2016-07-11 19:44:44 +00:00
        if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) abort();
2014-12-23 16:25:06 +00:00
        pid = tmp;
2016-06-09 16:27:39 +00:00
2016-12-19 10:52:57 +00:00
        /* Set the UID/GID mapping of the builder's user namespace
           such that the sandbox user maps to the build user, or to
           the calling user (if build users are disabled). */
2017-01-25 11:45:38 +00:00
        uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
        uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
2016-06-09 16:27:39 +00:00
        writeFile("/proc/" + std::to_string(pid) + "/uid_map",
2016-12-19 10:52:57 +00:00
            (format("%d %d 1") % sandboxUid % hostUid).str());
2016-06-09 16:27:39 +00:00
        writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");

        writeFile("/proc/" + std::to_string(pid) + "/gid_map",
2016-12-19 10:52:57 +00:00
            (format("%d %d 1") % sandboxGid % hostGid).str());
2016-06-09 16:27:39 +00:00
2018-10-02 14:01:26 +00:00
        /* Save the mount namespace of the child. We have to do this
           *before* the child does a chroot. */
        sandboxMountNamespace = open(fmt("/proc/%d/ns/mnt", (pid_t) pid).c_str(), O_RDONLY);
        if (sandboxMountNamespace.get() == -1)
            throw SysError("getting sandbox mount namespace");
2019-09-21 22:53:15 +00:00
        /* Signal the builder that we've updated its user namespace. */
2016-07-11 19:44:44 +00:00
        writeFull(userNamespaceSync.writeSide.get(), "1");
        userNamespaceSync.writeSide = -1;
2016-06-09 16:27:39 +00:00
2015-01-06 09:49:44 +00:00
    } else
#endif
    {
2019-07-25 18:29:58 +00:00
    fallback:
2017-01-25 11:45:38 +00:00
        options.allowVfork = !buildUser && !drv->isBuiltin();
2014-12-23 16:25:06 +00:00
        pid = startProcess([&]() {
            runChild();
        }, options);
    }
2012-06-27 13:52:06 +00:00
2012-06-25 19:45:16 +00:00
    /* parent */
    pid.setSeparatePG(true);
2016-07-11 19:44:44 +00:00
    builderOut.writeSide = -1;
    worker.childStarted(shared_from_this(), {builderOut.readSide.get()}, true, true);
2004-05-11 18:05:44 +00:00
2014-08-01 17:38:21 +00:00
    /* Check if setting up the build environment failed. */
2015-08-03 16:04:32 +00:00
    while (true) {
2016-07-11 19:44:44 +00:00
        string msg = readLine(builderOut.readSide.get());
2015-08-03 16:04:32 +00:00
        if (string(msg, 0, 1) == "\1") {
            if (msg.size() == 1) break;
            throw Error(string(msg, 1));
        }
2016-09-21 14:11:01 +00:00
        debug(msg);
2015-08-03 16:04:32 +00:00
    }
2012-06-25 19:45:16 +00:00
}
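/* Illustrative sketch (not part of the build): the child's side of the setup
   handshake read in the loop above. While preparing the environment, the
   child writes ordinary log lines; once setup succeeds it writes a bare "\1"
   line, or "\1" followed by an error message if setup failed. The function
   names and the fd parameter are hypothetical stand-ins for the pty slave. */
#if 0
#include <string>
#include <unistd.h>

static void writeLine(int fd, const std::string & s)
{
    std::string line = s + "\n";
    write(fd, line.data(), line.size());
}

static void reportSetup(int toParentFd, bool ok, const std::string & error = "")
{
    if (ok)
        writeLine(toParentFd, "\1");             // setup done, build output follows
    else
        writeLine(toParentFd, "\1" + error);     // parent throws this as an Error
}
#endif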
2004-05-11 18:05:44 +00:00
2019-10-12 23:02:57 +00:00
void DerivationGoal::initTmpDir() {
2019-10-13 20:41:49 +00:00
    /* In a sandbox, for determinism, always use the same temporary
       directory. */
#if __linux__
    tmpDirInSandbox = useChroot ? settings.sandboxBuildDir : tmpDir;
#else
    tmpDirInSandbox = tmpDir;
#endif
2017-01-25 11:00:28 +00:00
2017-01-25 15:42:07 +00:00
    /* In non-structured mode, add all bindings specified in the
2017-09-08 09:27:10 +00:00
       derivation via the environment, except those listed in the
2017-01-25 15:42:07 +00:00
       passAsFile attribute. Those are passed as file names pointing
       to temporary files containing the contents. Note that
       passAsFile is ignored in structured mode because it's not
       needed (attributes are not passed through the environment, so
       there is no size constraint). */
2018-09-28 12:31:16 +00:00
    if (!parsedDrv->getStructuredAttrs()) {
Add support for passing structured data to builders
Previously, all derivation attributes had to be coerced into strings
so that they could be passed via the environment. This is lossy
(e.g. lists get flattened, necessitating configureFlags
vs. configureFlagsArray, of which the latter cannot be specified as an
attribute), doesn't support attribute sets at all, and has size
limitations (necessitating hacks like passAsFile).
This patch adds a new mode for passing attributes to builders, namely
encoded as a JSON file ".attrs.json" in the current directory of the
builder. This mode is activated via the special attribute
__structuredAttrs = true;
(The idea is that one day we can set this in stdenv.mkDerivation.)
For example,
stdenv.mkDerivation {
__structuredAttrs = true;
name = "foo";
buildInputs = [ pkgs.hello pkgs.cowsay ];
doCheck = true;
hardening.format = false;
}
results in a ".attrs.json" file containing (sans the indentation):
{
"buildInputs": [],
"builder": "/nix/store/ygl61ycpr2vjqrx775l1r2mw1g2rb754-bash-4.3-p48/bin/bash",
"configureFlags": [
"--with-foo",
"--with-bar=1 2"
],
"doCheck": true,
"hardening": {
"format": false
},
"name": "foo",
"nativeBuildInputs": [
"/nix/store/10h6li26i7g6z3mdpvra09yyf10mmzdr-hello-2.10",
"/nix/store/4jnvjin0r6wp6cv1hdm5jbkx3vinlcvk-cowsay-3.03"
],
"propagatedBuildInputs": [],
"propagatedNativeBuildInputs": [],
"stdenv": "/nix/store/f3hw3p8armnzy6xhd4h8s7anfjrs15n2-stdenv",
"system": "x86_64-linux"
}
"passAsFile" is ignored in this mode because it's not needed - large
strings are included directly in the JSON representation.
It is up to the builder to do something with the JSON
representation. For example, in bash-based builders, lists/attrsets of
string values could be mapped to bash (associative) arrays.
2017-01-25 15:42:07 +00:00
2019-12-05 18:11:09 +00:00
        StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile").value_or(""));
2017-01-25 15:42:07 +00:00
for (auto & i : drv->env) {
if (passAsFile.find(i.first) == passAsFile.end()) {
env[i.first] = i.second;
} else {
2020-01-02 17:20:57 +00:00
auto hash = hashString(htSHA256, i.first);
2020-01-02 23:41:48 +00:00
string fn = ".attr-" + hash.to_string(Base32, false);
Path p = tmpDir + "/" + fn;
2020-03-31 11:40:16 +00:00
writeFile(p, rewriteStrings(i.second, inputRewrites));
chownToBuilder(p);
env[i.first + "Path"] = tmpDirInSandbox + "/" + fn;
}
2017-01-25 11:00:28 +00:00
}
2017-01-25 11:00:28 +00:00
}
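For readers less familiar with the passAsFile mechanism handled above, the following standalone sketch illustrates only the contract: the attribute value is written to a file under the build's temporary directory and the environment gets a "<name>Path" variable pointing at it. The attribute name, value and ".attr-example" file name are invented for the example; the real code derives the file name from a base-32 SHA-256 hash of the attribute name, as in the loop above.

// Illustrative sketch of the passAsFile contract -- not the real implementation.
#include <fstream>
#include <map>
#include <string>

int main()
{
    std::string tmpDir = "/tmp";                 // stand-in for the build's temporary directory
    std::map<std::string, std::string> env;      // stand-in for the builder's environment

    std::string attrName = "longScript";         // hypothetical attribute listed in passAsFile
    std::string attrValue = "echo hello";        // imagine a value too large for the environment

    std::string fn = ".attr-example";            // real code: ".attr-" + base32(sha256(attrName))
    std::string p = tmpDir + "/" + fn;

    std::ofstream(p) << attrValue;               // the value goes into the file...
    env[attrName + "Path"] = p;                  // ...and $longScriptPath tells the builder where

    return 0;
}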
/* For convenience, set an environment variable pointing to the top build
directory. */
env["NIX_BUILD_TOP"] = tmpDirInSandbox;
/* Also set TMPDIR and variants to point to this directory. */
env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox;
/* Explicitly set PWD to prevent problems with chroot builds. In
particular, dietlibc cannot figure out the cwd because the
inode of the current directory doesn't appear in .. (because
getdents returns the inode of the mount point). */
env["PWD"] = tmpDirInSandbox;
2019-10-12 23:02:57 +00:00
}
2019-12-05 18:11:09 +00:00
2019-10-12 23:02:57 +00:00
void DerivationGoal::initEnv()
{
env.clear();
/* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
PATH is not set. We don't want this, so we fill it in with some dummy
value. */
env["PATH"] = "/path-not-set";
/* Set HOME to a non-existing path to prevent certain programs from using
/etc/passwd (or NIS, or whatever) to locate the home directory (for
example, wget looks for ~/.wgetrc). I.e., these tools use /etc/passwd
if HOME is not set, but they will just assume that the settings file
they are looking for does not exist if HOME is set but points to some
non-existing path. */
env["HOME"] = homeDir;
/* Tell the builder where the Nix store is. Usually they
shouldn't care, but this is useful for purity checking (e.g.,
the compiler or linker might only want to accept paths to files
in the store or in the build directory). */
env["NIX_STORE"] = worker.store.storeDir;
/* The maximum number of cores to utilize for parallel building. */
env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
initTmpDir();
2017-01-25 11:00:28 +00:00
/* Compatibility hack with Nix <= 0.7: if this is a fixed-output
derivation, tell the builder, so that for instance `fetchurl'
can skip checking the output. On older Nixes, this environment
variable won't be set, so `fetchurl' will do the check. */
2020-06-03 17:38:54 +00:00
if (derivationIsFixed(derivationType)) env["NIX_OUTPUT_CHECKED"] = "1";
2017-01-25 11:00:28 +00:00
/* *Only* if this is a fixed-output derivation, propagate the
values of the environment variables specified in the
`impureEnvVars' attribute to the builder. This allows for
instance environment variables for proxy configuration such as
`http_proxy' to be easily passed to downloaders like
`fetchurl'. Passing such environment variables from the caller
to the builder is generally impure, but the output of
fixed-output derivations is by definition pure (since we
already know the cryptographic hash of the output). */
2020-06-03 17:38:54 +00:00
if (derivationIsImpure(derivationType)) {
2018-09-28 12:31:16 +00:00
for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings()))
2019-11-22 15:06:44 +00:00
env[i] = getEnv(i).value_or("");
2017-01-25 11:00:28 +00:00
}
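As a rough standalone sketch of the whitelisting described above (the variable names are merely examples, not a recommendation), the effect is simply to copy the listed variables, and only those, from the caller's environment into the builder's:

// Sketch of impureEnvVars-style whitelisting; only the named variables are copied.
#include <cstdlib>
#include <map>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> impureEnvVars = { "http_proxy", "https_proxy" };  // example whitelist
    std::map<std::string, std::string> env;                                    // builder environment

    for (auto & name : impureEnvVars) {
        const char * value = std::getenv(name.c_str());
        env[name] = value ? value : "";   // unset variables become the empty string
    }
    return 0;
}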
2017-09-08 09:27:10 +00:00
/* Currently structured log messages piggyback on stderr, but we
may change that in the future. So tell the builder which file
descriptor to use for that. */
env["NIX_LOG_FD"] = "2";
2019-05-19 14:56:08 +00:00
/* Trigger colored output in various tools. */
env["TERM"] = "xterm-256color";
2017-01-25 11:00:28 +00:00
}
2018-09-28 10:43:01 +00:00
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
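The regex above accepts exactly the attribute names that are also valid shell variable names; keys that do not match are simply skipped when the shell file is generated below. A tiny illustrative check (example keys only):

// Illustrative check against the same pattern as shVarName.
#include <cassert>
#include <regex>

int main()
{
    std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
    assert(std::regex_match("buildInputs", shVarName));    // accepted: a plain identifier
    assert(!std::regex_match("meta.license", shVarName));  // rejected: '.' is not allowed
    return 0;
}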
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
void DerivationGoal::writeStructuredAttrs()
{
2020-03-24 13:26:13 +00:00
auto structuredAttrs = parsedDrv->getStructuredAttrs();
2018-09-28 10:43:01 +00:00
if (!structuredAttrs) return;
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
auto json = *structuredAttrs;
/* Add an "outputs" object containing the output paths. */
nlohmann::json outputs;
2020-08-07 19:09:26 +00:00
for (auto & i : drv->outputs) {
/* The placeholder must have a rewrite, so we use it to cover both the
cases where we know or don't know the output path ahead of time. */
outputs[i.first] = rewriteStrings(hashPlaceholder(i.first), inputRewrites);
}
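The rewriting above is plain literal substring substitution: each placeholder that has an entry in inputRewrites is replaced by the corresponding real path. A minimal standalone sketch of such a rewrite helper (not the actual rewriteStrings implementation; the placeholder and store path are made up):

// Minimal sketch of placeholder rewriting: every occurrence of each key in
// 'rewrites' is replaced by its value (example data only).
#include <iostream>
#include <map>
#include <string>

static std::string rewrite(std::string s, const std::map<std::string, std::string> & rewrites)
{
    for (auto & [from, to] : rewrites)
        for (size_t pos = s.find(from); pos != std::string::npos; pos = s.find(from, pos + to.size()))
            s.replace(pos, from.size(), to);
    return s;
}

int main()
{
    std::map<std::string, std::string> rewrites = {
        { "/placeholder-out", "/nix/store/0000000000000000000000000000000-example-out" }  // hypothetical mapping
    };
    std::cout << rewrite("install prefix is /placeholder-out", rewrites) << "\n";
    return 0;
}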
2018-09-28 10:43:01 +00:00
json["outputs"] = outputs;
/* Handle exportReferencesGraph. */
auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) {
std::ostringstream str;
{
JSONPlaceholder jsonRoot(str, true);
2019-12-05 18:11:09 +00:00
StorePathSet storePaths;
2018-09-28 10:43:01 +00:00
for (auto & p : *i)
2019-12-05 18:11:09 +00:00
storePaths.insert(worker.store.parseStorePath(p.get<std::string>()));
2018-09-28 10:43:01 +00:00
worker.store.pathInfoToJSON(jsonRoot,
exportReferences(storePaths), false, true);
2017-10-25 11:01:50 +00:00
}
2018-09-28 10:43:01 +00:00
json[i.key()] = nlohmann::json::parse(str.str()); // urgh
}
}
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
2020-01-23 16:38:07 +00:00
chownToBuilder(tmpDir + "/.attrs.json");
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
/* As a convenience to bash scripts, write a shell file that
maps all attributes that are representable in bash -
namely, strings, integers, nulls, Booleans, and arrays and
objects consisting entirely of those values. (So nested
arrays or objects are not supported.) */
2017-10-25 11:01:50 +00:00
2019-02-12 12:43:32 +00:00
auto handleSimpleType = [](const nlohmann::json & value) -> std::optional<std::string> {
2018-09-28 10:43:01 +00:00
if (value.is_string())
return shellEscape(value);
2018-09-28 10:43:01 +00:00
if (value.is_number()) {
auto f = value.get<float>();
if (std::ceil(f) == f)
return std::to_string(value.get<int>());
}
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
if (value.is_null())
return std::string("''");
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
if (value.is_boolean())
return value.get<bool>() ? std::string("1") : std::string("");
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
return {};
};
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
std::string jsonSh;
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
for (auto i = json.begin(); i != json.end(); ++i) {
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
if (!std::regex_match(i.key(), shVarName)) continue;
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
auto & value = i.value();
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
auto s = handleSimpleType(value);
if (s)
jsonSh += fmt("declare %s=%s\n", i.key(), *s);
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
else if (value.is_array()) {
std::string s2;
bool good = true;
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
for (auto i = value.begin(); i != value.end(); ++i) {
auto s3 = handleSimpleType(i.value());
if (!s3) { good = false; break; }
s2 += *s3; s2 += ' ';
2017-10-25 11:01:50 +00:00
}
2018-09-28 10:43:01 +00:00
if (good)
jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
2017-10-25 11:01:50 +00:00
}
2018-09-28 10:43:01 +00:00
else if (value.is_object()) {
std::string s2;
bool good = true;
2017-10-25 11:01:50 +00:00
2018-09-28 10:43:01 +00:00
for (auto i = value.begin(); i != value.end(); ++i) {
auto s3 = handleSimpleType(i.value());
if (!s3) { good = false; break; }
s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3);
}
if (good)
jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
}
2017-10-25 11:01:50 +00:00
}
2018-09-28 10:43:01 +00:00
writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
2020-01-23 16:38:07 +00:00
chownToBuilder(tmpDir + "/.attrs.sh");
}
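To make the .attrs.sh mapping above concrete, here is a small standalone sketch (using nlohmann::json, with invented attribute values, and with shell escaping omitted for brevity) that applies the same rules: simple values become plain declare lines, flat arrays become indexed arrays, flat objects become associative arrays, and any attribute containing a nested array or object is dropped.

// Standalone sketch of the ".attrs.sh" generation rules (example data only;
// the real code shell-escapes every value and key).
#include <iostream>
#include <optional>
#include <string>
#include <nlohmann/json.hpp>

using nlohmann::json;

static std::optional<std::string> simple(const json & v)
{
    if (v.is_string())  return v.get<std::string>();
    if (v.is_number())  return v.dump();       // the real code only keeps integral numbers
    if (v.is_null())    return std::string("''");
    if (v.is_boolean()) return v.get<bool>() ? std::string("1") : std::string("");
    return std::nullopt;                       // arrays/objects are handled by the caller
}

int main()
{
    json attrs = json::parse(R"({
        "name": "foo",
        "doCheck": true,
        "configureFlags": ["--with-foo", "--with-bar=1 2"],
        "hardening": {"format": false}
    })");

    for (auto & [key, value] : attrs.items()) {
        if (auto s = simple(value)) {
            std::cout << "declare " << key << "=" << *s << "\n";
        } else if (value.is_array() || value.is_object()) {
            std::string elems;
            bool good = true;
            for (auto & [k, v] : value.items()) {
                auto s2 = simple(v);
                if (!s2) { good = false; break; }   // nested structure: drop the whole attribute
                elems += value.is_array() ? *s2 + " " : "[" + k + "]=" + *s2 + " ";
            }
            if (good)
                std::cout << "declare " << (value.is_array() ? "-a " : "-A ")
                          << key << "=(" << elems << ")\n";
        }
    }
    return 0;
}

Run against the example data it prints declare lines for name, doCheck, configureFlags and hardening, which is the same shape the builder finds in .attrs.sh (the real file additionally escapes every value).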
2020-09-14 12:04:02 +00:00
struct RestrictedStoreConfig : LocalFSStoreConfig
{
using LocalFSStoreConfig::LocalFSStoreConfig;
const std::string name() { return "Restricted Store"; }
};
/* A wrapper around LocalStore that only allows building/querying of
paths that are in the input closures of the build or were added via
recursive Nix calls. */
2020-09-14 12:04:02 +00:00
struct RestrictedStore : public LocalFSStore, public virtual RestrictedStoreConfig
{
ref<LocalStore> next;
DerivationGoal & goal;
RestrictedStore(const Params & params, ref<LocalStore> next, DerivationGoal & goal)
2020-09-14 09:18:45 +00:00
: StoreConfig(params), Store(params), LocalFSStore(params), next(next), goal(goal)
{ }
Path getRealStoreDir() override
{ return next->realStoreDir; }
std::string getUri() override
{ return next->getUri(); }
2019-12-05 18:11:09 +00:00
StorePathSet queryAllValidPaths() override
{
2019-12-05 18:11:09 +00:00
StorePathSet paths;
2020-06-16 20:20:18 +00:00
for (auto & p : goal.inputPaths) paths.insert(p);
for (auto & p : goal.addedPaths) paths.insert(p);
return paths;
}
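The union built above is the complete view the recursive-Nix client gets of the store: the build's input closure plus whatever earlier recursive calls added. A standalone sketch of the corresponding membership test (names are illustrative; the goal's isAllowed check plays essentially this role):

// Sketch: a path is visible to the recursive client iff it is in the input
// closure or was added by an earlier recursive Nix call (illustrative only).
#include <set>
#include <string>

struct AllowedPaths
{
    std::set<std::string> inputPaths;   // closure of the derivation's inputs
    std::set<std::string> addedPaths;   // paths registered via recursive Nix calls

    bool isAllowed(const std::string & path) const
    {
        return inputPaths.count(path) || addedPaths.count(path);
    }
};

int main()
{
    AllowedPaths a;
    a.inputPaths.insert("/nix/store/example-input");          // hypothetical store path
    return a.isAllowed("/nix/store/example-input") ? 0 : 1;   // 0: allowed
}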
2019-12-05 18:11:09 +00:00
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
{
if (goal.isAllowed(path)) {
try {
/* Censor impure information. */
auto info = std::make_shared<ValidPathInfo>(*next->queryPathInfo(path));
2019-12-05 18:11:09 +00:00
info->deriver.reset();
info->registrationTime = 0;
info->ultimate = false;
info->sigs.clear();
callback(info);
} catch (InvalidPath &) {
callback(nullptr);
}
} else
callback(nullptr);
};
2019-12-05 18:11:09 +00:00
void queryReferrers(const StorePath & path, StorePathSet & referrers) override
{ }
2020-08-20 18:14:12 +00:00
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path) override
2020-07-24 21:02:51 +00:00
{
if (!goal.isAllowed(path))
throw InvalidPath("cannot query output map for unknown path '%s' in recursive Nix", printStorePath(path));
2020-08-20 18:14:12 +00:00
return next->queryPartialDerivationOutputMap(path);
2020-07-24 21:02:51 +00:00
}
2019-12-05 18:11:09 +00:00
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
{ throw Error("queryPathFromHashPart"); }
2019-12-05 18:11:09 +00:00
StorePath addToStore(const string & name, const Path & srcPath,
2020-05-27 18:04:20 +00:00
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
2019-11-04 11:55:05 +00:00
{ throw Error("addToStore"); }
void addToStore(const ValidPathInfo & info, Source & narSource,
2020-07-13 15:37:44 +00:00
RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) override
2019-11-04 11:55:05 +00:00
{
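/* Add the NAR to the wrapped store, then record the path as a dependency of the goal so that it is made available to the builder and taken into account when its outputs are scanned for references. */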
2020-07-13 15:37:44 +00:00
next->addToStore(info, narSource, repair, checkSigs);
2019-11-04 11:55:05 +00:00
goal.addDependency(info.path);
2018-10-02 14:01:26 +00:00
}
2019-12-05 18:11:09 +00:00
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair = NoRepair) override
2018-10-02 14:01:26 +00:00
{
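/* Delegate to the wrapped store and register the resulting text path with the goal. */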
auto path = next->addTextToStore(name, s, references, repair);
goal.addDependency(path);
return path;
}
2019-12-05 18:11:09 +00:00
void narFromPath(const StorePath & path, Sink & sink) override
2018-10-02 14:01:26 +00:00
{
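/* Only paths the goal already knows about may be dumped; everything else stays invisible to the recursive client. */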
if (!goal.isAllowed(path))
2019-12-05 18:11:09 +00:00
throw InvalidPath("cannot dump unknown path '%s' in recursive Nix", printStorePath(path));
2018-10-02 14:01:26 +00:00
LocalFSStore::narFromPath(path, sink);
}
2019-12-05 18:11:09 +00:00
void ensurePath(const StorePath & path) override
2018-10-02 14:01:26 +00:00
{
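/* Arbitrary substitution is not allowed: the path must already be a dependency of the goal, i.e. in the inputs closure or produced by an earlier recursive call. */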
if (!goal.isAllowed(path))
2019-12-05 18:11:09 +00:00
throw InvalidPath("cannot substitute unknown path '%s' in recursive Nix", printStorePath(path));
2018-10-02 14:01:26 +00:00
/* Nothing to be done; 'path' must already be valid. */
}
2019-12-05 18:11:09 +00:00
void buildPaths(const std::vector<StorePathWithOutputs> & paths, BuildMode buildMode) override
2018-10-02 14:01:26 +00:00
{
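/* Only normal builds are supported, and only of paths the goal already knows about. */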
if (buildMode != bmNormal) throw Error("unsupported build mode");
2019-12-05 18:11:09 +00:00
StorePathSet newPaths;
2018-10-02 14:01:26 +00:00
for (auto & path : paths) {
2020-08-07 19:09:26 +00:00
if (!goal.isAllowed(path.path))
2019-12-05 18:11:09 +00:00
throw InvalidPath("cannot build unknown path '%s' in recursive Nix", printStorePath(path.path));
2018-10-02 14:01:26 +00:00
}
next->buildPaths(paths, buildMode);
2020-08-07 19:09:26 +00:00
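/* Collect the outputs that were just built so that their closure can be registered as new dependencies of the goal. */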
for (auto & path : paths) {
if (!path.path.isDerivation()) continue;
2020-08-20 18:14:12 +00:00
auto outputs = next->queryDerivationOutputMap(path.path);
2020-08-07 19:09:26 +00:00
for (auto & output : outputs)
if (wantOutput(output.first, path.outputs))
newPaths.insert(output.second);
}
2019-12-05 18:11:09 +00:00
StorePathSet closure;
2018-10-02 14:01:26 +00:00
next->computeFSClosure(newPaths, closure);
for (auto & path : closure)
goal.addDependency(path);
}
2019-12-05 18:11:09 +00:00
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
2018-10-02 14:01:26 +00:00
BuildMode buildMode = bmNormal) override
{ unsupported("buildDerivation"); }
2019-12-05 18:11:09 +00:00
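/* Temporary GC roots are ignored in the restricted store. */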
void addTempRoot(const StorePath & path) override
2018-10-02 14:01:26 +00:00
{ }
2019-12-05 18:11:09 +00:00
void addIndirectRoot(const Path & path) override
2018-10-02 14:01:26 +00:00
{ }
2019-12-05 18:11:09 +00:00
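/* The daemon's GC roots are not exposed to recursive clients; an empty root set is returned. */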
Roots findRoots(bool censor) override
2018-10-02 14:01:26 +00:00
{ return Roots(); }
2019-12-05 18:11:09 +00:00
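/* Garbage collection is silently ignored in the restricted store. */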
void collectGarbage(const GCOptions & options, GCResults & results) override
2018-10-02 14:01:26 +00:00
{ }
2019-12-05 18:11:09 +00:00
void addSignatures(const StorePath & storePath, const StringSet & sigs) override
2018-10-02 14:01:26 +00:00
    { unsupported("addSignatures"); }
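    /* Note: refusing addSignatures() here follows the same censorship the
       recursive store applies elsewhere; impure metadata such as signatures
       and timestamps is kept out of what builders can see or modify. */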
2019-12-05 18:11:09 +00:00
    void queryMissing(const std::vector<StorePathWithOutputs> & targets,
        StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
2020-07-30 11:10:49 +00:00
        uint64_t & downloadSize, uint64_t & narSize) override
2018-10-02 14:01:26 +00:00
    {
        /* This is slightly impure since it leaks information to the
           client about what paths will be built / substituted or are
           already present. Probably not a big deal. */
2019-12-05 18:11:09 +00:00
        std::vector<StorePathWithOutputs> allowed;
2018-10-02 14:01:26 +00:00
        for (auto & path : targets) {
2019-12-05 18:11:09 +00:00
            if (goal.isAllowed(path.path))
                allowed.emplace_back(path);
2018-10-02 14:01:26 +00:00
            else
2020-06-16 20:20:18 +00:00
                unknown.insert(path.path);
2018-10-02 14:01:26 +00:00
        }

        next->queryMissing(allowed, willBuild, willSubstitute,
            unknown, downloadSize, narSize);
    }
};
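The filtering above ultimately relies on goal.isAllowed(). As a rough sketch of the intended semantics (an assumption based on how the inputs closure and addedPaths are used around this code, not the actual DerivationGoal member):

    /* Sketch only: assumes isAllowed() is a membership test over the inputs
       closure plus the paths registered by recursive Nix calls. */
    static bool isAllowedSketch(const StorePathSet & inputClosure,
        const StorePathSet & addedPaths, const StorePath & path)
    {
        return inputClosure.count(path) || addedPaths.count(path);
    }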
void DerivationGoal::startDaemon()
{
2019-11-04 12:48:58 +00:00
    settings.requireExperimentalFeature("recursive-nix");
2018-10-02 14:01:26 +00:00
    Store::Params params;
    params["path-info-cache-size"] = "0";
    params["store"] = worker.store.storeDir;
    params["root"] = worker.store.rootDir;
    params["state"] = "/no-such-path";
    params["log"] = "/no-such-path";
    auto store = make_ref<RestrictedStore>(params,
        ref<LocalStore>(std::dynamic_pointer_cast<LocalStore>(worker.store.shared_from_this())),
        *this);

    addedPaths.clear();

    auto socketName = ".nix-socket";
    Path socketPath = tmpDir + "/" + socketName;
    env["NIX_REMOTE"] = "unix://" + tmpDirInSandbox + "/" + socketName;

    daemonSocket = createUnixDomainSocket(socketPath, 0600);

    chownToBuilder(socketPath);
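    /* Inside the sandbox the builder sees NIX_REMOTE=unix://<tmpDirInSandbox>/.nix-socket,
       so any nix command run by the builder talks to this per-build daemon
       thread instead of requiring a system-wide nix-daemon. */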
    daemonThread = std::thread([this, store]() {

        while (true) {

            /* Accept a connection. */
            struct sockaddr_un remoteAddr;
            socklen_t remoteAddrLen = sizeof(remoteAddr);

            AutoCloseFD remote = accept(daemonSocket.get(),
                (struct sockaddr *) &remoteAddr, &remoteAddrLen);
            if (!remote) {
                if (errno == EINTR) continue;
                if (errno == EINVAL) break;
                throw SysError("accepting connection");
            }

            closeOnExec(remote.get());

            debug("received daemon connection");
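            /* Each accepted connection gets its own worker thread, which runs
               the normal daemon protocol over the socket but against the
               RestrictedStore wrapper rather than the real local store. */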
2019-12-05 18:11:09 +00:00
            auto workerThread = std::thread([store, remote{std::move(remote)}]() {
2019-11-04 13:27:28 +00:00
                FdSource from(remote.get());
                FdSink to(remote.get());
                try {
                    daemon::processConnection(store, from, to,
2020-08-12 15:14:56 +00:00
                        daemon::NotTrusted, daemon::Recursive,
                        [&](Store & store) { store.createUser("nobody", 65535); });
2019-11-04 13:27:28 +00:00
                    debug("terminated daemon connection");
                } catch (SysError &) {
                    ignoreException();
                }
            });
2018-10-02 14:01:26 +00:00
2019-11-04 13:27:28 +00:00
            daemonWorkerThreads.push_back(std::move(workerThread));
2018-10-02 14:01:26 +00:00
        }

        debug("daemon shutting down");
    });
}
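For illustration, the client side of this handshake is nothing more than a Unix-domain-socket connection to the path advertised in NIX_REMOTE; the real client is the nix binary running inside the build, which then speaks the full daemon wire protocol over that connection. A minimal, hypothetical sketch using plain POSIX calls (connectToBuildDaemon is not part of Nix):

    #include <cstdlib>
    #include <cstring>
    #include <stdexcept>
    #include <string>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    /* Hypothetical helper: connect to the in-build daemon socket advertised
       as NIX_REMOTE=unix://<path>. The caller would then speak the daemon
       protocol over the returned file descriptor. */
    int connectToBuildDaemon()
    {
        const char * remote = std::getenv("NIX_REMOTE");
        if (!remote || std::string(remote).rfind("unix://", 0) != 0)
            throw std::runtime_error("NIX_REMOTE does not name a Unix domain socket");
        std::string path = std::string(remote).substr(7);

        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd == -1) throw std::runtime_error("socket() failed");

        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        std::strncpy(addr.sun_path, path.c_str(), sizeof(addr.sun_path) - 1);

        if (connect(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) == -1) {
            close(fd);
            throw std::runtime_error("connect() to '" + path + "' failed");
        }
        return fd;
    }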
void DerivationGoal::stopDaemon()
{
    if (daemonSocket && shutdown(daemonSocket.get(), SHUT_RDWR) == -1)
        throw SysError("shutting down daemon socket");
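    /* Shutting down the listening socket causes the blocking accept() in the
       daemon thread to fail (with EINVAL on Linux), which is what makes the
       accept loop above terminate. */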
    if (daemonThread.joinable())
        daemonThread.join();
2019-11-04 13:27:28 +00:00
    // FIXME: should prune worker threads more quickly.
    // FIXME: shutdown the client socket to speed up worker termination.
    for (auto & thread : daemonWorkerThreads)
        thread.join();
    daemonWorkerThreads.clear();
2018-10-02 14:01:26 +00:00
    daemonSocket = -1;
}
2019-12-05 18:11:09 +00:00
void DerivationGoal::addDependency(const StorePath & path)
2018-10-02 14:01:26 +00:00
{
    if (isAllowed(path)) return;
2020-06-16 20:20:18 +00:00
    addedPaths.insert(path);
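    /* Paths recorded here are treated as legitimate reference targets: after
       the build, the outputs are scanned against the inputs closure plus
       addedPaths, so an output may refer to something built by a recursive
       Nix call. */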
2018-10-02 14:01:26 +00:00
    /* If we're doing a sandbox build, then we have to make the path
       appear in the sandbox. */
    if (useChroot) {
2019-12-05 18:11:09 +00:00
        debug("materialising '%s' in the sandbox", worker.store.printStorePath(path));
2018-10-02 14:01:26 +00:00
        #if __linux__
2020-03-24 13:26:13 +00:00
            Path source = worker.store.Store::toRealPath(path);
2019-12-05 18:11:09 +00:00
            Path target = chrootRootDir + worker.store.printStorePath(path);
2018-10-02 14:01:26 +00:00
            debug("bind-mounting %s -> %s", target, source);

            if (pathExists(target))
2019-12-05 18:11:09 +00:00
                throw Error("store path '%s' already exists in the sandbox", worker.store.printStorePath(path));
2018-10-02 14:01:26 +00:00
2020-09-23 17:17:28 +00:00
            auto st = lstat(source);
2018-10-02 14:01:26 +00:00
            if (S_ISDIR(st.st_mode)) {

                /* Bind-mount the path into the sandbox. This requires
                   entering its mount namespace, which is not possible
                   in multithreaded programs. So we do this in a
                   child process. */
                Pid child(startProcess([&]() {

                    if (setns(sandboxMountNamespace.get(), 0) == -1)
                        throw SysError("entering sandbox mount namespace");

                    createDirs(target);

                    if (mount(source.c_str(), target.c_str(), "", MS_BIND, 0) == -1)
                        throw SysError("bind mount from '%s' to '%s' failed", source, target);

                    _exit(0);
                }));

                int status = child.wait();
                if (status != 0)
2019-12-05 18:11:09 +00:00
                    throw Error("could not add path '%s' to sandbox", worker.store.printStorePath(path));
2018-10-02 14:01:26 +00:00
    } else
        linkOrCopy(source, target);
#else
2019-12-05 18:11:09 +00:00
    throw Error("don't know how to make path '%s' (produced by a recursive Nix call) appear in the sandbox",
        worker.store.printStorePath(path));
Recursive Nix support
2018-10-02 14:01:26 +00:00
#endif
    }
}
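The setns()/bind-mount dance above can be hard to follow out of context. The following standalone sketch (illustrative only, not taken from this file; the function name and paths are made up) shows the underlying Linux mechanism: a helper obtains a file descriptor for another process's mount namespace via /proc/<pid>/ns/mnt, joins it with setns(), and then performs a bind mount that becomes visible inside that namespace. This requires CAP_SYS_ADMIN, and setns() switches the whole calling thread, which is why the real code forks a single-threaded child first.

// Minimal sketch, not Nix code. May require _GNU_SOURCE on some platforms
// (g++ on glibc defines it by default).
#include <fcntl.h>
#include <sched.h>
#include <sys/mount.h>
#include <unistd.h>
#include <cstdio>
#include <string>

/* Hypothetical helper: bind-mount 'source' onto 'target' inside the mount
   namespace of process 'pid'. Returns true on success. */
static bool bindIntoMountNamespace(pid_t pid, const std::string & source, const std::string & target)
{
    std::string nsPath = "/proc/" + std::to_string(pid) + "/ns/mnt";
    int nsFd = open(nsPath.c_str(), O_RDONLY | O_CLOEXEC);
    if (nsFd == -1) { perror("open mount namespace"); return false; }

    /* setns() changes this thread's mount namespace; real code does this in a
       freshly forked, single-threaded child. */
    if (setns(nsFd, CLONE_NEWNS) == -1) { perror("setns"); close(nsFd); return false; }
    close(nsFd);

    if (mount(source.c_str(), target.c_str(), "", MS_BIND, nullptr) == -1) {
        perror("bind mount");
        return false;
    }
    return true;
}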
2017-01-25 11:00:28 +00:00
void DerivationGoal::chownToBuilder(const Path & path)
{
2017-01-25 11:45:38 +00:00
    if (!buildUser) return;
    if (chown(path.c_str(), buildUser->getUID(), buildUser->getGID()) == -1)
2020-04-21 23:07:07 +00:00
        throw SysError("cannot change ownership of '%1%'", path);
2017-01-25 11:00:28 +00:00
}
2017-05-29 09:34:24 +00:00
void setupSeccomp()
{
2018-02-19 14:56:24 +00:00
#if __linux__
2017-10-12 16:21:55 +00:00
    if (!settings.filterSyscalls) return;
2018-02-19 14:56:24 +00:00
#if HAVE_SECCOMP
2017-05-29 09:34:24 +00:00
    scmp_filter_ctx ctx;
    if (!(ctx = seccomp_init(SCMP_ACT_ALLOW)))
        throw SysError("unable to initialize seccomp mode 2");
    Finally cleanup([&]() {
        seccomp_release(ctx);
    });
2019-05-03 08:44:32 +00:00
    if (nativeSystem == "x86_64-linux" &&
2017-05-29 12:18:36 +00:00
        seccomp_arch_add(ctx, SCMP_ARCH_X86) != 0)
2017-05-29 09:34:24 +00:00
        throw SysError("unable to add 32-bit seccomp architecture");
2019-05-03 08:44:32 +00:00
    if (nativeSystem == "x86_64-linux" &&
2017-07-04 17:00:51 +00:00
        seccomp_arch_add(ctx, SCMP_ARCH_X32) != 0)
        throw SysError("unable to add X32 seccomp architecture");
2019-05-03 08:44:32 +00:00
    if (nativeSystem == "aarch64-linux" &&
2018-02-20 12:30:03 +00:00
        seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
2019-05-03 08:44:32 +00:00
        printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
2018-02-20 12:30:03 +00:00
2017-05-30 11:55:17 +00:00
    /* Prevent builders from creating setuid/setgid binaries. */
2017-05-29 09:34:24 +00:00
    for (int perm : { S_ISUID, S_ISGID }) {
        if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
2017-05-30 12:37:24 +00:00
                SCMP_A1(SCMP_CMP_MASKED_EQ, (scmp_datum_t) perm, (scmp_datum_t) perm)) != 0)
2017-05-29 09:34:24 +00:00
            throw SysError("unable to add seccomp rule");
        if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(fchmod), 1,
2017-05-30 12:37:24 +00:00
                SCMP_A1(SCMP_CMP_MASKED_EQ, (scmp_datum_t) perm, (scmp_datum_t) perm)) != 0)
2017-05-29 09:34:24 +00:00
            throw SysError("unable to add seccomp rule");
        if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(fchmodat), 1,
2017-05-30 12:37:24 +00:00
                SCMP_A2(SCMP_CMP_MASKED_EQ, (scmp_datum_t) perm, (scmp_datum_t) perm)) != 0)
2017-05-29 09:34:24 +00:00
            throw SysError("unable to add seccomp rule");
    }
2017-05-30 11:55:17 +00:00
    /* Prevent builders from creating EAs or ACLs. Not all filesystems
       support these, and they're not allowed in the Nix store because
       they're not representable in the NAR serialisation. */
    if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(setxattr), 0) != 0 ||
        seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(lsetxattr), 0) != 0 ||
        seccomp_rule_add(ctx, SCMP_ACT_ERRNO(ENOTSUP), SCMP_SYS(fsetxattr), 0) != 0)
        throw SysError("unable to add seccomp rule");
2017-07-04 13:43:06 +00:00
    if (seccomp_attr_set(ctx, SCMP_FLTATR_CTL_NNP, settings.allowNewPrivileges ? 0 : 1) != 0)
        throw SysError("unable to set 'no new privileges' seccomp attribute");
2017-05-29 09:34:24 +00:00
    if (seccomp_load(ctx) != 0)
        throw SysError("unable to load seccomp BPF program");
2018-02-19 14:56:24 +00:00
#else
2018-02-19 19:46:39 +00:00
    throw Error(
        "seccomp is not supported on this platform; "
        "you can bypass this error by setting the option 'filter-syscalls' to false, but note that untrusted builds can then create setuid binaries!");
2018-02-19 14:56:24 +00:00
#endif
2017-05-29 09:34:24 +00:00
#endif
}
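As a sanity check of the filter above, its effect can be observed from inside a filtered process: once the BPF program is loaded, any chmod() that tries to set the setuid or setgid bit fails with EPERM, while ordinary mode changes still succeed. A minimal standalone sketch (illustrative; the file path is a placeholder and this is not part of the build code):

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main()
{
    /* Assumes the seccomp filter installed above is active in this process. */
    const char * f = "/tmp/seccomp-test-file";   // placeholder path
    int fd = open(f, O_CREAT | O_WRONLY, 0644);
    if (fd != -1) close(fd);

    if (chmod(f, 0755) == 0)
        printf("plain chmod succeeded as expected\n");

    if (chmod(f, 0755 | S_ISUID) == -1 && errno == EPERM)
        printf("chmod with the setuid bit was rejected with EPERM, as intended\n");

    return 0;
}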
2014-12-10 16:25:12 +00:00
void DerivationGoal::runChild()
2012-06-25 19:45:16 +00:00
{
    /* Warning: in the child we should absolutely not make any SQLite
       calls! */
    try { /* child */
2004-05-11 18:05:44 +00:00
2014-08-01 17:29:03 +00:00
        commonChildInit(builderOut);
2017-05-30 10:37:04 +00:00
        try {
            setupSeccomp();
        } catch (...) {
            if (buildUser) throw;
        }
2017-05-29 09:34:24 +00:00
2016-06-09 16:27:39 +00:00
        bool setUser = true;
2017-02-16 14:42:49 +00:00
        /* Make the contents of netrc available to builtin:fetchurl
           (which may run under a different uid and/or in a sandbox). */
        std::string netrcData;
        try {
            if (drv->isBuiltin() && drv->builder == "builtin:fetchurl")
                netrcData = readFile(settings.netrcFile);
        } catch (SysError &) { }
2015-12-03 15:30:19 +00:00
#if __linux__
2012-06-25 19:45:16 +00:00
        if (useChroot) {
2014-08-21 12:08:09 +00:00
2016-07-11 19:44:44 +00:00
            userNamespaceSync.writeSide = -1;
2016-06-09 16:27:39 +00:00
2016-07-11 19:44:44 +00:00
            if (drainFD(userNamespaceSync.readSide.get()) != "1")
2016-06-09 16:27:39 +00:00
                throw Error("user namespace initialisation failed");
2016-07-11 19:44:44 +00:00
            userNamespaceSync.readSide = -1;
2016-06-09 16:27:39 +00:00
2016-06-03 13:45:11 +00:00
            if (privateNetwork) {
                /* Initialise the loopback interface. */
                AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP));
2016-07-11 19:44:44 +00:00
                if (!fd) throw SysError("cannot open IP socket");
2012-07-27 13:59:18 +00:00
2016-06-03 13:45:11 +00:00
                struct ifreq ifr;
                strcpy(ifr.ifr_name, "lo");
                ifr.ifr_flags = IFF_UP | IFF_LOOPBACK | IFF_RUNNING;
2016-07-11 19:44:44 +00:00
                if (ioctl(fd.get(), SIOCSIFFLAGS, &ifr) == -1)
2016-06-03 13:45:11 +00:00
                    throw SysError("cannot set loopback interface flags");
            }
2012-06-25 19:45:16 +00:00
            /* Set the hostname etc. to fixed values. */
            char hostname[] = "localhost";
2014-12-12 16:14:28 +00:00
            if (sethostname(hostname, sizeof(hostname)) == -1)
                throw SysError("cannot set host name");
2012-06-25 19:45:16 +00:00
            char domainname[] = "(none)"; // kernel default
2014-12-12 16:14:28 +00:00
            if (setdomainname(domainname, sizeof(domainname)) == -1)
                throw SysError("cannot set domain name");
2012-06-25 19:45:16 +00:00
2012-08-20 19:27:30 +00:00
            /* Make all filesystems private. This is necessary
               because subtrees may have been mounted as "shared"
               (MS_SHARED). (Systemd does this, for instance.) Even
               though we have a private mount namespace, mounting
               filesystems on top of a shared subtree still propagates
               outside of the namespace. Making a subtree private is
               local to the namespace, though, so setting MS_PRIVATE
               does not affect the outside world. */
Recursive Nix support
2018-10-02 14:01:26 +00:00
            if (mount(0, "/", 0, MS_PRIVATE | MS_REC, 0) == -1)
                throw SysError("unable to make '/' private");
2012-08-20 19:27:30 +00:00
2015-02-13 16:05:49 +00:00
            /* Bind-mount chroot directory to itself, to treat it as a
               different filesystem from /, as needed for pivot_root. */
            if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), 0, MS_BIND, 0) == -1)
2020-04-21 23:07:07 +00:00
                throw SysError("unable to bind mount '%1%'", chrootRootDir);
2015-02-13 16:05:49 +00:00
Recursive Nix support
2018-10-02 14:01:26 +00:00
            /* Bind-mount the sandbox's Nix store onto itself so that
               we can mark it as a "shared" subtree, allowing bind
               mounts made in *this* mount namespace to be propagated
               into the child namespace created by the
               unshare(CLONE_NEWNS) call below.
               Marking chrootRootDir as MS_SHARED causes pivot_root()
               to fail with EINVAL. Don't know why. */
            Path chrootStoreDir = chrootRootDir + worker.store.storeDir;
            if (mount(chrootStoreDir.c_str(), chrootStoreDir.c_str(), 0, MS_BIND, 0) == -1)
                throw SysError("unable to bind mount the Nix store '%s'", chrootStoreDir);
            if (mount(0, chrootStoreDir.c_str(), 0, MS_SHARED, 0) == -1)
                throw SysError("unable to make '%s' shared", chrootStoreDir);
2014-02-27 22:17:53 +00:00
            /* Set up a nearly empty /dev, unless the user asked to
               bind-mount the host /dev. */
2015-02-23 14:41:41 +00:00
            Strings ss;
2014-02-27 22:17:53 +00:00
            if (dirsInChroot.find("/dev") == dirsInChroot.end()) {
                createDirs(chrootRootDir + "/dev/shm");
2014-02-27 22:25:03 +00:00
                createDirs(chrootRootDir + "/dev/pts");
2014-02-27 22:17:53 +00:00
                ss.push_back("/dev/full");
2020-08-12 16:32:36 +00:00
                if (worker.store.systemFeatures.get().count("kvm") && pathExists("/dev/kvm"))
2014-03-21 12:54:53 +00:00
                    ss.push_back("/dev/kvm");
2014-02-27 22:17:53 +00:00
                ss.push_back("/dev/null");
                ss.push_back("/dev/random");
                ss.push_back("/dev/tty");
                ss.push_back("/dev/urandom");
                ss.push_back("/dev/zero");
                createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd");
                createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin");
                createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout");
                createSymlink("/proc/self/fd/2", chrootRootDir + "/dev/stderr");
            }
2015-02-23 14:41:41 +00:00
            /* Fixed-output derivations typically need to access the
               network, so give them access to /etc/resolv.conf and so
               on. */
2020-06-03 17:38:54 +00:00
            if (derivationIsImpure(derivationType)) {
2015-02-23 14:41:41 +00:00
                ss.push_back("/etc/resolv.conf");
2019-06-27 18:22:53 +00:00
                // Only use nss functions to resolve hosts and
                // services. Don't use it for anything else that may
                // be configured for this system. This limits the
2020-05-09 14:58:43 +00:00
                // potential impurities introduced in fixed-outputs.
2019-06-27 18:22:53 +00:00
                writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n");
2015-02-23 14:41:41 +00:00
                ss.push_back("/etc/services");
                ss.push_back("/etc/hosts");
2017-04-04 15:40:50 +00:00
                if (pathExists("/var/run/nscd/socket"))
                    ss.push_back("/var/run/nscd/socket");
2015-02-23 14:41:41 +00:00
            }
2017-04-04 15:54:16 +00:00
            for (auto & i : ss) dirsInChroot.emplace(i, i);
2015-02-23 14:41:41 +00:00
2012-06-25 19:45:16 +00:00
            /* Bind-mount all the directories from the "host"
               filesystem that we want in the chroot
               environment. */
2017-05-08 13:42:59 +00:00
            auto doBind = [&](const Path & source, const Path & target, bool optional = false) {
2020-05-11 21:52:15 +00:00
                debug("bind mounting '%1%' to '%2%'", source, target);
2017-05-08 13:42:59 +00:00
                struct stat st;
2016-10-31 16:09:52 +00:00
                if (stat(source.c_str(), &st) == -1) {
2017-05-08 13:42:59 +00:00
                    if (optional && errno == ENOENT)
                        return;
2016-10-31 16:09:52 +00:00
                    else
2017-07-30 11:27:57 +00:00
                        throw SysError("getting attributes of path '%1%'", source);
2016-10-31 16:09:52 +00:00
                }
2013-07-12 13:35:33 +00:00
                if (S_ISDIR(st.st_mode))
                    createDirs(target);
                else {
                    createDirs(dirOf(target));
                    writeFile(target, "");
                }
2016-06-03 13:45:11 +00:00
                if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, 0) == -1)
2017-07-30 11:27:57 +00:00
                    throw SysError("bind mount from '%1%' to '%2%' failed", source, target);
2017-05-08 13:42:59 +00:00
            };
            for (auto & i : dirsInChroot) {
                if (i.second.source == "/proc") continue; // backwards compatibility
                doBind(i.second.source, chrootRootDir + i.first, i.second.optional);
2008-12-11 17:00:12 +00:00
            }
2012-06-25 19:45:16 +00:00
2014-10-29 14:49:03 +00:00
            /* Bind a new instance of procfs on /proc. */
2012-08-20 19:27:00 +00:00
            createDirs(chrootRootDir + "/proc");
2012-06-25 19:45:16 +00:00
            if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
                throw SysError("mounting /proc");
2012-06-27 13:52:27 +00:00
            /* Mount a new tmpfs on /dev/shm to ensure that whatever
               the builder puts in /dev/shm is cleaned up automatically. */
2016-09-21 14:53:41 +00:00
            if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
Explicitly model all settings and fail on unrecognized ones
Previously, the Settings class allowed other code to query for string
properties, which led to a proliferation of code all over the place making
up new options without any sort of central registry of valid options. This
commit pulls all those options back into the central Settings class and
removes the public get() methods, to discourage future abuses like that.
Furthermore, because we know the full set of options ahead of time, we
now fail loudly if someone enters an unrecognized option, thus preventing
subtle typos. With some template fun, we could probably also dump the full
set of options (with documentation, defaults, etc.) to the command line,
but I'm not doing that yet here.
2017-02-22 03:50:18 +00:00
fmt ( " size=%s " , settings . sandboxShmSize ) . c_str ( ) ) = = - 1 )
2014-02-27 22:17:53 +00:00
throw SysError ( " mounting /dev/shm " ) ;
2012-07-27 13:59:18 +00:00
2014-02-27 22:25:03 +00:00
            /* Mount a new devpts on /dev/pts. Note that this
               requires the kernel to be compiled with
               CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case
               if /dev/pts/ptmx exists). */
2016-06-09 16:27:39 +00:00
            if (pathExists("/dev/pts/ptmx") &&
2014-02-27 22:25:03 +00:00
                !pathExists(chrootRootDir + "/dev/ptmx")
2017-03-31 16:12:01 +00:00
                && !dirsInChroot.count("/dev/pts"))
2014-02-27 22:25:03 +00:00
            {
2017-05-08 13:42:59 +00:00
                if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == 0)
                {
                    createSymlink("/dev/pts/ptmx", chrootRootDir + "/dev/ptmx");
2014-04-02 21:41:11 +00:00
2017-05-08 13:42:59 +00:00
                    /* Make sure /dev/pts/ptmx is world-writable. With some
                       Linux versions, it is created with permissions 0. */
                    chmod_(chrootRootDir + "/dev/pts/ptmx", 0666);
                } else {
                    if (errno != EINVAL)
                        throw SysError("mounting /dev/pts");
2018-04-20 16:55:04 +00:00
                    doBind("/dev/pts", chrootRootDir + "/dev/pts");
                    doBind("/dev/ptmx", chrootRootDir + "/dev/ptmx");
2017-05-08 13:42:59 +00:00
                }
2014-02-27 22:25:03 +00:00
            }
Recursive Nix support
2018-10-02 14:01:26 +00:00
            /* Unshare this mount namespace. This is necessary because
               pivot_root() below changes the root of the mount
               namespace. This means that the call to setns() in
               addDependency() would hide the host's filesystem,
               making it impossible to bind-mount paths from the host
               Nix store into the sandbox. Therefore, we save the
               pre-pivot_root namespace in
               sandboxMountNamespace. Since we made /nix/store a
               shared subtree above, this allows addDependency() to
               make paths appear in the sandbox. */
            if (unshare(CLONE_NEWNS) == -1)
                throw SysError("unsharing mount namespace");
2015-02-13 16:05:49 +00:00
            /* Do the chroot(). */
            if (chdir(chrootRootDir.c_str()) == -1)
2020-04-21 23:07:07 +00:00
                throw SysError("cannot change directory to '%1%'", chrootRootDir);
2015-02-13 16:05:49 +00:00
            if (mkdir("real-root", 0) == -1)
                throw SysError("cannot create real-root directory");
            if (pivot_root(".", "real-root") == -1)
2020-04-21 23:07:07 +00:00
                throw SysError("cannot pivot old root directory onto '%1%'", (chrootRootDir + "/real-root"));
2015-02-13 16:05:49 +00:00
            if (chroot(".") == -1)
2020-04-21 23:07:07 +00:00
                throw SysError("cannot change root directory to '%1%'", chrootRootDir);
2015-02-13 16:05:49 +00:00
            if (umount2("real-root", MNT_DETACH) == -1)
                throw SysError("cannot unmount real root filesystem");
            if (rmdir("real-root") == -1)
                throw SysError("cannot remove real-root directory");
2016-06-09 16:27:39 +00:00
2016-12-19 10:52:57 +00:00
            /* Switch to the sandbox uid/gid in the user namespace,
               which corresponds to the build user or calling user in
               the parent namespace. */
            if (setgid(sandboxGid) == -1)
2016-06-09 16:27:39 +00:00
                throw SysError("setgid failed");
2016-12-19 10:52:57 +00:00
            if (setuid(sandboxUid) == -1)
2016-06-09 16:27:39 +00:00
                throw SysError("setuid failed");
            setUser = false;
2012-06-25 19:45:16 +00:00
        }
2007-10-27 16:51:55 +00:00
#endif
2012-07-27 13:59:18 +00:00
2015-12-02 13:59:07 +00:00
        if (chdir(tmpDirInSandbox.c_str()) == -1)
2020-04-21 23:07:07 +00:00
            throw SysError("changing into '%1%'", tmpDir);
2010-08-25 20:44:28 +00:00
2012-06-25 19:45:16 +00:00
        /* Close all other file descriptors. */
2017-08-09 14:22:05 +00:00
        closeMostFDs({STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO});
2012-04-14 22:20:32 +00:00
2014-09-17 15:21:13 +00:00
#if __linux__
2012-06-25 19:45:16 +00:00
        /* Change the personality to 32-bit if we're doing an
           i686-linux build on an x86_64-linux machine. */
2013-08-22 15:57:39 +00:00
        struct utsname utsbuf;
        uname(&utsbuf);
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from a on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
2015-07-17 15:57:40 +00:00
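To make the shape of the operation described above concrete, here is a heavily simplified, self-contained model of the idea (these are not the real Nix types or signatures; field and method names are approximations of what the commit message describes): a build request carries the in-memory derivation rather than a path to a .drv file whose closure would have to be copied.

#include <map>
#include <set>
#include <string>
#include <vector>

/* Rough model of an in-memory derivation as described above; field names
   are approximations, not the real Nix definitions. */
struct BasicDerivationModel
{
    std::map<std::string, std::string> outputs;   // output name -> store path
    std::set<std::string> inputSrcs;              // sources that must already be in the store
    std::string platform;
    std::string builder;
    std::vector<std::string> args;
    std::map<std::string, std::string> env;
};

/* Hypothetical interface sketch: a remote build needs only the top-level
   derivation plus the already-built inputs, not the closure of .drv files. */
struct StoreModel
{
    virtual void buildDerivation(const std::string & drvPath, const BasicDerivationModel & drv) = 0;
    virtual ~StoreModel() = default;
};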
        if (drv->platform == "i686-linux" &&
2013-08-22 15:57:39 +00:00
            (settings.thisSystem == "x86_64-linux" ||
             (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64")))) {
2014-11-14 13:16:20 +00:00
            if (personality(PER_LINUX32) == -1)
2012-06-25 19:45:16 +00:00
                throw SysError("cannot set i686-linux personality");
        }
2012-04-05 11:03:19 +00:00
2012-06-25 19:45:16 +00:00
        /* Impersonate a Linux 2.6 machine to get some determinism in
           builds that depend on the kernel version. */
Allow remote builds without sending the derivation closure
2015-07-17 15:57:40 +00:00
        if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) {
2012-06-25 19:45:16 +00:00
            int cur = personality(0xffffffff);
            if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
        }
2014-09-17 15:21:13 +00:00
        /* Disable address space randomization for improved
           determinism. */
        int cur = personality(0xffffffff);
        if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
2009-01-12 16:30:32 +00:00
#endif
2016-04-14 11:39:14 +00:00
        /* Disable core dumps by default. */
        struct rlimit limit = { 0, RLIM_INFINITY };
        setrlimit(RLIMIT_CORE, &limit);
        // FIXME: set other limits to deterministic values?
2012-06-25 19:45:16 +00:00
        /* Fill in the environment. */
        Strings envStrs;
2015-07-17 17:24:28 +00:00
        for (auto & i : env)
2016-08-17 13:12:54 +00:00
            envStrs.push_back(rewriteStrings(i.first + "=" + i.second, inputRewrites));
2012-07-27 13:59:18 +00:00
2012-06-25 19:45:16 +00:00
        /* If we are running in `build-users' mode, then switch to the
           user we allocated above. Make sure that we drop all root
           privileges. Note that above we have closed all file
           descriptors except std*, so that's safe. Also note that
           setuid() when run as root sets the real, effective and
           saved UIDs. */
2017-01-25 11:45:38 +00:00
        if (setUser && buildUser) {
2015-07-01 12:56:34 +00:00
            /* Preserve supplementary groups of the build user, to allow
               admins to specify groups such as "kvm". */
2017-01-25 11:45:38 +00:00
            if (!buildUser->getSupplementaryGIDs().empty() &&
                setgroups(buildUser->getSupplementaryGIDs().size(),
                    buildUser->getSupplementaryGIDs().data()) == -1)
2015-07-01 12:56:34 +00:00
                throw SysError("cannot set supplementary groups of build user");
2013-11-14 10:57:37 +00:00
2017-01-25 11:45:38 +00:00
            if (setgid(buildUser->getGID()) == -1 ||
                getgid() != buildUser->getGID() ||
                getegid() != buildUser->getGID())
2013-11-14 10:57:37 +00:00
                throw SysError("setgid failed");
2017-01-25 11:45:38 +00:00
            if (setuid(buildUser->getUID()) == -1 ||
                getuid() != buildUser->getUID() ||
                geteuid() != buildUser->getUID())
2013-11-14 10:57:37 +00:00
                throw SysError("setuid failed");
2012-06-25 19:45:16 +00:00
        }
2012-07-27 13:59:18 +00:00
2012-06-25 19:45:16 +00:00
        /* Fill in the arguments. */
2014-12-12 14:01:16 +00:00
        Strings args;
2015-01-06 06:27:38 +00:00
        const char * builder = "invalid";
Eliminate the "store" global variable
Also, move a few free-standing functions into StoreAPI and Derivation.
Also, introduce a non-nullable smart pointer, ref<T>, which is just a
wrapper around std::shared_ptr ensuring that the pointer is never
null. (For reference-counted values, this is better than passing a
"T&", because the latter doesn't maintain the refcount. Usually, the
caller will have a shared_ptr keeping the value alive, but that's not
always the case, e.g., when passing a reference to a std::thread via
std::bind.)
2016-02-04 13:28:26 +00:00
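The commit message above mentions a non-nullable smart pointer, ref<T>. As a rough illustration of that idea only (a minimal sketch; the real class in the Nix source has more constructors and conversions), such a wrapper can enforce the invariant at construction time while still keeping the refcount alive:

#include <memory>
#include <stdexcept>

/* Minimal sketch of a non-nullable wrapper around std::shared_ptr: unlike
   passing T&, it maintains the reference count, and it refuses to hold null. */
template<typename T>
class ref_sketch
{
    std::shared_ptr<T> p;
public:
    explicit ref_sketch(const std::shared_ptr<T> & p) : p(p)
    {
        if (!p) throw std::invalid_argument("null pointer cast to ref");
    }
    T & operator *() const { return *p; }
    T * operator ->() const { return p.get(); }
};

/* Usage (illustrative): auto store = ref_sketch<Store>(std::make_shared<Store>()); */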
        if (drv->isBuiltin()) {
2015-07-20 02:30:16 +00:00
            ;
2017-05-31 15:23:27 +00:00
        }
2015-12-03 15:30:19 +00:00
#if __APPLE__
2020-01-04 23:41:18 +00:00
        else {
2017-06-06 16:44:49 +00:00
            /* This has to appear before import statements. */
            std::string sandboxProfile = "(version 1)\n";
            if (useChroot) {
                /* Lots and lots and lots of file functions freak out if they can't stat their full ancestry */
                PathSet ancestry;
                /* We build the ancestry before adding all inputPaths to the store because we know they'll
                   all have the same parents (the store), and there might be lots of inputs. This isn't
                   particularly efficient... I doubt it'll be a bottleneck in practice */
                for (auto & i : dirsInChroot) {
                    Path cur = i.first;
                    while (cur.compare("/") != 0) {
                        cur = dirOf(cur);
                        ancestry.insert(cur);
                    }
                }
2015-01-06 06:27:38 +00:00
2017-06-06 16:44:49 +00:00
                /* And we want the store in there regardless of how empty dirsInChroot. We include the innermost
                   path component this time, since it's typically /nix/store and we care about that. */
                Path cur = worker.store.storeDir;
2015-01-06 06:27:38 +00:00
                while (cur.compare("/") != 0) {
                    ancestry.insert(cur);
2017-06-06 16:44:49 +00:00
                    cur = dirOf(cur);
2015-01-06 06:27:38 +00:00
                }
2017-06-06 16:44:49 +00:00
                /* Add all our input paths to the chroot */
2019-12-05 18:11:09 +00:00
                for (auto & i : inputPaths) {
                    auto p = worker.store.printStorePath(i);
                    dirsInChroot[p] = p;
                }
2015-01-06 06:27:38 +00:00
2017-06-06 16:44:49 +00:00
                /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */
                if (settings.darwinLogSandboxViolations) {
                    sandboxProfile += "(deny default)\n";
                } else {
                    sandboxProfile += "(deny default (with no-log))\n";
                }
2017-05-30 15:16:49 +00:00
2017-10-30 16:25:41 +00:00
                sandboxProfile += "(import \"sandbox-defaults.sb\")\n";
2017-05-31 15:23:27 +00:00
2020-06-03 17:38:54 +00:00
                if (derivationIsImpure(derivationType))
2017-10-30 16:25:41 +00:00
                    sandboxProfile += "(import \"sandbox-network.sb\")\n";
2015-11-16 13:29:09 +00:00
2020-08-07 19:09:26 +00:00
                /* Add the output paths we'll use at build-time to the chroot */
2017-06-06 16:44:49 +00:00
                sandboxProfile += "(allow file-read* file-write* process-exec\n";
2020-08-07 19:09:26 +00:00
                for (auto & [_, path] : scratchOutputs)
                    sandboxProfile += fmt("\t(subpath \"%s\")\n", worker.store.printStorePath(path));
2019-12-05 18:11:09 +00:00
2017-06-06 16:44:49 +00:00
                sandboxProfile += ")\n";
                /* Our inputs (transitive dependencies and any impurities computed above)
                   without file-write* allowed, access() incorrectly returns EPERM
                 */
                sandboxProfile += "(allow file-read* file-write* process-exec\n";
                for (auto & i : dirsInChroot) {
                    if (i.first != i.second.source)
2020-04-21 23:07:07 +00:00
                        throw Error(
                            "can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin",
                            i.first, i.second.source);
2017-06-06 16:44:49 +00:00
                    string path = i.first;
                    struct stat st;
                    if (lstat(path.c_str(), &st)) {
                        if (i.second.optional && errno == ENOENT)
                            continue;
2019-12-05 18:11:09 +00:00
                        throw SysError("getting attributes of path '%s'", path);
2017-06-06 16:44:49 +00:00
                    }
                    if (S_ISDIR(st.st_mode))
2019-12-05 18:11:09 +00:00
                        sandboxProfile += fmt("\t(subpath \"%s\")\n", path);
2017-06-06 16:44:49 +00:00
                    else
2019-12-05 18:11:09 +00:00
                        sandboxProfile += fmt("\t(literal \"%s\")\n", path);
2017-06-06 16:44:49 +00:00
                }
                sandboxProfile += ")\n";
2015-01-06 06:27:38 +00:00
2017-06-06 16:44:49 +00:00
                /* Allow file-read* on full directory hierarchy to self. Allows realpath() */
                sandboxProfile += "(allow file-read*\n";
                for (auto & i : ancestry) {
2019-12-05 18:11:09 +00:00
                    sandboxProfile += fmt("\t(literal \"%s\")\n", i);
2017-06-06 16:44:49 +00:00
                }
                sandboxProfile += ")\n";
2015-01-06 06:27:38 +00:00
2017-06-06 16:44:49 +00:00
                sandboxProfile += additionalSandboxProfile;
            } else
2017-10-30 16:25:41 +00:00
                sandboxProfile += "(import \"sandbox-minimal.sb\")\n";
2015-11-13 03:00:16 +00:00
2015-08-03 16:04:32 +00:00
            debug("Generated sandbox profile:");
            debug(sandboxProfile);
2017-05-31 11:39:27 +00:00
            Path sandboxFile = tmpDir + "/.sandbox.sb";
2015-11-15 11:08:50 +00:00
            writeFile(sandboxFile, sandboxProfile);
2015-11-13 03:00:16 +00:00
2018-09-28 12:31:16 +00:00
            bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking");
2017-10-30 16:25:41 +00:00
2017-06-06 16:44:49 +00:00
            /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
               to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
2019-11-22 15:06:44 +00:00
            Path globalTmpDir = canonPath(getEnv("TMPDIR").value_or("/tmp"), true);
2017-06-06 16:44:49 +00:00
            /* They don't like trailing slashes on subpath directives */
            if (globalTmpDir.back() == '/') globalTmpDir.pop_back();
2020-01-04 23:41:18 +00:00
            if (getEnv("_NIX_TEST_NO_SANDBOX") != "1") {
                builder = "/usr/bin/sandbox-exec";
                args.push_back("sandbox-exec");
                args.push_back("-f");
                args.push_back(sandboxFile);
                args.push_back("-D");
                args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir);
2017-10-30 16:25:41 +00:00
                args.push_back("-D");
2020-01-04 23:41:18 +00:00
                args.push_back("IMPORT_DIR=" + settings.nixDataDir + "/nix/sandbox/");
                if (allowLocalNetworking) {
                    args.push_back("-D");
                    args.push_back(string("_ALLOW_LOCAL_NETWORKING=1"));
                }
                args.push_back(drv->builder);
            } else {
                builder = drv->builder.c_str();
                args.push_back(std::string(baseNameOf(drv->builder)));
2017-10-30 16:25:41 +00:00
            }
2017-05-31 15:23:27 +00:00
        }
2020-01-04 23:41:18 +00:00
#else
2017-05-31 15:23:27 +00:00
        else {
Allow remote builds without sending the derivation closure
2015-07-17 15:57:40 +00:00
            builder = drv->builder.c_str();
2019-12-05 18:11:09 +00:00
            args.push_back(std::string(baseNameOf(drv->builder)));
2015-01-06 06:27:38 +00:00
        }
2020-01-04 23:41:18 +00:00
#endif
2015-01-06 06:27:38 +00:00
Allow remote builds without sending the derivation closure
2015-07-17 15:57:40 +00:00
        for (auto & i : drv->args)
2016-08-17 13:12:54 +00:00
            args.push_back(rewriteStrings(i, inputRewrites));
2006-12-07 00:42:30 +00:00
2014-08-01 17:38:21 +00:00
        /* Indicate that we managed to set up the build environment. */
2015-08-03 16:04:32 +00:00
        writeFull(STDERR_FILENO, string("\1\n"));
2015-01-06 06:27:38 +00:00
2012-06-25 19:45:16 +00:00
/* Execute the program. This should not return. */
Eliminate the "store" global variable
2016-02-04 13:28:26 +00:00
        if (drv->isBuiltin()) {
2015-07-20 02:30:16 +00:00
            try {
2017-10-24 11:41:52 +00:00
                logger = makeJSONLogger(*logger);
2018-01-19 13:53:34 +00:00
2019-12-05 18:11:09 +00:00
                BasicDerivation & drv2(*drv);
2018-01-19 13:53:34 +00:00
                for (auto & e : drv2.env)
                    e.second = rewriteStrings(e.second, inputRewrites);
2015-07-20 02:30:16 +00:00
                if (drv->builder == "builtin:fetchurl")
2018-01-19 13:53:34 +00:00
                    builtinFetchurl(drv2, netrcData);
2018-03-20 16:28:09 +00:00
                else if (drv->builder == "builtin:buildenv")
                    builtinBuildenv(drv2);
2019-03-27 22:40:35 +00:00
                else if (drv->builder == "builtin:unpack-channel")
                    builtinUnpackChannel(drv2);
2015-07-20 02:30:16 +00:00
                else
2020-04-21 23:07:07 +00:00
                    throw Error("unsupported builtin function '%1%'", string(drv->builder, 8));
2015-07-20 02:30:16 +00:00
                _exit(0);
            } catch (std::exception & e) {
                writeFull(STDERR_FILENO, "error: " + string(e.what()) + "\n");
                _exit(1);
            }
        }
2015-06-09 08:50:55 +00:00
        execve(builder, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
2004-05-11 18:05:44 +00:00
2020-04-21 23:07:07 +00:00
        throw SysError("executing '%1%'", drv->builder);
2012-07-27 13:59:18 +00:00
2012-06-25 19:45:16 +00:00
    } catch (std::exception & e) {
2015-08-03 16:04:32 +00:00
        writeFull(STDERR_FILENO, "\1 while setting up the build environment: " + string(e.what()) + "\n");
2014-08-01 17:38:21 +00:00
        _exit(1);
2008-11-12 11:08:27 +00:00
    }
2004-05-13 22:52:37 +00:00
}
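The "\1\n" written above is a status marker: the reading side of the builder pipe can use it to tell a successfully prepared environment apart from a setup failure, whose message is prefixed with the same byte. A self-contained sketch of that general pattern (this only illustrates the protocol idea over a pipe; it is not the parent-side code from this file, and the messages are placeholders):

#include <cstdio>
#include <string>
#include <unistd.h>

int main()
{
    int fds[2];
    if (pipe(fds) == -1) return 1;

    if (fork() == 0) {
        /* "Child": report success with a marker byte; on failure it would
           write the marker followed by an error message instead. */
        close(fds[0]);
        std::string ok = "\1\n";
        write(fds[1], ok.data(), ok.size());
        _exit(0);
    }

    /* "Parent": read the first chunk and interpret the marker. */
    close(fds[1]);
    char buf[256];
    ssize_t n = read(fds[0], buf, sizeof(buf) - 1);
    if (n > 0 && buf[0] == '\1') {
        if (n == 2)
            printf("environment set up\n");
        else
            printf("setup failed: %.*s\n", (int) (n - 1), buf + 1);
    }
    return 0;
}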
2014-02-17 21:25:15 +00:00
void DerivationGoal::registerOutputs()
2004-05-18 14:52:35 +00:00
{
2009-02-02 17:24:10 +00:00
    /* When using a build hook, the build hook can register the output
       as valid (by doing `nix-store --import'). If so we don't have
2020-08-07 19:09:26 +00:00
       to do anything here.
       We can only early return when the outputs are known a priori. For
       floating content-addressed derivations this isn't the case.
     */
2010-08-25 20:44:28 +00:00
    if (hook) {
2009-02-02 17:24:10 +00:00
        bool allValid = true;
2020-08-14 17:00:13 +00:00
        for (auto & i : drv->outputsAndOptPaths(worker.store)) {
            if (!i.second.second || !worker.store.isValidPath(*i.second.second))
2020-08-07 19:09:26 +00:00
                allValid = false;
        }
2009-02-02 17:24:10 +00:00
        if (allValid) return;
    }
2012-07-27 13:59:18 +00:00
2018-10-22 19:49:56 +00:00
    std::map<std::string, ValidPathInfo> infos;
2014-02-18 09:46:30 +00:00
2015-11-09 22:16:24 +00:00
    /* Set of inodes seen during calls to canonicalisePathMetaData()
       for this build's outputs. This needs to be shared between
       outputs to allow hard links between outputs. */
    InodesSeen inodesSeen;
2016-12-08 20:38:58 +00:00
    Path checkSuffix = ".check";
2019-05-10 20:39:31 +00:00
    bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
2016-01-12 17:25:57 +00:00
2018-02-03 09:04:29 +00:00
    std::exception_ptr delayedException;
Recursive Nix support
2018-10-02 14:01:26 +00:00
    /* The paths that can be referenced are the input closures, the
       output paths, and any paths that have been built via recursive
       Nix calls. */
2019-12-05 18:11:09 +00:00
    StorePathSet referenceablePaths;
2020-06-16 20:20:18 +00:00
    for (auto & p : inputPaths) referenceablePaths.insert(p);
2020-08-07 19:09:26 +00:00
    for (auto & i : scratchOutputs) referenceablePaths.insert(i.second);
2020-06-16 20:20:18 +00:00
    for (auto & p : addedPaths) referenceablePaths.insert(p);
Recursive Nix support
2018-10-02 14:01:26 +00:00
2020-08-07 19:09:26 +00:00
/* FIXME: `needsHashRewrite` should probably be removed, and we should get
   to the real reason why we aren't using the chroot dir. */
auto toRealPathChroot = [ & ] ( const Path & p ) - > Path {
return useChroot & & ! needsHashRewrite ( )
? chrootRootDir + p
: worker . store . toRealPath ( p ) ;
} ;
/* Check whether the output paths were created, and make all
   output paths read-only. Then get the references of each output (that we
   might need to register), so we can topologically sort them. For the ones
   that are most definitely already installed, we just store their final
   name so we can also use it in rewrites. */
StringSet outputsToSort ;
2020-08-11 22:34:09 +00:00
struct AlreadyRegistered { StorePath path ; } ;
struct PerhapsNeedToRegister { StorePathSet refs ; } ;
std : : map < std : : string , std : : variant < AlreadyRegistered , PerhapsNeedToRegister > > outputReferencesIfUnregistered ;
2020-08-07 19:09:26 +00:00
std : : map < std : : string , struct stat > outputStats ;
for ( auto & [ outputName , _ ] : drv - > outputs ) {
auto actualPath = toRealPathChroot ( worker . store . printStorePath ( scratchOutputs . at ( outputName ) ) ) ;
outputsToSort . insert ( outputName ) ;
/* Update the wanted info to remove the outputs we definitely don't need to register. */
auto & initialInfo = initialOutputs . at ( outputName ) ;
/* Don't register if already valid, and not checking */
initialInfo . wanted = buildMode = = bmCheck
| | ! ( initialInfo . known & & initialInfo . known - > isValid ( ) ) ;
if ( ! initialInfo . wanted ) {
2020-08-11 22:34:09 +00:00
outputReferencesIfUnregistered . insert_or_assign (
outputName ,
AlreadyRegistered { . path = initialInfo . known - > path } ) ;
2020-08-07 19:09:26 +00:00
continue ;
2004-06-25 10:21:44 +00:00
}
2004-05-18 14:52:35 +00:00
2005-10-17 16:59:25 +00:00
struct stat st ;
2014-02-18 00:01:14 +00:00
if ( lstat ( actualPath . c_str ( ) , & st ) = = - 1 ) {
2014-02-17 21:25:15 +00:00
if ( errno = = ENOENT )
throw BuildError (
2020-08-07 19:09:26 +00:00
" builder for '%s' failed to produce output path for output '%s' at '%s' " ,
worker . store . printStorePath ( drvPath ) , outputName , actualPath ) ;
2019-12-05 18:11:09 +00:00
throw SysError ( " getting attributes of path '%s' " , actualPath ) ;
2014-02-17 21:25:15 +00:00
}
# ifndef __CYGWIN__
/* Check that the output is not group or world writable, as
that means that someone else can have interfered with the
build . Also , the output should be owned by the build
user . */
if ( ( ! S_ISLNK ( st . st_mode ) & & ( st . st_mode & ( S_IWGRP | S_IWOTH ) ) ) | |
2017-01-25 11:45:38 +00:00
( buildUser & & st . st_uid ! = buildUser - > getUID ( ) ) )
2020-08-07 19:09:26 +00:00
throw BuildError (
" suspicious ownership or permission on '%s' for output '%s'; rejecting this build output " ,
actualPath , outputName ) ;
2014-02-17 21:25:15 +00:00
# endif
2020-08-07 19:09:26 +00:00
/* Canonicalise first. This ensures that the path we're
   rewriting doesn't contain a hard link to /etc/shadow or
   something like that. */
canonicalisePathMetaData ( actualPath , buildUser ? buildUser - > getUID ( ) : - 1 , inodesSeen ) ;
2014-02-17 21:25:15 +00:00
2020-08-24 18:10:58 +00:00
debug ( " scanning for references for output '%s' in temp location '%s' " , outputName , actualPath ) ;
2014-02-17 21:25:15 +00:00
2020-08-07 19:09:26 +00:00
/* Pass blank Sink as we are not ready to hash data at this stage. */
NullSink blank ;
auto references = worker . store . parseStorePathSet (
scanForReferences ( blank , actualPath , worker . store . printStorePathSet ( referenceablePaths ) ) ) ;
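The reference scan above conceptually amounts to searching the serialised output for the hash parts of all candidate paths. A simplified, self-contained sketch of that idea (assuming the whole contents fit in memory; the real scanner works on a stream):
#include <set>
#include <string>
static std::set<std::string> naiveScanForReferences(
    const std::string & contents,                  // e.g. the NAR dump of the output
    const std::set<std::string> & candidateHashes) // hash parts of referenceable paths
{
    std::set<std::string> found;
    for (auto & hash : candidateHashes)
        if (contents.find(hash) != std::string::npos)
            found.insert(hash);
    return found;
}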
2014-02-17 21:25:15 +00:00
2020-08-11 22:34:09 +00:00
outputReferencesIfUnregistered . insert_or_assign (
outputName ,
PerhapsNeedToRegister { . refs = references } ) ;
2020-08-07 19:09:26 +00:00
outputStats . insert_or_assign ( outputName , std : : move ( st ) ) ;
}
auto sortedOutputNames = topoSort ( outputsToSort ,
{ [ & ] ( const std : : string & name ) {
return std : : visit ( overloaded {
/* Since we'll use the already installed versions of these, we
can treat them as leaves and ignore any references they
have . */
2020-08-11 22:34:09 +00:00
[ & ] ( AlreadyRegistered _ ) { return StringSet { } ; } ,
[ & ] ( PerhapsNeedToRegister refs ) {
2020-08-07 19:09:26 +00:00
StringSet referencedOutputs ;
/* FIXME build inverted map up front so no quadratic waste here */
2020-08-11 22:34:09 +00:00
for ( auto & r : refs . refs )
2020-08-07 19:09:26 +00:00
for ( auto & [ o , p ] : scratchOutputs )
if ( r = = p )
referencedOutputs . insert ( o ) ;
return referencedOutputs ;
} ,
2020-08-11 22:34:09 +00:00
} , outputReferencesIfUnregistered . at ( name ) ) ;
2020-08-07 19:09:26 +00:00
} } ,
{ [ & ] ( const std : : string & path , const std : : string & parent ) {
// TODO with more -vvvv also show the temporary paths for manual inspection.
return BuildError (
" cycle detected in build of '%s' in the references of output '%s' from output '%s' " ,
worker . store . printStorePath ( drvPath ) , path , parent ) ;
} } ) ;
std : : reverse ( sortedOutputNames . begin ( ) , sortedOutputNames . end ( ) ) ;
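The call to topoSort above orders the outputs so that an output is processed only after the outputs it references. A generic sketch of such a sort (an assumed stand-in, not the actual helper from libutil), using depth-first traversal with cycle detection:
#include <functional>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>
static std::vector<std::string> topoSortSketch(
    const std::set<std::string> & nodes,
    const std::function<std::set<std::string>(const std::string &)> & getRefs)
{
    std::vector<std::string> sorted;
    std::set<std::string> visited, parents;
    std::function<void(const std::string &)> dfs = [&](const std::string & node) {
        if (parents.count(node)) throw std::runtime_error("cycle detected at " + node);
        if (!visited.insert(node).second) return;
        parents.insert(node);
        for (auto & ref : getRefs(node))
            if (ref != node && nodes.count(ref)) dfs(ref);   // ignore self-references
        parents.erase(node);
        sorted.push_back(node);   // a node is emitted after everything it refers to
    };
    for (auto & node : nodes) dfs(node);
    return sorted;
}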
for ( auto & outputName : sortedOutputNames ) {
auto output = drv - > outputs . at ( outputName ) ;
auto & scratchPath = scratchOutputs . at ( outputName ) ;
2020-08-11 20:49:10 +00:00
auto actualPath = toRealPathChroot ( worker . store . printStorePath ( scratchPath ) ) ;
2020-08-07 19:09:26 +00:00
auto finish = [ & ] ( StorePath finalStorePath ) {
/* Store the final path */
finalOutputs . insert_or_assign ( outputName , finalStorePath ) ;
/* The rewrite rule will be used in downstream outputs that refer to
   this output. This is why the topological sort must be done before
   this for loop. */
if ( scratchPath ! = finalStorePath )
outputRewrites [ std : : string { scratchPath . hashPart ( ) } ] = std : : string { finalStorePath . hashPart ( ) } ;
} ;
std : : optional < StorePathSet > referencesOpt = std : : visit ( overloaded {
2020-08-11 22:34:09 +00:00
[ & ] ( AlreadyRegistered skippedFinalPath ) - > std : : optional < StorePathSet > {
finish ( skippedFinalPath . path ) ;
2020-08-07 19:09:26 +00:00
return std : : nullopt ;
} ,
2020-08-11 22:34:09 +00:00
[ & ] ( PerhapsNeedToRegister r ) - > std : : optional < StorePathSet > {
return r . refs ;
2020-08-07 19:09:26 +00:00
} ,
2020-08-11 22:34:09 +00:00
} , outputReferencesIfUnregistered . at ( outputName ) ) ;
2020-08-07 19:09:26 +00:00
if ( ! referencesOpt )
continue ;
auto references = * referencesOpt ;
auto rewriteOutput = [ & ] ( ) {
/* Apply hash rewriting if necessary. */
if ( ! outputRewrites . empty ( ) ) {
logWarning ( {
. name = " Rewriting hashes " ,
. hint = hintfmt ( " rewriting hashes in '%1%'; cross fingers " , actualPath ) ,
} ) ;
/* FIXME: this is in-memory. */
StringSink sink ;
dumpPath ( actualPath , sink ) ;
deletePath ( actualPath ) ;
sink . s = make_ref < std : : string > ( rewriteStrings ( * sink . s , outputRewrites ) ) ;
StringSource source ( * sink . s ) ;
restorePath ( actualPath , source ) ;
2020-09-23 17:10:16 +00:00
/* FIXME: set proper permissions in restorePath() so
we don ' t have to do another traversal . */
canonicalisePathMetaData ( actualPath , - 1 , inodesSeen ) ;
2020-08-07 19:09:26 +00:00
}
} ;
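The rewriteStrings step used above replaces every occurrence of an old hash part with the corresponding final hash part in the dumped output. A simplified in-memory sketch of that substitution (assumed semantics; the real helper lives in libutil):
#include <map>
#include <string>
static std::string rewriteStringsSketch(
    std::string s, const std::map<std::string, std::string> & rewrites)
{
    for (auto & [from, to] : rewrites) {
        size_t pos = 0;
        while ((pos = s.find(from, pos)) != std::string::npos) {
            s.replace(pos, from.size(), to);   // swap old hash part for the new one
            pos += to.size();
        }
    }
    return s;
}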
2012-07-27 13:59:18 +00:00
2020-08-07 19:09:26 +00:00
auto rewriteRefs = [ & ] ( ) - > std : : pair < bool , StorePathSet > {
/* In the CA case, we need the rewritten refs to calculate the
   final path; therefore we look for a *non-rewritten*
   self-reference, and use a bool rather than try to solve the
   computationally intractable fixed point. */
std : : pair < bool , StorePathSet > res {
false ,
{ } ,
} ;
for ( auto & r : references ) {
auto name = r . name ( ) ;
auto origHash = std : : string { r . hashPart ( ) } ;
if ( r = = scratchPath )
res . first = true ;
else if ( outputRewrites . count ( origHash ) = = 0 )
res . second . insert ( r ) ;
else {
std : : string newRef = outputRewrites . at ( origHash ) ;
newRef + = ' - ' ;
newRef + = name ;
res . second . insert ( StorePath { newRef } ) ;
}
}
return res ;
} ;
2019-12-05 18:11:09 +00:00
2020-08-12 02:23:31 +00:00
auto newInfoFromCA = [ & ] ( const DerivationOutputCAFloating outputHash ) - > ValidPathInfo {
2020-08-07 19:09:26 +00:00
auto & st = outputStats . at ( outputName ) ;
2020-07-08 23:11:39 +00:00
if ( outputHash . method = = FileIngestionMethod : : Flat ) {
2019-10-27 17:45:02 +00:00
/* The output path should be a regular file without execute permission. */
2005-02-22 21:14:41 +00:00
if ( ! S_ISREG ( st . st_mode ) | | ( st . st_mode & S_IXUSR ) ! = 0 )
2006-12-08 17:26:21 +00:00
throw BuildError (
2020-05-11 20:35:30 +00:00
" output path '%1%' should be a non-executable regular file "
" since recursive hashing is not enabled (outputHashMode=flat) " ,
2020-08-07 19:09:26 +00:00
actualPath ) ;
2005-02-22 21:14:41 +00:00
}
2020-08-07 19:09:26 +00:00
rewriteOutput ( ) ;
/* FIXME optimize and deduplicate with addToStore */
std : : string oldHashPart { scratchPath . hashPart ( ) } ;
HashModuloSink caSink { outputHash . hashType , oldHashPart } ;
switch ( outputHash . method ) {
case FileIngestionMethod : : Recursive :
dumpPath ( actualPath , caSink ) ;
break ;
case FileIngestionMethod : : Flat :
readFile ( actualPath , caSink ) ;
break ;
}
auto got = caSink . finish ( ) . first ;
auto refs = rewriteRefs ( ) ;
2020-08-14 17:00:13 +00:00
HashModuloSink narSink { htSHA256 , oldHashPart } ;
dumpPath ( actualPath , narSink ) ;
auto narHashAndSize = narSink . finish ( ) ;
2020-08-07 19:09:26 +00:00
ValidPathInfo newInfo0 {
worker . store . makeFixedOutputPath (
outputHash . method ,
got ,
outputPathName ( drv - > name , outputName ) ,
refs . second ,
refs . first ) ,
2020-08-14 17:00:13 +00:00
narHashAndSize . first ,
2020-08-07 19:09:26 +00:00
} ;
2020-08-14 17:00:13 +00:00
newInfo0 . narSize = narHashAndSize . second ;
2020-08-07 19:09:26 +00:00
newInfo0 . ca = FixedOutputHash {
. method = outputHash . method ,
. hash = got ,
} ;
newInfo0 . references = refs . second ;
if ( refs . first )
newInfo0 . references . insert ( newInfo0 . path ) ;
2018-02-03 09:04:29 +00:00
2020-08-12 02:23:31 +00:00
assert ( newInfo0 . ca ) ;
return newInfo0 ;
} ;
ValidPathInfo newInfo = std : : visit ( overloaded {
[ & ] ( DerivationOutputInputAddressed output ) {
/* input-addressed case */
auto requiredFinalPath = output . path ;
2020-09-23 17:10:16 +00:00
/* Preemptively add rewrite rule for final hash, as that is
2020-08-12 02:23:31 +00:00
what the NAR hash will use rather than normalized self-references */
if ( scratchPath ! = requiredFinalPath )
outputRewrites . insert_or_assign (
std : : string { scratchPath . hashPart ( ) } ,
std : : string { requiredFinalPath . hashPart ( ) } ) ;
rewriteOutput ( ) ;
auto narHashAndSize = hashPath ( htSHA256 , actualPath ) ;
2020-08-14 17:00:13 +00:00
ValidPathInfo newInfo0 { requiredFinalPath , narHashAndSize . first } ;
2020-08-12 02:23:31 +00:00
newInfo0 . narSize = narHashAndSize . second ;
auto refs = rewriteRefs ( ) ;
newInfo0 . references = refs . second ;
if ( refs . first )
newInfo0 . references . insert ( newInfo0 . path ) ;
return newInfo0 ;
} ,
[ & ] ( DerivationOutputCAFixed dof ) {
auto newInfo0 = newInfoFromCA ( DerivationOutputCAFloating {
. method = dof . hash . method ,
. hashType = dof . hash . hash . type ,
} ) ;
/* Check wanted hash */
Hash & wanted = dof . hash . hash ;
assert ( newInfo0 . ca ) ;
auto got = getContentAddressHash ( * newInfo0 . ca ) ;
2020-08-07 19:09:26 +00:00
if ( wanted ! = got ) {
/* Throw an error after registering the path as
valid . */
worker . hashMismatch = true ;
delayedException = std : : make_exception_ptr (
BuildError ( " hash mismatch in fixed-output derivation '%s': \n wanted: %s \n got: %s " ,
worker . store . printStorePath ( drvPath ) ,
wanted . to_string ( SRI , true ) ,
got . to_string ( SRI , true ) ) ) ;
}
2020-08-12 02:23:31 +00:00
return newInfo0 ;
} ,
[ & ] ( DerivationOutputCAFloating dof ) {
return newInfoFromCA ( dof ) ;
} ,
} , output . output ) ;
2020-08-07 19:09:26 +00:00
/* Calculate where we'll move the output files. In the checking case we
   will leave them where they are, for now, rather than move them to
   their usual "final destination". */
auto finalDestPath = worker . store . printStorePath ( newInfo . path ) ;
/* Lock final output path, if not already locked. This happens with
   floating CA derivations and hash-mismatching fixed-output
   derivations. */
PathLocks dynamicOutputLock ;
2020-09-15 15:21:39 +00:00
auto optFixedPath = output . path ( worker . store , drv - > name , outputName ) ;
2020-08-07 19:09:26 +00:00
if ( ! optFixedPath | |
worker . store . printStorePath ( * optFixedPath ) ! = finalDestPath )
{
assert ( newInfo . ca ) ;
dynamicOutputLock . lockPaths ( { worker . store . toRealPath ( finalDestPath ) } ) ;
}
2018-02-03 09:04:29 +00:00
2020-08-07 19:09:26 +00:00
/* Move files, if needed */
if ( worker . store . toRealPath ( finalDestPath ) ! = actualPath ) {
if ( buildMode = = bmRepair ) {
/* Path already exists, need to replace it */
replaceValidPath ( worker . store . toRealPath ( finalDestPath ) , actualPath ) ;
actualPath = worker . store . toRealPath ( finalDestPath ) ;
} else if ( buildMode = = bmCheck ) {
/* Path already exists, and we want to compare, so we leave our
   new path in place. */
2020-08-11 21:25:40 +00:00
} else if ( worker . store . isValidPath ( newInfo . path ) ) {
2020-08-07 19:09:26 +00:00
/* Path already exists because the CA path was produced by
   something else. No moving needed. */
assert ( newInfo . ca ) ;
} else {
2020-09-23 17:10:16 +00:00
auto destPath = worker . store . toRealPath ( finalDestPath ) ;
movePath ( actualPath , destPath ) ;
actualPath = destPath ;
2016-01-31 11:06:45 +00:00
}
2005-01-17 19:01:48 +00:00
}
2014-02-18 00:01:14 +00:00
if ( buildMode = = bmCheck ) {
2020-08-07 19:09:26 +00:00
if ( ! worker . store . isValidPath ( newInfo . path ) ) continue ;
ValidPathInfo oldInfo ( * worker . store . queryPathInfo ( newInfo . path ) ) ;
if ( newInfo . narHash ! = oldInfo . narHash ) {
2019-07-01 22:12:12 +00:00
worker . checkMismatch = true ;
2019-05-11 00:59:39 +00:00
if ( settings . runDiffHook | | settings . keepFailed ) {
2020-09-23 17:10:16 +00:00
auto dst = worker . store . toRealPath ( finalDestPath + checkSuffix ) ;
2016-02-24 16:44:12 +00:00
deletePath ( dst ) ;
2020-09-23 17:10:16 +00:00
movePath ( actualPath , dst ) ;
2019-05-10 20:39:31 +00:00
2019-05-11 00:59:39 +00:00
handleDiffHook (
buildUser ? buildUser - > getUID ( ) : getuid ( ) ,
buildUser ? buildUser - > getGID ( ) : getgid ( ) ,
2020-08-07 19:09:26 +00:00
finalDestPath , dst , worker . store . printStorePath ( drvPath ) , tmpDir ) ;
2019-05-11 00:59:39 +00:00
2019-12-05 18:11:09 +00:00
throw NotDeterministic ( " derivation '%s' may not be deterministic: output '%s' differs from '%s' " ,
2020-08-07 19:09:26 +00:00
worker . store . printStorePath ( drvPath ) , worker . store . toRealPath ( finalDestPath ) , dst ) ;
2016-01-12 15:44:26 +00:00
} else
2019-12-05 18:11:09 +00:00
throw NotDeterministic ( " derivation '%s' may not be deterministic: output '%s' differs " ,
2020-08-07 19:09:26 +00:00
worker . store . printStorePath ( drvPath ) , worker . store . toRealPath ( finalDestPath ) ) ;
2016-01-12 15:44:26 +00:00
}
2016-03-30 15:35:48 +00:00
2019-10-27 17:45:02 +00:00
/* Since we verified the build, it's now ultimately trusted. */
2020-08-07 19:09:26 +00:00
if ( ! oldInfo . ultimate ) {
oldInfo . ultimate = true ;
worker . store . signPathInfo ( oldInfo ) ;
worker . store . registerValidPaths ( { std : : move ( oldInfo ) } ) ;
2016-03-30 15:35:48 +00:00
}
2014-02-18 00:01:14 +00:00
continue ;
}
2019-10-27 17:45:02 +00:00
/* For debugging, print out the referenced and unreferenced paths. */
2015-07-17 17:24:28 +00:00
for ( auto & i : inputPaths ) {
2019-12-05 18:11:09 +00:00
auto j = references . find ( i ) ;
2006-11-13 18:19:05 +00:00
if ( j = = references . end ( ) )
2019-12-05 18:11:09 +00:00
debug ( " unreferenced input: '%1%' " , worker . store . printStorePath ( i ) ) ;
2006-11-13 18:19:05 +00:00
else
2019-12-05 18:11:09 +00:00
debug ( " referenced input: '%1%' " , worker . store . printStorePath ( i ) ) ;
2004-05-13 19:14:49 +00:00
}
2016-01-12 17:25:57 +00:00
if ( curRound = = nrRounds ) {
2016-06-03 13:45:11 +00:00
worker . store . optimisePath ( actualPath ) ; // FIXME: combine with scanForReferences()
2020-08-07 19:09:26 +00:00
worker . markContentsGood ( newInfo . path ) ;
2016-01-12 17:25:57 +00:00
}
2014-02-18 09:46:30 +00:00
2020-08-07 19:09:26 +00:00
newInfo . deriver = drvPath ;
newInfo . ultimate = true ;
worker . store . signPathInfo ( newInfo ) ;
finish ( newInfo . path ) ;
2018-05-03 12:48:28 +00:00
2020-08-07 19:09:26 +00:00
/* If it's a CA path, register it right away. This is necessary if the
   path isn't statically known, so that we can safely unlock it before
   the next iteration. */
if ( newInfo . ca )
worker . store . registerValidPaths ( { newInfo } ) ;
infos . emplace ( outputName , std : : move ( newInfo ) ) ;
2004-05-18 14:52:35 +00:00
}
2004-05-13 19:14:49 +00:00
2014-02-18 00:01:14 +00:00
if ( buildMode = = bmCheck ) return ;
2018-10-22 19:49:56 +00:00
/* Apply output checks. */
checkOutputs ( infos ) ;
2016-01-12 17:25:57 +00:00
/* Compare the result with the previous round, and report which
path is different , if any . */
if ( curRound > 1 & & prevInfos ! = infos ) {
assert ( prevInfos . size ( ) = = infos . size ( ) ) ;
for ( auto i = prevInfos . begin ( ) , j = infos . begin ( ) ; i ! = prevInfos . end ( ) ; + + i , + + j )
if ( ! ( * i = = * j ) ) {
2016-12-07 12:16:06 +00:00
result . isNonDeterministic = true ;
2019-12-05 18:11:09 +00:00
Path prev = worker . store . printStorePath ( i - > second . path ) + checkSuffix ;
2016-12-07 16:57:35 +00:00
bool prevExists = keepPreviousRound & & pathExists ( prev ) ;
2020-05-03 14:01:25 +00:00
hintformat hint = prevExists
? hintfmt ( " output '%s' of '%s' differs from '%s' from previous round " ,
2019-12-05 18:11:09 +00:00
worker . store . printStorePath ( i - > second . path ) , worker . store . printStorePath ( drvPath ) , prev )
2020-05-03 14:01:25 +00:00
: hintfmt ( " output '%s' of '%s' differs from previous round " ,
2019-12-05 18:11:09 +00:00
worker . store . printStorePath ( i - > second . path ) , worker . store . printStorePath ( drvPath ) ) ;
2016-12-07 16:57:35 +00:00
2019-05-11 00:59:39 +00:00
handleDiffHook (
buildUser ? buildUser - > getUID ( ) : getuid ( ) ,
buildUser ? buildUser - > getGID ( ) : getgid ( ) ,
2019-12-05 18:11:09 +00:00
prev , worker . store . printStorePath ( i - > second . path ) ,
worker . store . printStorePath ( drvPath ) , tmpDir ) ;
2016-12-07 16:57:35 +00:00
Explicitly model all settings and fail on unrecognized ones
Previously, the Settings class allowed other code to query for string
properties, which led to a proliferation of code all over the place making
up new options without any sort of central registry of valid options. This
commit pulls all those options back into the central Settings class and
removes the public get() methods, to discourage future abuses like that.
Furthermore, because we know the full set of options ahead of time, we
now fail loudly if someone enters an unrecognized option, thus preventing
subtle typos. With some template fun, we could probably also dump the full
set of options (with documentation, defaults, etc.) to the command line,
but I'm not doing that yet here.
2017-02-22 03:50:18 +00:00
if ( settings . enforceDeterminism )
2020-05-03 14:01:25 +00:00
throw NotDeterministic ( hint ) ;
2020-06-02 14:22:24 +00:00
logError ( {
. name = " Output determinism error " ,
. hint = hint
2020-05-03 14:01:25 +00:00
} ) ;
2016-12-07 16:57:35 +00:00
2016-12-07 14:31:18 +00:00
curRound = nrRounds ; // we know enough, bail out early
2016-01-12 17:25:57 +00:00
}
}
2019-10-27 17:45:02 +00:00
/* If this is the first round of several, then move the output out of the way. */
2016-12-07 16:57:35 +00:00
if ( nrRounds > 1 & & curRound = = 1 & & curRound < nrRounds & & keepPreviousRound ) {
2020-08-07 19:09:26 +00:00
for ( auto & [ _ , outputStorePath ] : finalOutputs ) {
auto path = worker . store . printStorePath ( outputStorePath ) ;
2019-12-05 18:11:09 +00:00
Path prev = path + checkSuffix ;
2016-02-24 16:44:12 +00:00
deletePath ( prev ) ;
2019-12-05 18:11:09 +00:00
Path dst = path + checkSuffix ;
if ( rename ( path . c_str ( ) , dst . c_str ( ) ) )
throw SysError ( " renaming '%s' to '%s' " , path , dst ) ;
2016-01-12 17:25:57 +00:00
}
}
2015-11-09 22:16:24 +00:00
if ( curRound < nrRounds ) {
2019-12-05 18:11:09 +00:00
prevInfos = std : : move ( infos ) ;
2015-11-09 22:16:24 +00:00
return ;
}
2016-12-08 20:38:58 +00:00
/* Remove the .check directories if we're done. FIXME: keep them
2016-12-07 16:57:35 +00:00
if the result was not deterministic? */
if ( curRound = = nrRounds ) {
2020-08-07 19:09:26 +00:00
for ( auto & [ _ , outputStorePath ] : finalOutputs ) {
Path prev = worker . store . printStorePath ( outputStorePath ) + checkSuffix ;
2016-12-07 16:57:35 +00:00
deletePath ( prev ) ;
}
}
2005-01-19 11:16:11 +00:00
/* Register each output path as valid, and register the sets of
2011-12-30 14:47:14 +00:00
paths referenced by each of them . If there are cycles in the
outputs , this will fail . */
2020-09-09 19:13:21 +00:00
{
ValidPathInfos infos2 ;
for ( auto & [ outputName , newInfo ] : infos ) {
infos2 . push_back ( newInfo ) ;
}
worker . store . registerValidPaths ( infos2 ) ;
2018-10-22 19:49:56 +00:00
}
2018-02-03 09:04:29 +00:00
/* In case of a fixed-output derivation hash mismatch, throw an
exception now that we have registered the output as valid . */
if ( delayedException )
std : : rethrow_exception ( delayedException ) ;
2020-09-15 14:26:56 +00:00
/* If we made it this far, we are sure the output matches the derivation
   (since the delayedException would be a fixed-output CA mismatch). That
   means it's safe to link the derivation to the output hash. We must do
   that for floating CA derivations, which otherwise couldn't be cached,
   but it's fine to do in all cases. */
2020-09-09 19:13:21 +00:00
bool isCaFloating = drv - > type ( ) = = DerivationType : : CAFloating ;
2020-09-28 15:43:56 +00:00
auto drvPathResolved = drvPath ;
2020-09-09 19:13:21 +00:00
if ( ! useDerivation & & isCaFloating ) {
/* Once a floating CA derivation reaches this point, it
   must already be resolved, so we don't bother trying to
   downcast drv to get what would just be an empty
   inputDrvs field. */
Derivation drv2 { * drv } ;
2020-09-28 15:43:56 +00:00
drvPathResolved = writeDerivation ( worker . store , drv2 ) ;
2020-09-15 14:26:56 +00:00
}
2020-09-09 19:13:21 +00:00
if ( useDerivation | | isCaFloating )
for ( auto & [ outputName , newInfo ] : infos )
2020-09-28 15:43:56 +00:00
worker . store . linkDeriverToPath ( drvPathResolved , outputName , newInfo . path ) ;
2004-05-13 22:52:37 +00:00
}
2018-10-22 19:49:56 +00:00
void DerivationGoal : : checkOutputs ( const std : : map < Path , ValidPathInfo > & outputs )
{
std : : map < Path , const ValidPathInfo & > outputsByPath ;
for ( auto & output : outputs )
2019-12-05 18:11:09 +00:00
outputsByPath . emplace ( worker . store . printStorePath ( output . second . path ) , output . second ) ;
2018-10-22 19:49:56 +00:00
for ( auto & output : outputs ) {
auto & outputName = output . first ;
auto & info = output . second ;
struct Checks
{
2018-10-27 13:40:09 +00:00
bool ignoreSelfRefs = false ;
2019-02-12 12:43:32 +00:00
std : : optional < uint64_t > maxSize , maxClosureSize ;
std : : optional < Strings > allowedReferences , allowedRequisites , disallowedReferences , disallowedRequisites ;
2018-10-22 19:49:56 +00:00
} ;
/* Compute the closure and closure size of some output. This
is slightly tricky because some of its references ( namely
other outputs ) may not be valid yet . */
2019-12-05 18:11:09 +00:00
auto getClosure = [ & ] ( const StorePath & path )
2018-10-22 19:49:56 +00:00
{
uint64_t closureSize = 0 ;
2019-12-05 18:11:09 +00:00
StorePathSet pathsDone ;
std : : queue < StorePath > pathsLeft ;
2020-06-16 20:20:18 +00:00
pathsLeft . push ( path ) ;
2018-10-22 19:49:56 +00:00
while ( ! pathsLeft . empty ( ) ) {
2020-06-16 20:20:18 +00:00
auto path = pathsLeft . front ( ) ;
2018-10-22 19:49:56 +00:00
pathsLeft . pop ( ) ;
2020-06-16 20:20:18 +00:00
if ( ! pathsDone . insert ( path ) . second ) continue ;
2018-10-22 19:49:56 +00:00
2019-12-05 18:11:09 +00:00
auto i = outputsByPath . find ( worker . store . printStorePath ( path ) ) ;
2018-10-22 19:49:56 +00:00
if ( i ! = outputsByPath . end ( ) ) {
closureSize + = i - > second . narSize ;
for ( auto & ref : i - > second . references )
2020-06-16 20:20:18 +00:00
pathsLeft . push ( ref ) ;
2018-10-22 19:49:56 +00:00
} else {
auto info = worker . store . queryPathInfo ( path ) ;
closureSize + = info - > narSize ;
for ( auto & ref : info - > references )
2020-06-16 20:20:18 +00:00
pathsLeft . push ( ref ) ;
2018-10-22 19:49:56 +00:00
}
}
2019-12-05 18:11:09 +00:00
return std : : make_pair ( std : : move ( pathsDone ) , closureSize ) ;
2018-10-22 19:49:56 +00:00
} ;
auto applyChecks = [ & ] ( const Checks & checks )
{
if ( checks . maxSize & & info . narSize > * checks . maxSize )
throw BuildError ( " path '%s' is too large at %d bytes; limit is %d bytes " ,
2019-12-05 18:11:09 +00:00
worker . store . printStorePath ( info . path ) , info . narSize , * checks . maxSize ) ;
2018-10-22 19:49:56 +00:00
if ( checks . maxClosureSize ) {
uint64_t closureSize = getClosure ( info . path ) . second ;
if ( closureSize > * checks . maxClosureSize )
throw BuildError ( " closure of path '%s' is too large at %d bytes; limit is %d bytes " ,
2019-12-05 18:11:09 +00:00
worker . store . printStorePath ( info . path ) , closureSize , * checks . maxClosureSize ) ;
2018-10-22 19:49:56 +00:00
}
2019-02-12 12:43:32 +00:00
auto checkRefs = [ & ] ( const std : : optional < Strings > & value , bool allowed , bool recursive )
2018-10-27 13:40:09 +00:00
{
if ( ! value ) return ;
2020-08-07 19:09:26 +00:00
/* Parse a list of reference specifiers. Each element must
either be a store path , or the symbolic name of the output
of the derivation ( such as ` out ' ) . */
StorePathSet spec ;
for ( auto & i : * value ) {
if ( worker . store . isStorePath ( i ) )
spec . insert ( worker . store . parseStorePath ( i ) ) ;
else if ( finalOutputs . count ( i ) )
spec . insert ( finalOutputs . at ( i ) ) ;
else throw BuildError ( " derivation contains an illegal reference specifier '%s' " , i ) ;
}
2018-10-27 13:40:09 +00:00
2020-03-30 22:31:51 +00:00
auto used = recursive
2020-06-16 20:20:18 +00:00
? getClosure ( info . path ) . first
: info . references ;
2018-10-27 13:40:09 +00:00
if ( recursive & & checks . ignoreSelfRefs )
used . erase ( info . path ) ;
2019-12-05 18:11:09 +00:00
StorePathSet badPaths ;
2018-10-27 13:40:09 +00:00
for ( auto & i : used )
if ( allowed ) {
if ( ! spec . count ( i ) )
2020-06-16 20:20:18 +00:00
badPaths . insert ( i ) ;
2018-10-27 13:40:09 +00:00
} else {
if ( spec . count ( i ) )
2020-06-16 20:20:18 +00:00
badPaths . insert ( i ) ;
2018-10-27 13:40:09 +00:00
}
if ( ! badPaths . empty ( ) ) {
string badPathsStr ;
for ( auto & i : badPaths ) {
badPathsStr + = " \n " ;
2019-12-05 18:11:09 +00:00
badPathsStr + = worker . store . printStorePath ( i ) ;
2018-10-27 13:40:09 +00:00
}
2019-12-05 18:11:09 +00:00
throw BuildError ( " output '%s' is not allowed to refer to the following paths:%s " ,
worker . store . printStorePath ( info . path ) , badPathsStr ) ;
2018-10-27 13:40:09 +00:00
}
} ;
2018-10-22 19:49:56 +00:00
checkRefs ( checks . allowedReferences , true , false ) ;
checkRefs ( checks . allowedRequisites , true , true ) ;
checkRefs ( checks . disallowedReferences , false , false ) ;
checkRefs ( checks . disallowedRequisites , false , true ) ;
} ;
if ( auto structuredAttrs = parsedDrv - > getStructuredAttrs ( ) ) {
auto outputChecks = structuredAttrs - > find ( " outputChecks " ) ;
if ( outputChecks ! = structuredAttrs - > end ( ) ) {
auto output = outputChecks - > find ( outputName ) ;
if ( output ! = outputChecks - > end ( ) ) {
Checks checks ;
auto maxSize = output - > find ( " maxSize " ) ;
if ( maxSize ! = output - > end ( ) )
checks . maxSize = maxSize - > get < uint64_t > ( ) ;
auto maxClosureSize = output - > find ( " maxClosureSize " ) ;
if ( maxClosureSize ! = output - > end ( ) )
checks . maxClosureSize = maxClosureSize - > get < uint64_t > ( ) ;
2019-02-12 12:43:32 +00:00
auto get = [ & ] ( const std : : string & name ) - > std : : optional < Strings > {
2018-10-22 19:49:56 +00:00
auto i = output - > find ( name ) ;
if ( i ! = output - > end ( ) ) {
Strings res ;
for ( auto j = i - > begin ( ) ; j ! = i - > end ( ) ; + + j ) {
if ( ! j - > is_string ( ) )
2019-12-05 18:11:09 +00:00
throw Error ( " attribute '%s' of derivation '%s' must be a list of strings " , name , worker . store . printStorePath ( drvPath ) ) ;
2018-10-22 19:49:56 +00:00
res . push_back ( j - > get < std : : string > ( ) ) ;
}
return res ;
}
return { } ;
} ;
checks . allowedReferences = get ( " allowedReferences " ) ;
checks . allowedRequisites = get ( " allowedRequisites " ) ;
checks . disallowedReferences = get ( " disallowedReferences " ) ;
checks . disallowedRequisites = get ( " disallowedRequisites " ) ;
applyChecks ( checks ) ;
}
}
} else {
// legacy non-structured-attributes case
Checks checks ;
2018-10-27 13:40:09 +00:00
checks . ignoreSelfRefs = true ;
2018-10-22 19:49:56 +00:00
checks . allowedReferences = parsedDrv - > getStringsAttr ( " allowedReferences " ) ;
checks . allowedRequisites = parsedDrv - > getStringsAttr ( " allowedRequisites " ) ;
checks . disallowedReferences = parsedDrv - > getStringsAttr ( " disallowedReferences " ) ;
checks . disallowedRequisites = parsedDrv - > getStringsAttr ( " disallowedRequisites " ) ;
applyChecks ( checks ) ;
}
}
}
2008-11-12 11:08:27 +00:00
Path DerivationGoal : : openLogFile ( )
2004-05-13 19:14:49 +00:00
{
2013-09-02 09:58:18 +00:00
logSize = 0 ;
2012-07-30 23:55:41 +00:00
if ( ! settings . keepLog ) return " " ;
2012-07-27 13:59:18 +00:00
2019-12-05 18:11:09 +00:00
auto baseName = std : : string ( baseNameOf ( worker . store . printStorePath ( drvPath ) ) ) ;
2013-03-08 00:24:59 +00:00
2004-05-13 19:14:49 +00:00
/* Create a log file. */
2017-04-13 13:55:38 +00:00
Path dir = fmt ( " %s/%s/%s/ " , worker . store . logDir , worker . store . drvsLogDir , string ( baseName , 0 , 2 ) ) ;
2005-03-24 17:46:38 +00:00
createDirs ( dir ) ;
2012-05-30 14:12:29 +00:00
2017-04-13 13:55:38 +00:00
Path logFileName = fmt ( " %s/%s%s " , dir , string ( baseName , 2 ) ,
settings . compressLog ? " .bz2 " : " " ) ;
2012-05-30 14:12:29 +00:00
2016-05-04 13:46:25 +00:00
fdLogFile = open ( logFileName . c_str ( ) , O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC , 0666 ) ;
2020-04-21 23:07:07 +00:00
if ( ! fdLogFile ) throw SysError ( " creating log file '%1%' " , logFileName ) ;
2012-07-17 13:40:12 +00:00
2016-07-11 19:44:44 +00:00
logFileSink = std : : make_shared < FdSink > ( fdLogFile . get ( ) ) ;
2012-05-30 14:12:29 +00:00
2016-05-04 13:46:25 +00:00
if ( settings . compressLog )
logSink = std : : shared_ptr < CompressionSink > ( makeCompressionSink ( " bzip2 " , * logFileSink ) ) ;
else
logSink = logFileSink ;
2004-05-13 19:14:49 +00:00
2016-05-04 13:46:25 +00:00
return logFileName ;
2004-05-13 19:14:49 +00:00
}
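For reference, the layout produced by openLogFile() above puts each build log under a two-character shard of the .drv base name. A small sketch reproducing that naming scheme (the function name is illustrative, not an actual helper in this file):
#include <string>
static std::string logFileFor(const std::string & logDir,
    const std::string & drvsLogDir, const std::string & drvBaseName,
    bool compressed)
{
    return logDir + "/" + drvsLogDir + "/" + drvBaseName.substr(0, 2) + "/"
        + drvBaseName.substr(2) + (compressed ? ".bz2" : "");
}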
2012-05-30 14:12:29 +00:00
void DerivationGoal : : closeLogFile ( )
{
2016-05-04 13:46:25 +00:00
auto logSink2 = std : : dynamic_pointer_cast < CompressionSink > ( logSink ) ;
if ( logSink2 ) logSink2 - > finish ( ) ;
if ( logFileSink ) logFileSink - > flush ( ) ;
logSink = logFileSink = 0 ;
2016-07-11 19:44:44 +00:00
fdLogFile = - 1 ;
2012-05-30 14:12:29 +00:00
}
2005-01-19 11:16:11 +00:00
void DerivationGoal : : deleteTmpDir ( bool force )
2004-05-13 19:14:49 +00:00
{
2004-06-18 18:09:32 +00:00
if ( tmpDir ! = " " ) {
2017-02-16 14:42:49 +00:00
/* Don't keep temporary directories for builtins because they
might have privileged stuff ( like a copy of netrc ) . */
if ( settings . keepFailed & & ! force & & ! drv - > isBuiltin ( ) ) {
2019-12-05 18:11:09 +00:00
printError ( " note: keeping build directory '%s' " , tmpDir ) ;
2012-07-26 19:04:40 +00:00
chmod ( tmpDir . c_str ( ) , 0755 ) ;
2006-12-07 23:27:40 +00:00
}
2004-06-18 18:09:32 +00:00
else
2013-11-14 10:57:37 +00:00
deletePath ( tmpDir ) ;
2004-06-18 18:09:32 +00:00
tmpDir = " " ;
}
2004-05-11 18:05:44 +00:00
}
2005-10-17 15:33:24 +00:00
void DerivationGoal : : handleChildOutput ( int fd , const string & data )
2004-06-29 09:41:50 +00:00
{
2016-07-11 19:44:44 +00:00
if ( ( hook & & fd = = hook - > builderOut . readSide . get ( ) ) | |
( ! hook & & fd = = builderOut . readSide . get ( ) ) )
2010-08-31 12:36:24 +00:00
{
2013-09-02 09:58:18 +00:00
logSize + = data . size ( ) ;
if ( settings . maxLogSize & & logSize > settings . maxLogSize ) {
2015-10-06 15:33:30 +00:00
killChild ( ) ;
2020-06-15 17:25:35 +00:00
done (
BuildResult : : LogLimitExceeded ,
Error ( " %s killed after writing more than %d bytes of log output " ,
getName ( ) , settings . maxLogSize ) ) ;
2013-09-02 09:58:18 +00:00
return ;
}
2016-04-25 14:47:46 +00:00
2016-04-25 17:18:28 +00:00
for ( auto c : data )
if ( c = = ' \r ' )
2016-04-28 12:27:00 +00:00
currentLogLinePos = 0 ;
2016-04-25 17:18:28 +00:00
else if ( c = = ' \n ' )
flushLine ( ) ;
2016-04-28 12:27:00 +00:00
else {
if ( currentLogLinePos > = currentLogLine . size ( ) )
currentLogLine . resize ( currentLogLinePos + 1 ) ;
currentLogLine [ currentLogLinePos + + ] = c ;
}
2016-04-25 14:47:46 +00:00
2016-05-04 13:46:25 +00:00
if ( logSink ) ( * logSink ) ( data ) ;
2010-08-31 12:36:24 +00:00
}
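The '\r' handling above lets builders that draw progress bars overwrite the current log line instead of flooding the log with one line per update. A minimal sketch of the same buffering idea (an illustrative standalone type, not the goal's actual fields):
#include <functional>
#include <string>
struct LineBufferSketch {
    std::string line;
    size_t pos = 0;
    /* Feed one character; call `flush` with the completed line on '\n'. */
    void feed(char c, const std::function<void(const std::string &)> & flush)
    {
        if (c == '\r') pos = 0;   // rewind: following characters overwrite the line
        else if (c == '\n') { flush(line); line.clear(); pos = 0; }
        else {
            if (pos >= line.size()) line.resize(pos + 1);
            line[pos++] = c;
        }
    }
};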
2017-10-24 11:41:52 +00:00
if ( hook & & fd = = hook - > fromHook . readSide . get ( ) ) {
for ( auto c : data )
if ( c = = ' \n ' ) {
2017-10-24 12:47:23 +00:00
handleJSONLogMessage ( currentHookLine , worker . act , hook - > activities , true ) ;
2017-10-24 11:41:52 +00:00
currentHookLine . clear ( ) ;
} else
currentHookLine + = c ;
}
2005-10-17 15:33:24 +00:00
}
void DerivationGoal : : handleEOF ( int fd )
{
2016-04-28 12:27:00 +00:00
if ( ! currentLogLine . empty ( ) ) flushLine ( ) ;
2010-08-25 20:44:28 +00:00
worker . wakeUp ( shared_from_this ( ) ) ;
2004-06-29 09:41:50 +00:00
}
2016-04-25 14:47:46 +00:00
void DerivationGoal : : flushLine ( )
{
2017-10-24 12:47:23 +00:00
if ( handleJSONLogMessage ( currentLogLine , * act , builderActivities , false ) )
2017-10-24 11:41:52 +00:00
;
2017-08-21 10:01:21 +00:00
2016-04-25 14:47:46 +00:00
else {
2020-06-05 16:20:11 +00:00
logTail . push_back ( currentLogLine ) ;
if ( logTail . size ( ) > settings . logLines ) logTail . pop_front ( ) ;
2017-08-21 10:01:21 +00:00
act - > result ( resBuildLogLine , currentLogLine ) ;
2016-04-25 14:47:46 +00:00
}
2017-08-21 10:01:21 +00:00
2016-04-25 14:47:46 +00:00
currentLogLine = " " ;
2016-04-28 12:27:00 +00:00
currentLogLinePos = 0 ;
2016-04-25 14:47:46 +00:00
}
2020-08-20 18:14:12 +00:00
std : : map < std : : string , std : : optional < StorePath > > DerivationGoal : : queryPartialDerivationOutputMap ( )
2004-06-18 18:09:32 +00:00
{
2020-09-09 18:55:43 +00:00
if ( ! useDerivation | | drv - > type ( ) ! = DerivationType : : CAFloating ) {
2020-08-07 19:09:26 +00:00
std : : map < std : : string , std : : optional < StorePath > > res ;
for ( auto & [ name , output ] : drv - > outputs )
2020-09-15 15:21:39 +00:00
res . insert_or_assign ( name , output . path ( worker . store , drv - > name , name ) ) ;
2020-08-07 19:09:26 +00:00
return res ;
} else {
2020-08-20 18:14:12 +00:00
return worker . store . queryPartialDerivationOutputMap ( drvPath ) ;
2020-08-07 19:09:26 +00:00
}
}
2020-08-20 18:14:12 +00:00
OutputPathMap DerivationGoal : : queryDerivationOutputMap ( )
2020-08-07 19:09:26 +00:00
{
2020-09-09 18:55:43 +00:00
if ( ! useDerivation | | drv - > type ( ) ! = DerivationType : : CAFloating ) {
2020-08-07 19:09:26 +00:00
OutputPathMap res ;
2020-08-14 17:00:13 +00:00
for ( auto & [ name , output ] : drv - > outputsAndOptPaths ( worker . store ) )
res . insert_or_assign ( name , * output . second ) ;
2020-08-07 19:09:26 +00:00
return res ;
} else {
2020-08-20 18:14:12 +00:00
return worker . store . queryDerivationOutputMap ( drvPath ) ;
2020-08-07 19:09:26 +00:00
}
}
void DerivationGoal : : checkPathValidity ( )
{
bool checkHash = buildMode = = bmRepair ;
2020-08-20 18:14:12 +00:00
for ( auto & i : queryPartialDerivationOutputMap ( ) ) {
2020-09-04 15:15:51 +00:00
InitialOutput info {
2020-08-07 19:09:26 +00:00
. wanted = wantOutput ( i . first , wantedOutputs ) ,
} ;
if ( i . second ) {
auto outputPath = * i . second ;
2020-09-04 15:15:51 +00:00
info . known = {
2020-08-07 19:09:26 +00:00
. path = outputPath ,
2020-09-15 15:19:45 +00:00
. status = ! worker . store . isValidPath ( outputPath )
? PathStatus : : Absent
: ! checkHash | | worker . pathContentsGood ( outputPath )
? PathStatus : : Valid
: PathStatus : : Corrupt ,
2020-08-07 19:09:26 +00:00
} ;
}
2020-09-04 15:15:51 +00:00
initialOutputs . insert_or_assign ( i . first , info ) ;
2012-10-02 21:13:46 +00:00
}
2004-06-28 10:42:57 +00:00
}
2020-08-07 19:09:26 +00:00
StorePath DerivationGoal : : makeFallbackPath ( std : : string_view outputName )
{
return worker . store . makeStorePath (
" rewrite: " + std : : string ( drvPath . to_string ( ) ) + " :name: " + std : : string ( outputName ) ,
Hash ( htSHA256 ) , outputPathName ( drv - > name , outputName ) ) ;
}
StorePath DerivationGoal : : makeFallbackPath ( const StorePath & path )
2012-10-02 21:13:46 +00:00
{
2020-08-07 19:09:26 +00:00
return worker . store . makeStorePath (
2019-12-05 18:11:09 +00:00
" rewrite: " + std : : string ( drvPath . to_string ( ) ) + " : " + std : : string ( path . to_string ( ) ) ,
Hash ( htSHA256 ) , path . name ( ) ) ;
2012-10-02 21:13:46 +00:00
}
2020-06-15 17:25:35 +00:00
void DerivationGoal : : done ( BuildResult : : Status status , std : : optional < Error > ex )
2015-07-20 01:15:45 +00:00
{
result . status = status ;
2020-06-15 17:25:35 +00:00
if ( ex )
result . errorMsg = ex - > what ( ) ;
amDone ( result . success ( ) ? ecSuccess : ecFailed , ex ) ;
2015-07-20 01:15:45 +00:00
if ( result . status = = BuildResult : : TimedOut )
worker . timedOut = true ;
2016-04-08 16:16:53 +00:00
if ( result . status = = BuildResult : : PermanentFailure )
2015-07-20 01:15:45 +00:00
worker . permanentFailure = true ;
2017-08-15 13:31:59 +00:00
mcExpectedBuilds . reset ( ) ;
mcRunningBuilds . reset ( ) ;
if ( result . success ( ) ) {
if ( status = = BuildResult : : Built )
worker . doneBuilds + + ;
} else {
if ( status ! = BuildResult : : DependencyFailed )
worker . failedBuilds + + ;
}
worker . updateProgress ( ) ;
2015-07-20 01:15:45 +00:00
}
2004-06-18 18:09:32 +00:00
//////////////////////////////////////////////////////////////////////
2003-07-20 19:29:38 +00:00
2004-06-18 18:09:32 +00:00
class SubstitutionGoal : public Goal
{
2007-08-12 00:29:28 +00:00
friend class Worker ;
2012-07-27 13:59:18 +00:00
2004-06-18 18:09:32 +00:00
private :
/* The store path that should be realised through a substitute. */
2019-12-05 18:11:09 +00:00
StorePath storePath ;
2003-08-20 11:30:45 +00:00
2020-07-02 15:12:05 +00:00
/* The path under which the substituter refers to this path. This will be
 * different when the stores have different names. */
std : : optional < StorePath > subPath ;
2007-08-12 00:29:28 +00:00
/* The remaining substituters. */
2016-04-29 11:57:08 +00:00
std : : list < ref < Store > > subs ;
2004-06-20 19:17:54 +00:00
2007-08-12 00:29:28 +00:00
/* The current substituter. */
2016-04-29 11:57:08 +00:00
std : : shared_ptr < Store > sub ;
2004-06-20 19:17:54 +00:00
2018-06-05 14:04:41 +00:00
/* Whether a substituter failed. */
bool substituterFailed = false ;
2012-07-08 22:39:24 +00:00
2008-08-02 12:54:35 +00:00
/* Path info returned by the substituter's query info operation. */
2016-04-29 11:57:08 +00:00
std : : shared_ptr < const ValidPathInfo > info ;
2005-01-25 17:08:52 +00:00
2012-07-27 16:16:02 +00:00
/* Pipe for the substituter's standard output. */
Pipe outPipe ;
2016-04-29 11:57:08 +00:00
/* The substituter thread. */
std : : thread thr ;
2004-06-20 19:17:54 +00:00
2016-04-29 11:57:08 +00:00
std : : promise < void > promise ;
2012-07-27 13:59:18 +00:00
2012-10-02 18:08:59 +00:00
/* Whether to try to repair a valid path. */
2017-06-28 16:11:01 +00:00
RepairFlag repair ;
2012-10-02 18:08:59 +00:00
/* Location where we're downloading the substitute. Differs from
storePath when doing a repair . */
Path destPath ;
2017-08-14 20:12:36 +00:00
std : : unique_ptr < MaintainCount < uint64_t > > maintainExpectedSubstitutions ,
2017-08-14 20:42:17 +00:00
maintainRunningSubstitutions , maintainExpectedNar , maintainExpectedDownload ;
2017-08-14 18:14:55 +00:00
2004-06-18 18:09:32 +00:00
typedef void ( SubstitutionGoal : : * GoalState ) ( ) ;
GoalState state ;
2003-08-20 11:30:45 +00:00
2020-06-13 05:07:42 +00:00
/* Content address for recomputing store path */
2020-06-22 17:08:11 +00:00
std : : optional < ContentAddress > ca ;
2020-06-13 05:07:42 +00:00
2004-06-18 18:09:32 +00:00
public :
2020-06-22 17:08:11 +00:00
SubstitutionGoal ( const StorePath & storePath , Worker & worker , RepairFlag repair = NoRepair , std : : optional < ContentAddress > ca = std : : nullopt ) ;
2004-06-25 15:36:09 +00:00
~ SubstitutionGoal ( ) ;
* Change the abstract syntax of slices. It used to be that ids were used as
keys to reference slice elements, e.g.,
Slice(["1ef7..."], [("/nix/store/1ef7...-foo", "1ef7", ["8c99..."]), ...])
This was wrong, since ids represent contents, not locations. Therefore we
now have:
Slice(["/nix/store/1ef7..."], [("/nix/store/1ef7...-foo", "1ef7", ["/nix/store/8c99-..."]), ...])
* Fix a bug in the computation of slice closures that could cause slice
elements to be duplicated.
2003-08-20 12:39:56 +00:00
2020-06-15 17:25:35 +00:00
void timedOut ( Error & & ex ) override { abort ( ) ; } ;
2012-07-27 13:59:18 +00:00
2017-05-30 12:35:50 +00:00
string key ( ) override
2014-11-24 15:48:04 +00:00
{
/* "a$" ensures substitution goals happen before derivation
goals . */
2019-12-05 18:11:09 +00:00
return " a$ " + std : : string ( storePath . name ( ) ) + " $ " + worker . store . printStorePath ( storePath ) ;
2014-11-24 15:48:04 +00:00
}
2017-05-30 12:35:50 +00:00
void work ( ) override ;
2003-08-20 11:30:45 +00:00
2004-06-18 18:09:32 +00:00
/* The states. */
void init ( ) ;
2004-06-20 19:17:54 +00:00
void tryNext ( ) ;
2007-08-12 00:29:28 +00:00
void gotInfo ( ) ;
void referencesValid ( ) ;
2004-06-22 09:00:31 +00:00
void tryToRun ( ) ;
2004-06-20 19:17:54 +00:00
void finished ( ) ;
2004-06-22 17:04:10 +00:00
2004-06-29 09:41:50 +00:00
/* Callback used by the worker to write to the log. */
2017-05-30 12:35:50 +00:00
void handleChildOutput ( int fd , const string & data ) override ;
void handleEOF ( int fd ) override ;
2012-06-27 20:58:15 +00:00
2020-06-16 20:20:18 +00:00
StorePath getStorePath ( ) { return storePath ; }
2004-06-18 18:09:32 +00:00
} ;
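SubstitutionGoal (like DerivationGoal) is driven as a small state machine: the current state is a pointer to a member function, and work() simply invokes it. A stripped-down sketch of the idiom (hypothetical class, for illustration only):
class DemoGoal
{
    typedef void (DemoGoal::*State)();
    void init()     { /* ... do some work, then advance ... */ state = &DemoGoal::finished; }
    void finished() { /* terminal state: nothing left to do */ }
    State state = &DemoGoal::init;
public:
    void work() { (this->*state)(); }   // run the current state
};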
2003-08-20 11:30:45 +00:00
2004-06-18 18:09:32 +00:00
2020-06-22 17:08:11 +00:00
SubstitutionGoal : : SubstitutionGoal ( const StorePath & storePath , Worker & worker , RepairFlag repair , std : : optional < ContentAddress > ca )
2005-01-19 11:16:11 +00:00
: Goal ( worker )
2020-06-16 20:20:18 +00:00
, storePath ( storePath )
2012-10-02 18:08:59 +00:00
, repair ( repair )
2020-06-13 05:07:42 +00:00
, ca ( ca )
2004-06-18 18:09:32 +00:00
{
state = & SubstitutionGoal : : init ;
2020-01-06 21:18:00 +00:00
name = fmt ( " substitution of '%s' " , worker . store . printStorePath ( this - > storePath ) ) ;
2005-02-18 09:50:20 +00:00
trace ( " created " ) ;
2017-08-14 20:12:36 +00:00
maintainExpectedSubstitutions = std : : make_unique < MaintainCount < uint64_t > > ( worker . expectedSubstitutions ) ;
2004-06-18 18:09:32 +00:00
}
2004-06-25 15:36:09 +00:00
SubstitutionGoal : : ~ SubstitutionGoal ( )
{
2016-04-29 11:57:08 +00:00
try {
if ( thr . joinable ( ) ) {
2016-08-30 13:45:39 +00:00
// FIXME: signal worker thread to quit.
2016-04-29 11:57:08 +00:00
thr . join ( ) ;
2016-08-30 13:45:39 +00:00
worker . childTerminated ( this ) ;
2016-04-29 11:57:08 +00:00
}
} catch ( . . . ) {
ignoreException ( ) ;
2006-12-08 18:41:48 +00:00
}
}
2004-06-18 18:09:32 +00:00
void SubstitutionGoal : : work ( )
{
( this - > * state ) ( ) ;
}
void SubstitutionGoal : : init ( )
{
2004-06-22 17:04:10 +00:00
trace ( " init " ) ;
2004-06-18 18:09:32 +00:00
2008-06-09 13:52:45 +00:00
worker . store . addTempRoot ( storePath ) ;
2012-07-27 13:59:18 +00:00
2004-06-18 18:09:32 +00:00
/* If the path already exists we're done. */
2012-10-02 18:08:59 +00:00
if ( ! repair & & worker . store . isValidPath ( storePath ) ) {
2006-12-08 17:26:21 +00:00
amDone ( ecSuccess ) ;
2004-06-18 18:09:32 +00:00
return ;
2003-08-20 11:30:45 +00:00
}
2012-07-30 23:55:41 +00:00
if ( settings . readOnlyMode )
2019-12-05 18:11:09 +00:00
throw Error ( " cannot substitute path '%s' - no write access to the Nix store " , worker . store . printStorePath ( storePath ) ) ;
2009-12-09 17:45:22 +00:00
2016-06-02 12:25:07 +00:00
subs = settings . useSubstitutes ? getDefaultSubstituters ( ) : std : : list < ref < Store > > ( ) ;
2012-07-27 13:59:18 +00:00
2008-08-04 13:15:35 +00:00
tryNext ( ) ;
}
void SubstitutionGoal : : tryNext ( )
{
trace ( " trying next substituter " ) ;
if ( subs . size ( ) = = 0 ) {
/* None left. Terminate this goal and let someone else deal
with it . */
2019-12-05 18:11:09 +00:00
debug ( " path '%s' is required, but there is no substituter that can build it " , worker . store . printStorePath ( storePath ) ) ;
2016-01-06 21:07:59 +00:00
2012-07-08 22:39:24 +00:00
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build . */
2018-06-05 14:04:41 +00:00
amDone ( substituterFailed ? ecFailed : ecNoSubstituters ) ;
2017-08-15 13:31:59 +00:00
2018-06-05 14:04:41 +00:00
if ( substituterFailed ) {
2017-08-15 13:31:59 +00:00
worker . failedSubstitutions + + ;
worker . updateProgress ( ) ;
}
2007-08-12 00:29:28 +00:00
return ;
}
2008-08-04 13:15:35 +00:00
sub = subs . front ( ) ;
subs . pop_front ( ) ;
2020-06-22 17:08:11 +00:00
if ( ca ) {
2020-06-13 05:07:42 +00:00
subPath = sub - > makeFixedOutputPathFromCA ( storePath . name ( ) , * ca ) ;
2020-06-17 18:04:46 +00:00
if ( sub - > storeDir = = worker . store . storeDir )
assert ( subPath = = storePath ) ;
2020-07-02 14:59:24 +00:00
} else if ( sub - > storeDir ! = worker . store . storeDir ) {
2016-06-01 14:40:49 +00:00
tryNext ( ) ;
return ;
}
2016-04-29 11:57:08 +00:00
try {
// FIXME: make async
2020-07-02 15:12:05 +00:00
info = sub - > queryPathInfo ( subPath ? * subPath : storePath ) ;
2016-04-29 11:57:08 +00:00
} catch ( InvalidPath & ) {
tryNext ( ) ;
return ;
2018-09-07 15:08:43 +00:00
} catch ( SubstituterDisabled & ) {
if ( settings . tryFallback ) {
tryNext ( ) ;
return ;
}
throw ;
2018-09-07 14:35:48 +00:00
} catch ( Error & e ) {
if ( settings . tryFallback ) {
2020-05-03 14:01:25 +00:00
logError ( e . info ( ) ) ;
2018-09-07 14:35:48 +00:00
tryNext ( ) ;
return ;
}
throw ;
2016-04-29 11:57:08 +00:00
}
2020-06-13 05:07:42 +00:00
if ( info - > path ! = storePath ) {
2020-06-17 18:14:22 +00:00
if ( info - > isContentAddressed ( * sub ) & & info - > references . empty ( ) ) {
auto info2 = std : : make_shared < ValidPathInfo > ( * info ) ;
2020-06-17 17:18:47 +00:00
info2 - > path = storePath ;
2020-06-13 05:07:42 +00:00
info = info2 ;
} else {
printError ( " asked '%s' for '%s' but got '%s' " ,
sub - > getUri ( ) , worker . store . printStorePath ( storePath ) , sub - > printStorePath ( info - > path ) ) ;
tryNext ( ) ;
return ;
}
}
2017-08-14 18:14:55 +00:00
/* Update the total expected download size. */
auto narInfo = std : : dynamic_pointer_cast < const NarInfo > ( info ) ;
2018-02-05 16:46:43 +00:00
maintainExpectedNar = std : : make_unique < MaintainCount < uint64_t > > ( worker . expectedNarSize , info - > narSize ) ;
2017-08-14 18:14:55 +00:00
maintainExpectedDownload =
narInfo & & narInfo - > fileSize
? std : : make_unique < MaintainCount < uint64_t > > ( worker . expectedDownloadSize , narInfo - > fileSize )
: nullptr ;
worker . updateProgress ( ) ;
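The maintainExpected* members used above follow an RAII counting pattern: a guard bumps a shared statistic on construction and undoes it on destruction, so the progress counters stay balanced on every exit path. A sketch of that pattern (assumed semantics of MaintainCount, simplified):
#include <cstdint>
template<typename T>
struct MaintainCountSketch
{
    T & counter;
    T delta;
    MaintainCountSketch(T & counter, T delta = 1) : counter(counter), delta(delta)
    { counter += delta; }        // account for the expected work
    ~MaintainCountSketch()
    { counter -= delta; }        // undo it when the guard goes away
};
/* Usage: MaintainCountSketch<uint64_t> guard(someCounter, narSize); */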
2016-05-30 13:09:01 +00:00
/* Bail out early if this substituter lacks a valid
   signature. LocalStore::addToStore() also checks for this, but
   only after we've downloaded the path. */
2018-02-05 17:08:30 +00:00
if ( worker . store . requireSigs
& & ! sub - > isTrusted
2018-04-13 13:42:35 +00:00
& & ! info - > checkSignatures ( worker . store , worker . store . getPublicKeys ( ) ) )
2018-02-05 17:08:30 +00:00
{
2020-06-02 14:22:24 +00:00
logWarning ( {
. name = " Invalid path signature " ,
. hint = hintfmt ( " substituter '%s' does not have a valid signature for path '%s' " ,
sub - > getUri ( ) , worker . store . printStorePath ( storePath ) )
} ) ;
2016-05-30 13:09:01 +00:00
tryNext ( ) ;
return ;
}
2005-09-20 16:14:00 +00:00
/* To maintain the closure invariant, we first have to realise the
2005-01-25 17:08:52 +00:00
paths referenced by this one . */
2016-04-29 11:57:08 +00:00
for ( auto & i : info - > references )
2015-07-17 17:24:28 +00:00
if ( i ! = storePath ) /* ignore self-references */
addWaitee ( worker . makeSubstitutionGoal ( i ) ) ;
2005-01-25 17:08:52 +00:00
if ( waitees . empty ( ) ) /* to prevent hang (no wake-up event) */
referencesValid ( ) ;
else
state = & SubstitutionGoal : : referencesValid ;
}
void SubstitutionGoal : : referencesValid ( )
{
2007-08-12 00:29:28 +00:00
trace ( " all references realised " ) ;
2005-01-25 17:08:52 +00:00
2006-12-08 17:26:21 +00:00
if ( nrFailed > 0 ) {
2019-12-05 18:11:09 +00:00
debug ( " some references of path '%s' could not be realised " , worker . store . printStorePath ( storePath ) ) ;
2013-01-02 11:38:28 +00:00
amDone ( nrNoSubstituters > 0 | | nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed ) ;
2006-12-08 17:26:21 +00:00
return ;
}
2005-01-25 17:08:52 +00:00
2016-04-29 11:57:08 +00:00
for ( auto & i : info - > references )
2015-07-17 17:24:28 +00:00
if ( i ! = storePath ) /* ignore self-references */
assert ( worker . store . isValidPath ( i ) ) ;
2004-06-20 19:17:54 +00:00
2004-06-22 09:00:31 +00:00
state = & SubstitutionGoal : : tryToRun ;
2009-03-31 21:14:07 +00:00
worker . wakeUp ( shared_from_this ( ) ) ;
2004-06-22 09:00:31 +00:00
}
void SubstitutionGoal : : tryToRun ( )
{
2004-06-22 17:04:10 +00:00
trace ( " trying to run " ) ;
2009-03-31 21:14:07 +00:00
/* Make sure that we are allowed to start a build. Note that even
2017-04-14 12:42:20 +00:00
if maxBuildJobs == 0 (no local builds allowed), we still allow
2009-03-31 21:14:07 +00:00
a substituter to run . This is because substitutions cannot be
distributed to another machine via the build hook . */
2017-04-28 14:20:46 +00:00
if ( worker . getNrLocalBuilds ( ) > = std : : max ( 1U , ( unsigned int ) settings . maxBuildJobs ) ) {
2004-06-22 09:00:31 +00:00
worker . waitForBuildSlot ( shared_from_this ( ) ) ;
return ;
}
2017-08-14 20:42:17 +00:00
maintainRunningSubstitutions = std : : make_unique < MaintainCount < uint64_t > > ( worker . runningSubstitutions ) ;
worker . updateProgress ( ) ;
2012-07-27 16:16:02 +00:00
outPipe . create ( ) ;
2012-11-09 17:00:33 +00:00
2016-04-29 11:57:08 +00:00
promise = std : : promise < void > ( ) ;
2012-11-09 17:00:33 +00:00
2016-04-29 11:57:08 +00:00
thr = std : : thread ( [ this ] ( ) {
try {
/* Wake up the worker loop when we're done. */
2016-07-11 19:44:44 +00:00
Finally updateStats ( [ this ] ( ) { outPipe . writeSide = - 1 ; } ) ;
2004-06-20 19:17:54 +00:00
2019-12-05 18:11:09 +00:00
Activity act ( * logger , actSubstitute , Logger : : Fields { worker . store . printStorePath ( storePath ) , sub - > getUri ( ) } ) ;
2017-08-25 15:49:40 +00:00
PushActivity pact ( act . id ) ;
2016-05-12 13:42:19 +00:00
copyStorePath ( ref < Store > ( sub ) , ref < Store > ( worker . store . shared_from_this ( ) ) ,
2020-07-02 15:12:05 +00:00
subPath ? * subPath : storePath , repair , sub - > isTrusted ? NoCheckSigs : CheckSigs ) ;
2004-06-20 19:17:54 +00:00
2016-04-29 11:57:08 +00:00
promise . set_value ( ) ;
} catch ( . . . ) {
promise . set_exception ( std : : current_exception ( ) ) ;
}
2014-07-10 14:50:51 +00:00
} ) ;
2012-07-27 16:16:02 +00:00
2016-07-11 19:44:44 +00:00
worker . childStarted ( shared_from_this ( ) , { outPipe . readSide . get ( ) } , true , false ) ;
2004-06-20 19:17:54 +00:00
state = & SubstitutionGoal : : finished ;
}
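The substitution is performed on a separate thread; errors are carried back via a std::promise and re-thrown when the worker later calls get() on the future. A self-contained sketch of that pattern (illustrative only; the real code also signals the event loop through a pipe):
#include <future>
#include <thread>
static void runInThreadSketch()
{
    std::promise<void> promise;
    std::thread thr([&]() {
        try {
            /* ... do the actual copy here ... */
            promise.set_value();
        } catch (...) {
            promise.set_exception(std::current_exception());
        }
    });
    thr.join();
    promise.get_future().get();   // re-throws if the thread failed
}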
void SubstitutionGoal : : finished ( )
{
2004-06-22 17:04:10 +00:00
trace ( " substitute finished " ) ;
2004-06-20 19:17:54 +00:00
2016-04-29 11:57:08 +00:00
thr . join ( ) ;
2016-08-30 13:45:39 +00:00
worker . childTerminated ( this ) ;
2004-06-20 19:17:54 +00:00
2004-06-24 13:40:38 +00:00
try {
2016-04-29 11:57:08 +00:00
promise . get_future ( ) . get ( ) ;
2018-06-05 14:04:41 +00:00
} catch ( std : : exception & e ) {
printError ( e . what ( ) ) ;
/* Cause the parent build to fail unless --fallback is given,
or the substitute has disappeared . The latter case behaves
the same as the substitute never having existed in the
first place . */
try {
throw ;
} catch ( SubstituteGone & ) {
} catch ( . . . ) {
substituterFailed = true ;
}
2012-07-27 13:59:18 +00:00
2004-06-24 13:40:38 +00:00
/* Try the next substitute. */
state = & SubstitutionGoal : : tryNext ;
worker . wakeUp ( shared_from_this ( ) ) ;
return ;
}
2004-06-20 19:17:54 +00:00
2020-06-16 20:20:18 +00:00
worker . markContentsGood ( storePath ) ;
2012-10-03 14:38:09 +00:00
2019-12-05 18:11:09 +00:00
printMsg ( lvlChatty , " substitution of path '%s' succeeded " , worker . store . printStorePath ( storePath ) ) ;
2004-06-24 13:40:38 +00:00
2017-08-14 20:42:17 +00:00
maintainRunningSubstitutions . reset ( ) ;
2017-08-14 20:12:36 +00:00
maintainExpectedSubstitutions . reset ( ) ;
worker . doneSubstitutions + + ;
2017-08-14 18:14:55 +00:00
if ( maintainExpectedDownload ) {
auto fileSize = maintainExpectedDownload - > delta ;
maintainExpectedDownload . reset ( ) ;
worker . doneDownloadSize + = fileSize ;
}
worker . doneNarSize + = maintainExpectedNar - > delta ;
maintainExpectedNar . reset ( ) ;
worker . updateProgress ( ) ;
2006-12-08 17:26:21 +00:00
amDone ( ecSuccess ) ;
2004-06-18 18:09:32 +00:00
}
2005-10-17 15:33:24 +00:00
void SubstitutionGoal : : handleChildOutput ( int fd , const string & data )
2004-06-29 09:41:50 +00:00
{
}
2005-10-17 15:33:24 +00:00
void SubstitutionGoal : : handleEOF ( int fd )
{
2016-07-11 19:44:44 +00:00
if ( fd = = outPipe . readSide . get ( ) ) worker . wakeUp ( shared_from_this ( ) ) ;
2005-10-17 15:33:24 +00:00
}
2004-06-18 18:09:32 +00:00
//////////////////////////////////////////////////////////////////////
2008-06-09 13:52:45 +00:00
Worker::Worker(LocalStore & store)
2017-08-16 14:38:23 +00:00
    : act(*logger, actRealise)
    , actDerivations(*logger, actBuilds)
    , actSubstitutions(*logger, actCopyPaths)
2017-08-14 18:14:55 +00:00
    , store(store)
2004-06-18 18:09:32 +00:00
{
2012-07-27 13:59:18 +00:00
    /* Debugging: prevent recursive workers. */
2009-03-31 21:14:07 +00:00
    nrLocalBuilds = 0;
2016-12-06 20:58:04 +00:00
    lastWokenUp = steady_time_point::min();
2010-12-13 16:53:23 +00:00
    permanentFailure = false;
2014-08-17 17:09:03 +00:00
    timedOut = false;
2019-07-01 22:12:12 +00:00
    hashMismatch = false;
    checkMismatch = false;
2004-06-18 18:09:32 +00:00
}


Worker::~Worker()
{
2004-06-25 15:36:09 +00:00
    /* Explicitly get rid of all strong pointers now.  After this all
       goals that refer to this worker should be gone.  (Otherwise we
       are in trouble, since goals may call childTerminated() etc. in
       their destructors). */
    topGoals.clear();
2017-08-14 18:14:55 +00:00
2017-08-14 20:12:36 +00:00
    assert(expectedSubstitutions == 0);
2017-08-14 18:14:55 +00:00
    assert(expectedDownloadSize == 0);
    assert(expectedNarSize == 0);
2004-06-18 18:09:32 +00:00
}
2020-08-22 20:44:47 +00:00
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
    const StorePath & drvPath,
    const StringSet & wantedOutputs,
    std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal)
2004-06-18 18:09:32 +00:00
{
2020-08-22 20:44:47 +00:00
    WeakGoalPtr & abstract_goal_weak = derivationGoals[drvPath];
    GoalPtr abstract_goal = abstract_goal_weak.lock(); // FIXME
    std::shared_ptr<DerivationGoal> goal;
    if (!abstract_goal) {
        goal = mkDrvGoal();
        abstract_goal_weak = goal;
2012-10-02 18:08:59 +00:00
        wakeUp(goal);
2020-08-22 20:44:47 +00:00
    } else {
        goal = std::dynamic_pointer_cast<DerivationGoal>(abstract_goal);
        assert(goal);
        goal->addWantedOutputs(wantedOutputs);
    }
2004-06-25 15:36:09 +00:00
    return goal;
2004-06-18 18:09:32 +00:00
}
2003-07-20 19:29:38 +00:00
2003-08-01 15:41:47 +00:00
2020-08-22 20:44:47 +00:00
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
    const StringSet & wantedOutputs, BuildMode buildMode)
{
    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
        return std::make_shared<DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
    });
}
2020-06-16 20:20:18 +00:00
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
2020-08-22 20:44:47 +00:00
    const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode)
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from a on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
2015-07-17 15:57:40 +00:00
{
2020-08-22 20:44:47 +00:00
    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
        return std::make_shared<DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
    });
2015-07-17 15:57:40 +00:00
}
2020-06-22 17:08:11 +00:00
GoalPtr Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
2004-06-18 18:09:32 +00:00
{
2020-08-22 20:44:47 +00:00
    WeakGoalPtr & goal_weak = substitutionGoals[path];
    GoalPtr goal = goal_weak.lock(); // FIXME
2012-10-02 18:08:59 +00:00
    if (!goal) {
2020-06-17 17:18:47 +00:00
        goal = std::make_shared<SubstitutionGoal>(path, *this, repair, ca);
2020-08-22 20:44:47 +00:00
        goal_weak = goal;
2012-10-02 18:08:59 +00:00
        wakeUp(goal);
    }
    return goal;
2004-06-18 18:09:32 +00:00
}
2004-06-25 15:36:09 +00:00
static void removeGoal(GoalPtr goal, WeakGoalMap & goalMap)
2004-06-18 18:09:32 +00:00
{
2005-02-18 09:50:20 +00:00
    /* !!! inefficient */
    for (WeakGoalMap::iterator i = goalMap.begin();
         i != goalMap.end(); )
        if (i->second.lock() == goal) {
            WeakGoalMap::iterator j = i; ++j;
            goalMap.erase(i);
            i = j;
        }
        else ++i;
2004-06-18 18:09:32 +00:00
}
2004-05-11 18:05:44 +00:00
2004-06-18 18:09:32 +00:00
void Worker::removeGoal(GoalPtr goal)
{
2006-09-04 21:06:23 +00:00
    nix::removeGoal(goal, derivationGoals);
    nix::removeGoal(goal, substitutionGoals);
2005-02-23 11:19:27 +00:00
    if (topGoals.find(goal) != topGoals.end()) {
        topGoals.erase(goal);
        /* If a top-level goal failed, then kill all other goals
           (unless keepGoing was set). */
2020-06-15 17:25:35 +00:00
        if (goal->exitCode == Goal::ecFailed && !settings.keepGoing)
2005-02-23 11:19:27 +00:00
            topGoals.clear();
    }
2007-08-28 11:36:17 +00:00
    /* Wake up goals waiting for any goal to finish. */
2015-07-17 17:24:28 +00:00
    for (auto & i : waitingForAnyGoal) {
        GoalPtr goal = i.lock();
2007-08-28 11:36:17 +00:00
        if (goal) wakeUp(goal);
    }
    waitingForAnyGoal.clear();
2004-05-11 18:05:44 +00:00
}
2003-11-21 16:05:19 +00:00
2004-05-11 18:05:44 +00:00
2004-06-18 18:09:32 +00:00
void Worker::wakeUp(GoalPtr goal)
2004-05-11 18:05:44 +00:00
{
2004-06-25 15:36:09 +00:00
    goal->trace("woken up");
2014-03-29 23:49:23 +00:00
    addToWeakGoals(awake, goal);
2004-06-18 18:09:32 +00:00
}
2004-05-11 18:05:44 +00:00
2004-06-18 18:09:32 +00:00
2009-03-31 21:14:07 +00:00
unsigned Worker::getNrLocalBuilds()
2004-06-18 18:09:32 +00:00
{
2009-03-31 21:14:07 +00:00
    return nrLocalBuilds;
2003-07-20 19:29:38 +00:00
}
2016-04-29 11:57:08 +00:00
void Worker::childStarted(GoalPtr goal, const set<int> & fds,
    bool inBuildSlot, bool respectTimeouts)
2003-07-20 19:29:38 +00:00
{
2004-06-19 21:45:04 +00:00
    Child child;
    child.goal = goal;
2016-08-30 13:45:39 +00:00
    child.goal2 = goal.get();
2005-10-17 15:33:24 +00:00
    child.fds = fds;
2016-12-06 20:58:04 +00:00
    child.timeStarted = child.lastOutput = steady_time_point::clock::now();
2004-06-19 21:45:04 +00:00
    child.inBuildSlot = inBuildSlot;
2013-04-23 16:04:59 +00:00
    child.respectTimeouts = respectTimeouts;
2016-04-29 11:57:08 +00:00
    children.emplace_back(child);
2009-03-31 21:14:07 +00:00
    if (inBuildSlot) nrLocalBuilds++;
2004-06-18 18:09:32 +00:00
}
2003-07-20 19:29:38 +00:00
2004-06-18 18:09:32 +00:00
2016-08-30 13:45:39 +00:00
void Worker::childTerminated(Goal * goal, bool wakeSleepers)
2004-06-18 18:09:32 +00:00
{
2016-04-29 11:57:08 +00:00
    auto i = std::find_if(children.begin(), children.end(),
2016-08-30 13:45:39 +00:00
        [&](const Child & child) { return child.goal2 == goal; });
2016-09-08 15:29:50 +00:00
    if (i == children.end()) return;
2004-06-19 21:45:04 +00:00
2016-04-29 11:57:08 +00:00
    if (i->inBuildSlot) {
2009-03-31 21:14:07 +00:00
        assert(nrLocalBuilds > 0);
        nrLocalBuilds--;
2004-06-19 21:45:04 +00:00
    }
2016-04-29 11:57:08 +00:00
    children.erase(i);
2004-06-18 18:09:32 +00:00
2004-07-01 16:24:35 +00:00
    if (wakeSleepers) {
2012-07-27 13:59:18 +00:00
2004-07-01 16:24:35 +00:00
        /* Wake up goals waiting for a build slot. */
2016-04-29 11:57:08 +00:00
        for (auto & j : wantingToBuild) {
            GoalPtr goal = j.lock();
2004-07-01 16:24:35 +00:00
            if (goal) wakeUp(goal);
        }
        wantingToBuild.clear();
2004-06-25 15:36:09 +00:00
    }
2003-10-16 16:29:57 +00:00
}
2006-12-08 17:26:21 +00:00
void Worker::waitForBuildSlot(GoalPtr goal)
2003-10-16 16:29:57 +00:00
{
2004-06-18 18:09:32 +00:00
    debug("wait for build slot");
2012-07-30 23:55:41 +00:00
    if (getNrLocalBuilds() < settings.maxBuildJobs)
2004-06-18 18:09:32 +00:00
        wakeUp(goal); /* we can do it right away */
    else
2014-03-29 23:49:23 +00:00
        addToWeakGoals(wantingToBuild, goal);
2004-06-18 18:09:32 +00:00
}
2004-02-13 10:45:09 +00:00
2003-10-16 16:29:57 +00:00
2007-08-28 11:36:17 +00:00
void Worker::waitForAnyGoal(GoalPtr goal)
{
    debug("wait for any goal");
2014-03-29 23:49:23 +00:00
    addToWeakGoals(waitingForAnyGoal, goal);
2007-08-28 11:36:17 +00:00
}
2009-03-23 01:05:54 +00:00
void Worker::waitForAWhile(GoalPtr goal)
{
    debug("wait for a while");
2014-03-29 23:49:23 +00:00
    addToWeakGoals(waitingForAWhile, goal);
2009-03-23 01:05:54 +00:00
}
2005-02-23 11:19:27 +00:00
void Worker::run(const Goals & _topGoals)
2004-06-18 18:09:32 +00:00
{
2015-07-17 17:24:28 +00:00
    for (auto & i : _topGoals) topGoals.insert(i);
2012-07-27 13:59:18 +00:00
2017-05-16 14:09:57 +00:00
    debug("entered goal loop");
2004-06-18 18:09:32 +00:00
    while (1) {
2004-01-15 20:23:55 +00:00
        checkInterrupt();
2004-06-18 18:09:32 +00:00
2017-09-05 18:43:42 +00:00
        store.autoGC(false);
2014-11-24 15:48:04 +00:00
        /* Call every wake goal (in the ordering established by
           CompareGoalPtrs). */
2005-02-23 11:19:27 +00:00
        while (!awake.empty() && !topGoals.empty()) {
2014-11-24 15:48:04 +00:00
            Goals awake2;
            for (auto & i : awake) {
                GoalPtr goal = i.lock();
                if (goal) awake2.insert(goal);
            }
2004-06-18 18:09:32 +00:00
            awake.clear();
2014-11-24 15:48:04 +00:00
            for (auto & goal : awake2) {
2004-06-18 18:09:32 +00:00
                checkInterrupt();
2014-11-24 15:48:04 +00:00
                goal->work();
                if (topGoals.empty()) break; // stuff may have been cancelled
2004-06-18 18:09:32 +00:00
            }
2003-10-16 16:29:57 +00:00
        }
2004-06-25 15:36:09 +00:00
        if (topGoals.empty()) break;
2003-10-16 16:29:57 +00:00
2004-06-18 18:09:32 +00:00
        /* Wait for input. */
2009-03-23 01:05:54 +00:00
        if (!children.empty() || !waitingForAWhile.empty())
2007-08-12 00:29:28 +00:00
            waitForInput();
2009-03-29 18:06:00 +00:00
        else {
2020-05-13 21:56:39 +00:00
            if (awake.empty() && 0 == settings.maxBuildJobs)
2020-08-05 16:58:00 +00:00
            {
                if (getMachines().empty())
                   throw Error("unable to start any build; either increase '--max-jobs' "
                            "or enable remote builds."
                            "\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
                else
                   throw Error("unable to start any build; remote machines may not have "
                            "all required system features."
                            "\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
            }
2007-08-12 00:29:28 +00:00
            assert(!awake.empty());
2009-03-29 18:06:00 +00:00
        }
2004-06-18 18:09:32 +00:00
    }
2003-10-16 16:29:57 +00:00
2004-06-25 15:36:09 +00:00
    /* If --keep-going is not set, it's possible that the main goal
       exited while some of its subgoals were still active.  But if
       --keep-going *is* set, then they must all be finished now. */
2012-07-30 23:55:41 +00:00
    assert(!settings.keepGoing || awake.empty());
    assert(!settings.keepGoing || wantingToBuild.empty());
    assert(!settings.keepGoing || children.empty());
2003-07-20 19:29:38 +00:00
}
2004-06-18 18:09:32 +00:00
void Worker::waitForInput()
2003-07-20 19:29:38 +00:00
{
2004-06-18 18:09:32 +00:00
    printMsg(lvlVomit, "waiting for children");
2003-07-20 19:29:38 +00:00
2005-10-17 15:33:24 +00:00
    /* Process output from the file descriptors attached to the
       children, namely log output and output path creation commands.
       We also use this to detect child termination: if we get EOF on
       the logger pipe of a build, we assume that the builder has
       terminated. */
2004-06-18 18:09:32 +00:00
2009-03-23 01:05:54 +00:00
    bool useTimeout = false;
2020-04-21 00:32:50 +00:00
    long timeout = 0;
2016-12-06 20:58:04 +00:00
    auto before = steady_time_point::clock::now();
2011-06-30 15:19:13 +00:00
2013-04-23 16:04:59 +00:00
    /* If we're monitoring for silence on stdout/stderr, or if there
       is a build timeout, then wait for input until the first
       deadline for any child. */
2016-12-06 20:58:04 +00:00
    auto nearest = steady_time_point::max(); // nearest deadline
2017-09-05 18:43:42 +00:00
    if (settings.minFree.get() != 0)
        // Periodically wake up to see if we need to run the garbage collector.
        nearest = before + std::chrono::seconds(10);
2015-07-17 17:24:28 +00:00
    for (auto & i : children) {
2016-04-29 11:57:08 +00:00
        if (!i.respectTimeouts) continue;
2017-04-14 12:42:20 +00:00
        if (0 != settings.maxSilentTime)
2016-12-06 20:58:04 +00:00
            nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
2017-04-14 12:42:20 +00:00
        if (0 != settings.buildTimeout)
2016-12-06 20:58:04 +00:00
            nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
2013-04-23 16:04:59 +00:00
    }
2016-12-06 20:58:04 +00:00
    if (nearest != steady_time_point::max()) {
2020-04-21 00:32:50 +00:00
        timeout = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
2012-07-27 14:56:33 +00:00
        useTimeout = true;
2006-12-08 15:44:00 +00:00
    }
2009-03-23 01:05:54 +00:00
    /* If we are polling goals that are waiting for a lock, then wake
       up after a few seconds at most. */
    if (!waitingForAWhile.empty()) {
        useTimeout = true;
2016-12-06 20:58:04 +00:00
        if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
2020-04-21 00:32:50 +00:00
        timeout = std::max(1L,
2016-12-08 19:36:14 +00:00
            (long) std::chrono::duration_cast<std::chrono::seconds>(
2016-12-06 20:58:04 +00:00
                lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
    } else lastWokenUp = steady_time_point::min();

    if (useTimeout)
2020-04-21 00:32:50 +00:00
        vomit("sleeping %d seconds", timeout);
2009-03-23 01:05:54 +00:00
2004-06-18 18:09:32 +00:00
    /* Use select() to wait for the input side of any logger pipe to
       become `available'.  Note that `available' (i.e., non-blocking)
       includes EOF. */
2020-04-21 00:32:50 +00:00
    std::vector<struct pollfd> pollStatus;
    std::map<int, int> fdToPollStatus;
2015-07-17 17:24:28 +00:00
    for (auto & i : children) {
2016-04-29 11:57:08 +00:00
        for (auto & j : i.fds) {
2020-04-21 00:32:50 +00:00
            pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
            fdToPollStatus[j] = pollStatus.size() - 1;
2005-10-17 15:33:24 +00:00
        }
2004-06-18 18:09:32 +00:00
    }
2003-07-20 19:29:38 +00:00
2020-04-21 00:32:50 +00:00
    if (poll(pollStatus.data(), pollStatus.size(),
            useTimeout ? timeout * 1000 : -1) == -1) {
2004-06-18 18:09:32 +00:00
        if (errno == EINTR) return;
        throw SysError("waiting for input");
    }
2003-07-20 19:29:38 +00:00
2016-12-06 20:58:04 +00:00
    auto after = steady_time_point::clock::now();
2006-12-08 15:44:00 +00:00
2017-01-19 15:58:39 +00:00
    /* Process all available file descriptors. FIXME: this is
       O(children * fds). */
2016-04-29 11:57:08 +00:00
    decltype(children)::iterator i;
    for (auto j = children.begin(); j != children.end(); j = i) {
        i = std::next(j);
2006-12-08 18:41:48 +00:00
2004-06-18 18:09:32 +00:00
        checkInterrupt();
2016-04-29 11:57:08 +00:00
        GoalPtr goal = j->goal.lock();
2004-06-25 15:36:09 +00:00
        assert(goal);
2006-12-08 18:41:48 +00:00
2016-04-29 11:57:08 +00:00
        set<int> fds2(j->fds);
2018-03-02 00:58:41 +00:00
        std::vector<unsigned char> buffer(4096);
2015-07-17 17:24:28 +00:00
        for (auto & k : fds2) {
2020-04-21 00:32:50 +00:00
            if (pollStatus.at(fdToPollStatus.at(k)).revents) {
2020-07-24 21:02:51 +00:00
                ssize_t rd = ::read(k, buffer.data(), buffer.size());
2019-05-17 20:29:15 +00:00
                // FIXME: is there a cleaner way to handle pt close
                // than EIO? Is this even standard?
                if (rd == 0 || (rd == -1 && errno == EIO)) {
2020-05-11 21:52:15 +00:00
                    debug("%1%: got EOF", goal->getName());
2015-07-17 17:24:28 +00:00
                    goal->handleEOF(k);
2016-04-29 11:57:08 +00:00
                    j->fds.erase(k);
2019-05-17 20:29:15 +00:00
                } else if (rd == -1) {
                    if (errno != EINTR)
                        throw SysError("%s: read failed", goal->getName());
2005-10-17 15:33:24 +00:00
                } else {
2020-05-11 21:52:15 +00:00
                    printMsg(lvlVomit, "%1%: read %2% bytes",
                        goal->getName(), rd);
2018-03-01 21:00:58 +00:00
                    string data((char *) buffer.data(), rd);
2016-04-29 11:57:08 +00:00
                    j->lastOutput = after;
2015-07-17 17:24:28 +00:00
                    goal->handleChildOutput(k, data);
2005-10-17 15:33:24 +00:00
                }
2004-06-18 18:09:32 +00:00
            }
        }
2006-12-08 15:44:00 +00:00
2020-06-15 17:25:35 +00:00
        if (goal->exitCode == Goal::ecBusy &&
2017-04-14 12:42:20 +00:00
            0 != settings.maxSilentTime &&
2016-04-29 11:57:08 +00:00
            j->respectTimeouts &&
2016-12-06 20:58:04 +00:00
            after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
2006-12-08 17:26:21 +00:00
        {
2020-06-15 17:25:35 +00:00
            goal->timedOut(Error(
2020-06-02 14:22:24 +00:00
                    "%1% timed out after %2% seconds of silence",
2020-06-15 17:25:35 +00:00
                    goal->getName(), settings.maxSilentTime));
2006-12-08 17:26:21 +00:00
        }
2011-06-30 15:19:13 +00:00
2020-06-15 17:25:35 +00:00
        else if (goal->exitCode == Goal::ecBusy &&
2017-04-14 12:42:20 +00:00
            0 != settings.buildTimeout &&
2016-04-29 11:57:08 +00:00
            j->respectTimeouts &&
2016-12-06 20:58:04 +00:00
            after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
2011-06-30 15:19:13 +00:00
        {
2020-06-15 17:25:35 +00:00
            goal->timedOut(Error(
2020-06-02 14:22:24 +00:00
                    "%1% timed out after %2% seconds",
2020-06-15 17:25:35 +00:00
                    goal->getName(), settings.buildTimeout));
2011-06-30 15:19:13 +00:00
        }
2004-06-18 18:09:32 +00:00
    }
2009-03-23 01:05:54 +00:00
2016-12-06 20:58:04 +00:00
    if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
2009-03-23 01:05:54 +00:00
        lastWokenUp = after;
2015-07-17 17:24:28 +00:00
        for (auto & i : waitingForAWhile) {
            GoalPtr goal = i.lock();
2009-03-23 01:05:54 +00:00
            if (goal) wakeUp(goal);
        }
        waitingForAWhile.clear();
    }
2003-07-20 19:29:38 +00:00
}
2010-12-13 16:53:23 +00:00
unsigned int Worker::exitStatus()
{
2019-06-15 13:28:32 +00:00
    /*
     * 1100100
     *    ^^^^
     *    |||`- timeout
     *    ||`-- output hash mismatch
     *    |`--- build failure
     *    `---- not deterministic
     */
2019-05-11 21:14:19 +00:00
    unsigned int mask = 0;
2019-06-15 13:28:32 +00:00
    bool buildFailure = permanentFailure || timedOut || hashMismatch;
    if (buildFailure)
        mask |= 0x04;  // 100
2019-05-11 21:14:19 +00:00
    if (timedOut)
2019-06-15 13:28:32 +00:00
        mask |= 0x01;  // 101
2019-05-11 21:14:19 +00:00
    if (hashMismatch)
2019-06-15 13:28:32 +00:00
        mask |= 0x02;  // 102
    if (checkMismatch) {
        mask |= 0x08;  // 104
    }
2019-05-11 21:14:19 +00:00
2019-06-15 13:28:32 +00:00
    if (mask)
        mask |= 0x60;
    return mask ? mask : 1;
2010-12-13 16:53:23 +00:00
}
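/* For reference, the exit codes built up above compose as follows: 0x60 (96)
   marks a "structured" failure code, so a plain build failure yields
   0x60 | 0x04 = 100, a timeout 0x60 | 0x04 | 0x01 = 101, an output hash
   mismatch 0x60 | 0x04 | 0x02 = 102, and a determinism ('--check') mismatch
   0x60 | 0x08 = 104.  If no bit was set, the generic failure status 1 is
   returned. */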
2005-10-17 15:33:24 +00:00
2019-12-05 18:11:09 +00:00
bool Worker::pathContentsGood(const StorePath & path)
2016-04-08 16:07:13 +00:00
{
2019-12-05 18:11:09 +00:00
    auto i = pathContentsGoodCache.find(path);
2016-04-08 16:07:13 +00:00
    if (i != pathContentsGoodCache.end()) return i->second;
2019-12-05 18:11:09 +00:00
    printInfo("checking path '%s'...", store.printStorePath(path));
2016-04-19 16:50:15 +00:00
    auto info = store.queryPathInfo(path);
2016-04-08 16:07:13 +00:00
    bool res;
2019-12-05 18:11:09 +00:00
    if (!pathExists(store.printStorePath(path)))
2016-04-08 16:07:13 +00:00
        res = false;
    else {
2020-08-05 18:42:48 +00:00
        HashResult current = hashPath(info->narHash.type, store.printStorePath(path));
2016-04-08 16:07:13 +00:00
        Hash nullHash(htSHA256);
2016-04-19 16:50:15 +00:00
        res = info->narHash == nullHash || info->narHash == current.first;
2016-04-08 16:07:13 +00:00
    }
2020-06-16 20:20:18 +00:00
    pathContentsGoodCache.insert_or_assign(path, res);
2020-05-13 21:56:39 +00:00
    if (!res)
2020-06-02 14:22:24 +00:00
        logError({
            .name = "Corrupted path",
            .hint = hintfmt("path '%s' is corrupted or missing!", store.printStorePath(path))
2020-05-03 14:01:25 +00:00
        });
2016-04-08 16:07:13 +00:00
    return res;
}
2020-06-16 20:20:18 +00:00
void Worker::markContentsGood(const StorePath & path)
2016-04-08 16:07:13 +00:00
{
2020-06-16 20:20:18 +00:00
    pathContentsGoodCache.insert_or_assign(path, true);
2016-04-08 16:07:13 +00:00
}
2004-06-18 18:09:32 +00:00
//////////////////////////////////////////////////////////////////////
2019-12-05 18:11:09 +00:00
static void primeCache(Store & store, const std::vector<StorePathWithOutputs> & paths)
2017-08-31 14:02:36 +00:00
{
2019-12-05 18:11:09 +00:00
    StorePathSet willBuild, willSubstitute, unknown;
2020-07-30 11:10:49 +00:00
    uint64_t downloadSize, narSize;
2017-08-31 14:02:36 +00:00
    store.queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
2019-04-01 19:09:49 +00:00
    if (!willBuild.empty() && 0 == settings.maxBuildJobs && getMachines().empty())
        throw Error(
            "%d derivations need to be built, but neither local builds ('--max-jobs') "
            "nor remote builds ('--builders') are enabled", willBuild.size());
2017-08-31 14:02:36 +00:00
}
2019-12-05 18:11:09 +00:00
void LocalStore::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode)
2003-07-29 10:43:12 +00:00
{
2008-06-09 13:52:45 +00:00
    Worker worker(*this);
2005-01-19 15:02:02 +00:00
2017-08-31 14:02:36 +00:00
    primeCache(*this, drvPaths);
2005-01-19 15:02:02 +00:00
    Goals goals;
2019-12-05 18:11:09 +00:00
    for (auto & path : drvPaths) {
        if (path.path.isDerivation())
            goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode));
2012-06-27 20:58:15 +00:00
        else
2019-12-05 18:11:09 +00:00
            goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair));
2012-11-26 14:39:10 +00:00
    }
2005-02-23 11:19:27 +00:00
    worker.run(goals);
2019-12-05 18:11:09 +00:00
    StorePathSet failed;
2020-06-15 17:25:35 +00:00
    std::optional<Error> ex;
2016-12-07 12:16:06 +00:00
    for (auto & i : goals) {
2020-06-15 17:25:35 +00:00
        if (i->ex) {
            if (ex)
                logError(i->ex->info());
            else
                ex = i->ex;
        }
        if (i->exitCode != Goal::ecSuccess) {
2015-07-17 17:24:28 +00:00
            DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i.get());
2012-06-27 20:58:15 +00:00
            if (i2) failed.insert(i2->getDrvPath());
2015-07-17 17:24:28 +00:00
            else failed.insert(dynamic_cast<SubstitutionGoal *>(i.get())->getStorePath());
2005-02-23 11:19:27 +00:00
        }
2016-12-07 12:16:06 +00:00
    }
2012-07-27 13:59:18 +00:00
2020-06-15 17:25:35 +00:00
    if (failed.size() == 1 && ex) {
        ex->status = worker.exitStatus();
        throw *ex;
    } else if (!failed.empty()) {
        if (ex) logError(ex->info());
2016-11-08 19:19:02 +00:00
        throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
2020-06-15 17:25:35 +00:00
    }
2003-07-29 10:43:12 +00:00
}
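/* A minimal usage sketch (hypothetical caller, not part of the implementation
   above): buildPaths() is the entry point that clients such as nix-build go
   through; derivation paths are realised via DerivationGoals and plain store
   paths via SubstitutionGoals.  The helper name below is made up for
   illustration and the block is not compiled. */
#if 0
static void exampleRealise(LocalStore & store, const StorePath & drvPath)
{
    std::vector<StorePathWithOutputs> request;
    request.push_back({drvPath, StringSet{"out"}}); // ask for the 'out' output
    store.buildPaths(request, bmNormal);            // throws Error carrying exitStatus() on failure
}
#endif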
2019-12-05 18:11:09 +00:00
BuildResult LocalStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
2015-07-17 15:57:40 +00:00
    BuildMode buildMode)
{
    Worker worker(*this);
2020-08-22 20:44:47 +00:00
    auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode);
2015-07-17 15:57:40 +00:00
    BuildResult result;

    try {
        worker.run(Goals{goal});
2015-07-20 01:15:45 +00:00
        result = goal->getResult();
2015-07-17 15:57:40 +00:00
    } catch (Error & e) {
        result.status = BuildResult::MiscFailure;
        result.errorMsg = e.msg();
    }

    return result;
}
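/* A minimal sketch of the in-memory build interface described in the commit
   message above (hypothetical caller, not part of this file): a
   BasicDerivation received from a peer, e.g. over 'nix-store --serve', can be
   built without its .drv closure being present locally; only drv.inputSrcs
   and the needed outputs of drv.inputDrvs have to be valid in the store.
   The block is illustrative only and is not compiled. */
#if 0
static BuildResult exampleRemoteBuild(LocalStore & store,
    const StorePath & drvPath, const BasicDerivation & drv)
{
    BuildResult res = store.buildDerivation(drvPath, drv, bmNormal);
    if (!res.success())
        printError("remote build failed: %s", res.errorMsg);
    return res;
}
#endif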
2019-12-05 18:11:09 +00:00
void LocalStore::ensurePath(const StorePath & path)
2003-07-20 19:29:38 +00:00
{
2004-06-18 18:09:32 +00:00
    /* If the path is already valid, we're done. */
2008-06-09 13:52:45 +00:00
    if (isValidPath(path)) return;
2004-06-18 18:09:32 +00:00
2020-06-16 20:20:18 +00:00
    primeCache(*this, {{path}});
2017-08-31 14:02:36 +00:00
2008-06-09 13:52:45 +00:00
    Worker worker(*this);
2005-02-23 11:19:27 +00:00
    GoalPtr goal = worker.makeSubstitutionGoal(path);
2016-05-04 14:04:52 +00:00
    Goals goals = {goal};
2005-02-23 11:19:27 +00:00
    worker.run(goals);
2020-06-15 17:25:35 +00:00
    if (goal->exitCode != Goal::ecSuccess) {
        if (goal->ex) {
            goal->ex->status = worker.exitStatus();
            throw *goal->ex;
        } else
            throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
    }
2003-07-20 19:29:38 +00:00
}
2006-09-04 21:06:23 +00:00
2012-07-27 13:59:18 +00:00
2019-12-05 18:11:09 +00:00
void LocalStore::repairPath(const StorePath & path)
2012-10-02 18:08:59 +00:00
{
    Worker worker(*this);
2017-06-28 16:11:01 +00:00
    GoalPtr goal = worker.makeSubstitutionGoal(path, Repair);
2016-05-04 14:04:52 +00:00
    Goals goals = {goal};
2012-10-02 18:08:59 +00:00
    worker.run(goals);
2020-06-15 17:25:35 +00:00
    if (goal->exitCode != Goal::ecSuccess) {
2016-01-06 21:07:59 +00:00
        /* Since substituting the path didn't work, if we have a valid
           deriver, then rebuild the deriver. */
2019-12-05 18:11:09 +00:00
        auto info = queryPathInfo(path);
        if (info->deriver && isValidPath(*info->deriver)) {
2016-01-06 21:07:59 +00:00
            goals.clear();
2019-12-05 18:11:09 +00:00
            goals.insert(worker.makeDerivationGoal(*info->deriver, StringSet(), bmRepair));
2016-01-06 21:07:59 +00:00
            worker.run(goals);
        } else
2019-12-05 18:11:09 +00:00
            throw Error(worker.exitStatus(), "cannot repair path '%s'", printStorePath(path));
2016-01-06 21:07:59 +00:00
    }
2012-10-02 18:08:59 +00:00
}
2006-09-04 21:06:23 +00:00
}