forked from lix-project/lix

commit c3c23a52ee: Merge remote-tracking branch 'origin/master' into flakes

.gitignore (vendored): 6 changes

@@ -4,9 +4,10 @@ perl/Makefile.config
 # /
 /aclocal.m4
 /autom4te.cache
+/precompiled-headers.h.gch
+/precompiled-headers.h.pch
 /config.*
 /configure
-/nix.spec
 /stamp-h1
 /svn-revision
 /libtool
@@ -84,6 +85,7 @@ perl/Makefile.config
 /tests/restricted-innocent
 /tests/shell
 /tests/shell.drv
+/tests/config.nix

 # /tests/lang/
 /tests/lang/*.out
@@ -117,3 +119,5 @@ GPATH
 GRTAGS
 GSYMS
 GTAGS
+
+nix-rust/target

Makefile: 1 change

@@ -1,6 +1,7 @@
 makefiles = \
   mk/precompiled-headers.mk \
   local.mk \
+  nix-rust/local.mk \
   src/libutil/local.mk \
   src/libstore/local.mk \
   src/libmain/local.mk \

configure.ac: 42 changes

@@ -117,27 +117,16 @@ fi
 ])

 NEED_PROG(bash, bash)
-NEED_PROG(patch, patch)
 AC_PATH_PROG(xmllint, xmllint, false)
 AC_PATH_PROG(xsltproc, xsltproc, false)
 AC_PATH_PROG(flex, flex, false)
 AC_PATH_PROG(bison, bison, false)
-NEED_PROG(sed, sed)
-NEED_PROG(tar, tar)
-NEED_PROG(bzip2, bzip2)
-NEED_PROG(gzip, gzip)
-NEED_PROG(xz, xz)
 AC_PATH_PROG(dot, dot)
 AC_PATH_PROG(lsof, lsof, lsof)
 NEED_PROG(jq, jq)

-NEED_PROG(cat, cat)
-NEED_PROG(tr, tr)
-AC_ARG_WITH(coreutils-bin, AC_HELP_STRING([--with-coreutils-bin=PATH],
-  [path of cat, mkdir, etc.]),
-  coreutils=$withval, coreutils=$(dirname $cat))
-AC_SUBST(coreutils)
+AC_SUBST(coreutils, [$(dirname $(type -p cat))])

 AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
@@ -168,7 +157,20 @@ if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
   LIBS="-latomic $LIBS"
 fi

-# Look for OpenSSL, a required dependency.
+PKG_PROG_PKG_CONFIG
+
+AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
+  [Build shared libraries for Nix [default=yes]]),
+  shared=$enableval, shared=yes)
+if test "$shared" = yes; then
+  AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
+else
+  AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.])
+  PKG_CONFIG="$PKG_CONFIG --static"
+fi
+
+# Look for OpenSSL, a required dependency. FIXME: this is only (maybe)
+# used by S3BinaryCacheStore.
 PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
@@ -178,11 +180,9 @@ AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true],
 AC_CHECK_HEADERS([bzlib.h], [true],
   [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://web.archive.org/web/20180624184756/http://www.bzip.org/.])])

 # Look for SQLite, a required dependency.
 PKG_CHECK_MODULES([SQLITE3], [sqlite3 >= 3.6.19], [CXXFLAGS="$SQLITE3_CFLAGS $CXXFLAGS"])

 # Look for libcurl, a required dependency.
 PKG_CHECK_MODULES([LIBCURL], [libcurl], [CXXFLAGS="$LIBCURL_CFLAGS $CXXFLAGS"])
@@ -205,13 +205,11 @@ PKG_CHECK_MODULES([SODIUM], [libsodium],
   have_sodium=1], [have_sodium=])
 AC_SUBST(HAVE_SODIUM, [$have_sodium])

 # Look for liblzma, a required dependency.
 PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"])
 AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt],
   [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])])

 # Look for libbrotli{enc,dec}.
 PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])
@@ -300,16 +298,6 @@ AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
   sandbox_shell=$withval)
 AC_SUBST(sandbox_shell)

-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
-  [Build shared libraries for Nix [default=yes]]),
-  shared=$enableval, shared=yes)
-if test "$shared" = yes; then
-  AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
-else
-  AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.])
-fi
-
 # Expand all variables in config.status.
 test "$prefix" = NONE && prefix=$ac_default_prefix
 test "$exec_prefix" = NONE && exec_prefix='${prefix}'

corepkgs/config.nix.in:

@@ -1,29 +1,13 @@
+# FIXME: remove this file?
 let
   fromEnv = var: def:
     let val = builtins.getEnv var; in
     if val != "" then val else def;
 in rec {
-  shell = "@bash@";
-  coreutils = "@coreutils@";
-  bzip2 = "@bzip2@";
-  gzip = "@gzip@";
-  xz = "@xz@";
-  tar = "@tar@";
-  tarFlags = "@tarFlags@";
-  tr = "@tr@";
   nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@";
   nixPrefix = "@prefix@";
   nixLibexecDir = fromEnv "NIX_LIBEXEC_DIR" "@libexecdir@";
   nixLocalstateDir = "@localstatedir@";
   nixSysconfDir = "@sysconfdir@";
   nixStoreDir = fromEnv "NIX_STORE_DIR" "@storedir@";
-
-  # If Nix is installed in the Nix store, then automatically add it as
-  # a dependency to the core packages. This ensures that they work
-  # properly in a chroot.
-  chrootDeps =
-    if dirOf nixPrefix == builtins.storeDir then
-      [ (builtins.storePath nixPrefix) ]
-    else
-      [ ];
 }

corepkgs/unpack-channel.nix:

@@ -1,39 +1,12 @@
-with import <nix/config.nix>;
-
-let
-
-  builder = builtins.toFile "unpack-channel.sh"
-    ''
-      mkdir $out
-      cd $out
-      xzpat="\.xz\$"
-      gzpat="\.gz\$"
-      if [[ "$src" =~ $xzpat ]]; then
-        ${xz} -d < $src | ${tar} xf - ${tarFlags}
-      elif [[ "$src" =~ $gzpat ]]; then
-        ${gzip} -d < $src | ${tar} xf - ${tarFlags}
-      else
-        ${bzip2} -d < $src | ${tar} xf - ${tarFlags}
-      fi
-      if [ * != $channelName ]; then
-        mv * $out/$channelName
-      fi
-    '';
-
-in
-
 { name, channelName, src }:

 derivation {
-  system = builtins.currentSystem;
-  builder = shell;
-  args = [ "-e" builder ];
-  inherit name channelName src;
-
-  PATH = "${nixBinDir}:${coreutils}";
+  builder = "builtin:unpack-channel";
+
+  system = "builtin";
+
+  inherit name channelName src;

   # No point in doing this remotely.
   preferLocalBuild = true;
-
-  inherit chrootDeps;
 }

nix-build manual page (DocBook source):

@@ -30,6 +30,7 @@
       <replaceable>attrPath</replaceable>
     </arg>
     <arg><option>--no-out-link</option></arg>
+    <arg><option>--dry-run</option></arg>
     <arg>
       <group choice='req'>
         <arg choice='plain'><option>--out-link</option></arg>
@@ -98,6 +99,10 @@ also <xref linkend="sec-common-options" />.</phrase></para>

 </varlistentry>

+<varlistentry><term><option>--dry-run</option></term>
+  <listitem><para>Show what store paths would be built or downloaded.</para></listitem>
+</varlistentry>
+
 <varlistentry xml:id='opt-out-link'><term><option>--out-link</option> /
   <option>-o</option> <replaceable>outlink</replaceable></term>

nix-shell manual page (DocBook source):

@@ -39,7 +39,12 @@
     <arg choice='plain'><option>--packages</option></arg>
     <arg choice='plain'><option>-p</option></arg>
   </group>
-  <arg choice='plain' rep='repeat'><replaceable>packages</replaceable></arg>
+  <arg choice='plain' rep='repeat'>
+    <group choice='req'>
+      <arg choice="plain"><replaceable>packages</replaceable></arg>
+      <arg choice="plain"><replaceable>expressions</replaceable></arg>
+    </group>
+  </arg>
 </arg>
 <arg><replaceable>path</replaceable></arg>
 </group>
@@ -189,8 +194,8 @@ also <xref linkend="sec-common-options" />.</phrase></para>
 <variablelist>

 <varlistentry><term><envar>NIX_BUILD_SHELL</envar></term>

   <listitem><para>Shell used to start the interactive environment.
   Defaults to the <command>bash</command> found in <envar>PATH</envar>.</para></listitem>

 </varlistentry>
@@ -222,8 +227,9 @@ $ nix-shell '<nixpkgs>' -A pan --pure \
     --command 'export NIX_DEBUG=1; export NIX_CORES=8; return'
 </screen>

-Nix expressions can also be given on the command line. For instance,
-the following starts a shell containing the packages
+Nix expressions can also be given on the command line using the
+<command>-E</command> and <command>-p</command> flags.
+For instance, the following starts a shell containing the packages
 <literal>sqlite</literal> and <literal>libX11</literal>:

 <screen>
@@ -238,6 +244,14 @@ $ nix-shell -p sqlite xorg.libX11
 … -L/nix/store/j1zg5v…-sqlite-3.8.0.2/lib -L/nix/store/0gmcz9…-libX11-1.6.1/lib …
 </screen>

+Note that <command>-p</command> accepts multiple full nix expressions that
+are valid in the <literal>buildInputs = [ ... ]</literal> shown above,
+not only package names. So the following is also legal:
+
+<screen>
+$ nix-shell -p sqlite 'git.override { withManual = false; }'
+</screen>
+
 The <command>-p</command> flag looks up Nixpkgs in the Nix search
 path. You can override it by passing <option>-I</option> or setting
 <envar>NIX_PATH</envar>. For example, the following gives you a shell

Common options manual page (DocBook source, --expr / -E entry):

@@ -323,7 +323,14 @@
   Nix expressions to be parsed and evaluated, rather than as a list
   of file names of Nix expressions.
   (<command>nix-instantiate</command>, <command>nix-build</command>
-  and <command>nix-shell</command> only.)</para></listitem>
+  and <command>nix-shell</command> only.)</para>
+
+  <para>For <command>nix-shell</command>, this option is commonly used
+  to give you a shell in which you can build the packages returned
+  by the expression. If you want to get a shell which contain the
+  <emphasis>built</emphasis> packages ready for use, give your
+  expression to the <command>nix-shell -p</command> convenience flag
+  instead.</para></listitem>

 </varlistentry>

Expression language manual page (DocBook source, builtins):

@@ -1120,6 +1120,16 @@ Evaluates to <literal>[ "foo" ]</literal>.

 </varlistentry>

+<varlistentry xml:id='builtin-placeholder'>
+  <term><function>builtins.placeholder</function>
+  <replaceable>output</replaceable></term>
+
+  <listitem><para>Return a placeholder string for the specified
+  <replaceable>output</replaceable> that will be substituted by the
+  corresponding output path at build time. Typical outputs would be
+  <literal>"out"</literal>, <literal>"bin"</literal> or
+  <literal>"dev"</literal>.</para></listitem>
+</varlistentry>
+
 <varlistentry xml:id='builtin-readDir'>
   <term><function>builtins.readDir</function>

flake.nix: 51 changes

@@ -72,6 +72,7 @@
         openssl pkgconfig sqlite boehmgc
         boost
         (nlohmann_json.override { multipleHeaders = true; })
+        rustc cargo

         # Tests
         git
@@ -156,6 +157,52 @@

     hydraJobs = {

+      # Create a "vendor" directory that contains the crates listed in
+      # Cargo.lock, and include it in the Nix tarball. This allows Nix
+      # to be built without network access.
+      vendoredCrates =
+        with nixpkgsFor.x86_64-linux;
+
+        let
+          lockFile = builtins.fromTOML (builtins.readFile nix-rust/Cargo.lock);
+
+          files = map (pkg: import <nix/fetchurl.nix> {
+            url = "https://crates.io/api/v1/crates/${pkg.name}/${pkg.version}/download";
+            sha256 = lockFile.metadata."checksum ${pkg.name} ${pkg.version} (registry+https://github.com/rust-lang/crates.io-index)";
+          }) (builtins.filter (pkg: pkg.source or "" == "registry+https://github.com/rust-lang/crates.io-index") lockFile.package);
+
+        in pkgs.runCommand "cargo-vendor-dir" {}
+          ''
+            mkdir -p $out/vendor
+
+            cat > $out/vendor/config <<EOF
+            [source.crates-io]
+            replace-with = "vendored-sources"
+
+            [source.vendored-sources]
+            directory = "vendor"
+            EOF
+
+            ${toString (builtins.map (file: ''
+              mkdir $out/vendor/tmp
+              tar xvf ${file} -C $out/vendor/tmp
+              dir=$(echo $out/vendor/tmp/*)
+
+              # Add just enough metadata to keep Cargo happy.
+              printf '{"files":{},"package":"${file.outputHash}"}' > "$dir/.cargo-checksum.json"
+
+              # Clean up some cruft from the winapi crates. FIXME: find
+              # a way to remove winapi* from our dependencies.
+              if [[ $dir =~ /winapi ]]; then
+                find $dir -name "*.a" -print0 | xargs -0 rm -f --
+              fi
+
+              mv "$dir" $out/vendor/
+
+              rm -rf $out/vendor/tmp
+            '') files)}
+          '';
+
       # Source tarball.
       tarball =
         with nixpkgsFor.x86_64-linux;
@@ -184,6 +231,8 @@

         distPhase =
           ''
+            cp -prd ${vendoredCrates}/vendor/ nix-rust/vendor/
+
             runHook preDist
             make dist
             mkdir -p $out/tarballs
@@ -446,7 +495,7 @@
       stdenv.mkDerivation {
         name = "nix";

-        buildInputs = buildDeps ++ tarballDeps ++ perlDeps;
+        buildInputs = buildDeps ++ tarballDeps ++ perlDeps ++ [ pkgs.rustfmt ];

         inherit configureFlags;

local.mk: 2 changes

@@ -2,7 +2,7 @@ ifeq ($(MAKECMDGOALS), dist)
   dist-files += $(shell cat .dist-files)
 endif

-dist-files += configure config.h.in nix.spec perl/configure
+dist-files += configure config.h.in perl/configure

 clean-files += Makefile.config

nix-rust/Cargo.lock: new generated file, 84 lines

# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "filetime"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
 "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
 "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "libc"
version = "0.2.65"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "nix-rust"
version = "0.1.0"
dependencies = [
 "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
 "tar 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "redox_syscall"
version = "0.1.56"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "tar"
version = "0.4.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "filetime 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
 "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
 "xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "winapi"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "xattr"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
 "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
]

[metadata]
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum filetime 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1ff6d4dab0aa0c8e6346d46052e93b13a16cf847b54ed357087c35011048cc7d"
"checksum libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)" = "1a31a0627fdf1f6a39ec0dd577e101440b7db22672c0901fe00a9a6fbb5c24e8"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
"checksum tar 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)" = "b3196bfbffbba3e57481b6ea32249fbaf590396a52505a2615adbb79d9d826d3"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
"checksum xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c"

nix-rust/Cargo.toml: new file, 13 lines

[package]
name = "nix-rust"
version = "0.1.0"
authors = ["Eelco Dolstra <edolstra@gmail.com>"]
edition = "2018"

[lib]
name = "nixrust"
crate-type = ["cdylib"]

[dependencies]
tar = "0.4"
libc = "0.2"

nix-rust/local.mk: new file, 38 lines

ifeq ($(OPTIMIZE), 1)
  RUST_MODE = --release
  RUST_DIR = release
else
  RUST_MODE =
  RUST_DIR = debug
endif

libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl
libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl

ifeq ($(OS), Darwin)
libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
else
libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR))
libnixrust_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$(libdir)
endif

$(libnixrust_PATH): $(wildcard $(d)/src/*.rs) $(d)/Cargo.toml
	$(trace-gen) cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) \
	  $(libnixrust_BUILD_FLAGS) \
	  cargo build $(RUST_MODE) $$(if [[ -d vendor ]]; then echo --offline; fi) \
	  && touch target/$(RUST_DIR)/libnixrust.$(SO_EXT)

$(libnixrust_INSTALL_PATH): $(libnixrust_PATH)
	$(target-gen) cp $^ $@
ifeq ($(OS), Darwin)
	install_name_tool -id $@ $@
endif

dist-files += $(d)/vendor

clean: clean-rust

clean-rust:
	$(suppress) rm -rfv nix-rust/target

nix-rust/src/error.rs: new file, 31 lines

#[derive(Debug)]
pub enum Error {
    IOError(std::io::Error),
    Misc(String),
    Foreign(CppException),
}

impl From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Self {
        Error::IOError(err)
    }
}

impl From<Error> for CppException {
    fn from(err: Error) -> Self {
        match err {
            Error::Foreign(ex) => ex,
            Error::Misc(s) => unsafe { make_error(&s) },
            Error::IOError(err) => unsafe { make_error(&err.to_string()) },
        }
    }
}

#[repr(C)]
#[derive(Debug)]
pub struct CppException(*const libc::c_void); // == std::exception_ptr*

extern "C" {
    #[allow(improper_ctypes)] // YOLO
    fn make_error(s: &str) -> CppException;
}

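A note on the pattern above: every Rust-side failure is funnelled into the single Error enum and only converted to a C++ exception pointer at the FFI boundary. The following is a minimal standalone sketch of that From-based funnelling in isolation; the Error enum and read_config function here are local stand-ins for illustration, not the crate's own definitions.

use std::fs::File;
use std::io::Read;

// Local stand-in for the crate's error type, for illustration only.
#[derive(Debug)]
enum Error {
    IOError(std::io::Error),
    Misc(String),
}

impl From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Self {
        Error::IOError(err)
    }
}

// `?` uses the From impl above, so io::Error converts automatically.
fn read_config(path: &str) -> Result<String, Error> {
    let mut s = String::new();
    File::open(path)?.read_to_string(&mut s)?;
    if s.is_empty() {
        return Err(Error::Misc(format!("{} is empty", path)));
    }
    Ok(s)
}

fn main() {
    println!("{:?}", read_config("/etc/hostname"));
}
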
nix-rust/src/foreign.rs: new file, 14 lines

/// A wrapper around Nix's Source class that provides the Read trait.
#[repr(C)]
pub struct Source {
    fun: extern "C" fn(this: *mut libc::c_void, data: &mut [u8]) -> usize,
    this: *mut libc::c_void,
}

impl std::io::Read for Source {
    fn read(&mut self, buf: &mut [u8]) -> std::result::Result<usize, std::io::Error> {
        let n = (self.fun)(self.this, buf);
        assert!(n <= buf.len());
        Ok(n)
    }
}

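foreign::Source above is just a C-compatible pair of a callback and an opaque this pointer, adapted to std::io::Read. Below is a standalone sketch of the same callback pattern with the data supplied from a Rust-side buffer instead of a C++ Source; all names are illustrative, and std::ffi::c_void stands in for libc::c_void so the sketch has no external dependencies.

use std::ffi::c_void;
use std::io::Read;

#[repr(C)]
struct Source {
    fun: extern "C" fn(this: *mut c_void, data: &mut [u8]) -> usize,
    this: *mut c_void,
}

impl Read for Source {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let n = (self.fun)(self.this, buf);
        assert!(n <= buf.len());
        Ok(n)
    }
}

// A callback that drains a Vec<u8>; in Nix proper the callback calls back into C++.
#[allow(improper_ctypes_definitions)] // passing a slice across "C" mirrors the original's shortcut
extern "C" fn read_from_vec(this: *mut c_void, data: &mut [u8]) -> usize {
    let src = unsafe { &mut *(this as *mut Vec<u8>) };
    let n = src.len().min(data.len());
    data[..n].copy_from_slice(&src[..n]);
    src.drain(..n);
    n
}

fn main() {
    let mut payload = b"hello from the fake source".to_vec();
    let mut source = Source {
        fun: read_from_vec,
        this: &mut payload as *mut Vec<u8> as *mut c_void,
    };
    let mut out = String::new();
    source.read_to_string(&mut out).unwrap();
    println!("{}", out);
}

In the real crate the callback points back into C++, which is what lets tar::Archive stream directly from a Nix Source.
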
nix-rust/src/lib.rs: new file, 32 lines

mod error;
mod foreign;
mod tarfile;

pub use error::Error;

pub struct CBox<T> {
    pub ptr: *mut libc::c_void,
    phantom: std::marker::PhantomData<T>,
}

impl<T> CBox<T> {
    fn new(t: T) -> Self {
        unsafe {
            let size = std::mem::size_of::<T>();
            let ptr = libc::malloc(size);
            *(ptr as *mut T) = t; // FIXME: probably UB
            Self {
                ptr,
                phantom: std::marker::PhantomData,
            }
        }
    }
}

#[no_mangle]
pub extern "C" fn unpack_tarfile(
    source: foreign::Source,
    dest_dir: &str,
) -> CBox<Result<(), error::CppException>> {
    CBox::new(tarfile::unpack_tarfile(source, dest_dir).map_err(|err| err.into()))
}

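CBox above mallocs space and writes the value through a raw pointer assignment, which the FIXME itself flags as dubious. For comparison, a common dependency-free way to hand a heap-allocated Rust value across an FFI boundary is the Box::into_raw / from_raw round trip sketched below; this is an illustration of that alternative technique, not what the crate does.

// Move a value onto the heap and hand ownership out as a raw pointer,
// then reclaim it later so Rust runs the destructor.
fn into_raw<T>(t: T) -> *mut T {
    Box::into_raw(Box::new(t))
}

unsafe fn free_raw<T>(ptr: *mut T) {
    // Re-own the allocation; dropping the Box frees it.
    drop(Box::from_raw(ptr));
}

fn main() {
    let ptr = into_raw(Result::<(), String>::Err("boom".into()));
    unsafe {
        println!("{:?}", &*ptr);
        free_raw(ptr);
    }
}
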
nix-rust/src/tarfile.rs: new file, 46 lines

use crate::{foreign::Source, Error};
use std::fs;
use std::io;
use std::os::unix::fs::OpenOptionsExt;
use std::path::Path;
use tar::Archive;

pub fn unpack_tarfile(source: Source, dest_dir: &str) -> Result<(), Error> {
    let dest_dir = Path::new(dest_dir);

    let mut tar = Archive::new(source);

    for file in tar.entries()? {
        let mut file = file?;

        let dest_file = dest_dir.join(file.path()?);

        fs::create_dir_all(dest_file.parent().unwrap())?;

        match file.header().entry_type() {
            tar::EntryType::Directory => {
                fs::create_dir(dest_file)?;
            }
            tar::EntryType::Regular => {
                let mode = if file.header().mode()? & (libc::S_IXUSR as u32) == 0 {
                    0o666
                } else {
                    0o777
                };
                let mut f = fs::OpenOptions::new()
                    .create(true)
                    .write(true)
                    .mode(mode)
                    .open(dest_file)?;
                io::copy(&mut file, &mut f)?;
            }
            tar::EntryType::Symlink => {
                std::os::unix::fs::symlink(file.header().link_name()?.unwrap(), dest_file)?;
            }
            tar::EntryType::XGlobalHeader | tar::EntryType::XHeader => {}
            t => return Err(Error::Misc(format!("unsupported tar entry type '{:?}'", t))),
        }
    }

    Ok(())
}

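unpack_tarfile walks the archive entry by entry so it can normalise file modes to 0666/0777, recreate symlinks itself and skip pax headers, rather than relying on tar's one-shot unpack. The standalone sketch below, which assumes the tar 0.4 crate declared in the new Cargo.toml, shows the same entries() iteration over a small in-memory archive; the file name in it is made up for the example.

use std::io::Cursor;
use tar::{Archive, Builder, Header};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a tiny archive in memory.
    let mut builder = Builder::new(Vec::new());
    let data = b"hello";
    let mut header = Header::new_gnu();
    header.set_size(data.len() as u64);
    header.set_mode(0o644);
    header.set_cksum();
    builder.append_data(&mut header, "greeting.txt", &data[..])?;
    let buf = builder.into_inner()?;

    // Walk it the same way unpack_tarfile does, inspecting each entry.
    let mut archive = Archive::new(Cursor::new(buf));
    for entry in archive.entries()? {
        let entry = entry?;
        println!("{:?} {:?} mode {:o}",
            entry.header().entry_type(),
            entry.path()?,
            entry.header().mode()?);
    }
    Ok(())
}
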
nix.spec.in: deleted, 173 lines. Former contents:

%undefine _hardened_build

%global nixbld_user "nix-builder-"
%global nixbld_group "nixbld"

# NOTE: BUILD on EL7 requires
# - Centos / RHEL7 software collection repository
#   yum install centos-release-scl
#
# - Recent boost backport
#   curl https://copr.fedorainfracloud.org/coprs/whosthere/boost/repo/epel-7/whosthere-boost-epel-7.repo -o /etc/yum.repos.d/whosthere-boost-epel-7.repo
#

# Disable documentation generation
# necessary on some platforms
%bcond_without docgen

Summary: The Nix software deployment system
Name: nix
Version: @PACKAGE_VERSION@
Release: 2%{?dist}
License: LGPLv2+
Group: Applications/System
URL: http://nixos.org/
Source0: %{name}-%{version}.tar.bz2

Requires: curl
Requires: bzip2
Requires: gzip
Requires: xz
BuildRequires: bison
BuildRequires: boost-devel >= 1.60
BuildRequires: bzip2-devel

# for RHEL <= 7, we need software collections for a C++14 compatible compatible compiler
%if 0%{?rhel}
BuildRequires: devtoolset-7-gcc
BuildRequires: devtoolset-7-gcc-c++
%endif

BuildRequires: flex
BuildRequires: libcurl-devel
BuildRequires: libseccomp-devel
BuildRequires: openssl-devel
BuildRequires: sqlite-devel
BuildRequires: xz-devel

%description
Nix is a purely functional package manager. It allows multiple
versions of a package to be installed side-by-side, ensures that
dependency specifications are complete, supports atomic upgrades and
rollbacks, allows non-root users to install software, and has many
other features. It is the basis of the NixOS Linux distribution, but
it can be used equally well under other Unix systems.

%package devel
Summary: Development files for %{name}
Requires: %{name}%{?_isa} = %{version}-%{release}

%description devel
The %{name}-devel package contains libraries and header files for
developing applications that use %{name}.


%package doc
Summary: Documentation files for %{name}
BuildArch: noarch
Requires: %{name} = %{version}-%{release}

%description doc
The %{name}-doc package contains documentation files for %{name}.

%prep
%setup -q


%build
%if 0%{?rhel}
source /opt/rh/devtoolset-7/enable
%endif
extraFlags=
# - override docdir so large documentation files are owned by the
#   -doc subpackage
# - set localstatedir by hand to the preferred nix value
%configure --localstatedir=/nix/var \
  %{!?without_docgen:--disable-doc-gen} \
  --docdir=%{_defaultdocdir}/%{name}-doc-%{version} \
  $extraFlags
make V=1 %{?_smp_mflags}


%install
%if 0%{?rhel}
source /opt/rh/devtoolset-7/enable
%endif

make DESTDIR=$RPM_BUILD_ROOT install

find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';'

# make the store
mkdir -p $RPM_BUILD_ROOT/nix/store
chmod 1775 $RPM_BUILD_ROOT/nix/store

# make per-user directories
for d in profiles gcroots;
do
  mkdir -p $RPM_BUILD_ROOT/nix/var/nix/$d/per-user
  chmod 755 $RPM_BUILD_ROOT/nix/var/nix/$d/per-user
done

# fix permission of nix profile
# (until this is fixed in the relevant Makefile)
chmod -x $RPM_BUILD_ROOT%{_sysconfdir}/profile.d/nix.sh

# we ship this file in the base package
rm -f $RPM_BUILD_ROOT%{_defaultdocdir}/%{name}-doc-%{version}/README

# Get rid of Upstart job.
rm -rf $RPM_BUILD_ROOT%{_sysconfdir}/init


%clean
rm -rf $RPM_BUILD_ROOT


%pre
getent group %{nixbld_group} >/dev/null || groupadd -r %{nixbld_group}
for i in $(seq 10);
do
  getent passwd %{nixbld_user}$i >/dev/null || \
    useradd -r -g %{nixbld_group} -G %{nixbld_group} -d /var/empty \
      -s %{_sbindir}/nologin \
      -c "Nix build user $i" %{nixbld_user}$i
done

%post
chgrp %{nixbld_group} /nix/store
%if ! 0%{?rhel} || 0%{?rhel} >= 7
# Enable and start Nix worker
systemctl enable nix-daemon.socket nix-daemon.service
systemctl start nix-daemon.socket
%endif

%files
%license COPYING
%{_bindir}/nix*
%{_libdir}/*.so
%{_prefix}/libexec/*
%if ! 0%{?rhel} || 0%{?rhel} >= 7
%{_prefix}/lib/systemd/system/nix-daemon.socket
%{_prefix}/lib/systemd/system/nix-daemon.service
%endif
%{_datadir}/nix
#%if ! %{without docgen}
#%{_mandir}/man1/*.1*
#%{_mandir}/man5/*.5*
#%{_mandir}/man8/*.8*
#%endif
%config(noreplace) %{_sysconfdir}/profile.d/nix.sh
%config(noreplace) %{_sysconfdir}/profile.d/nix-daemon.sh
/nix

%files devel
%{_includedir}/nix
%{_prefix}/lib/pkgconfig/*.pc


#%if ! %{without docgen}
#%files doc
#%docdir %{_defaultdocdir}/%{name}-doc-%{version}
#%{_defaultdocdir}/%{name}-doc-%{version}
#%endif

perl/lib/Nix/Config.pm.in:

@@ -11,10 +11,6 @@ $logDir = $ENV{"NIX_LOG_DIR"} || "@nixlocalstatedir@/log/nix";
 $confDir = $ENV{"NIX_CONF_DIR"} || "@nixsysconfdir@/nix";
 $storeDir = $ENV{"NIX_STORE_DIR"} || "@nixstoredir@";

-$bzip2 = "@bzip2@";
-$xz = "@xz@";
-$curl = "@curl@";
-
 $useBindings = 1;

 %config = ();

scripts/nix-profile-daemon.sh.in:

@@ -2,7 +2,6 @@
 if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi
 __ETC_PROFILE_NIX_SOURCED=1

-export NIX_USER_PROFILE_DIR="@localstatedir@/nix/profiles/per-user/$USER"
 export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"

 # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
@@ -25,5 +24,4 @@ else
   done
 fi

-export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels"
 export PATH="$HOME/.nix-profile/bin:@localstatedir@/nix/profiles/default/bin:$PATH"

scripts/nix-profile.sh.in:

@@ -5,12 +5,6 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then

     NIX_LINK=$HOME/.nix-profile

-    NIX_USER_PROFILE_DIR=@localstatedir@/nix/profiles/per-user/$USER
-
-    # Append ~/.nix-defexpr/channels to $NIX_PATH so that <nixpkgs>
-    # paths work when the user has fetched the Nixpkgs channel.
-    export NIX_PATH=${NIX_PATH:+$NIX_PATH:}$HOME/.nix-defexpr/channels
-
     # Set up environment.
     # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix
     export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
@@ -35,5 +29,5 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then
     fi

     export PATH="$NIX_LINK/bin:$PATH"
-    unset NIX_LINK NIX_USER_PROFILE_DIR
+    unset NIX_LINK
 fi

src/build-remote/build-remote.cc:

@@ -188,7 +188,7 @@ static int _main(int argc, char * * argv)

             Store::Params storeParams;
             if (hasPrefix(bestMachine->storeUri, "ssh://")) {
-                storeParams["max-connections"] ="1";
+                storeParams["max-connections"] = "1";
                 storeParams["log-fd"] = "4";
                 if (bestMachine->sshKey != "")
                     storeParams["ssh-key"] = bestMachine->sshKey;

src/libexpr/eval.cc:

@@ -244,7 +244,7 @@ void initGC()
        that GC_expand_hp() causes a lot of virtual, but not physical
        (resident) memory to be allocated. This might be a problem on
        systems that don't overcommit. */
-    if (!getenv("GC_INITIAL_HEAP_SIZE")) {
+    if (!getEnv("GC_INITIAL_HEAP_SIZE")) {
         size_t size = 32 * 1024 * 1024;
 #if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES)
         size_t maxSize = 384 * 1024 * 1024;
@@ -335,7 +335,7 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
     , baseEnv(allocEnv(128))
     , staticBaseEnv(false, 0)
 {
-    countCalls = getEnv("NIX_COUNT_CALLS", "0") != "0";
+    countCalls = getEnv("NIX_COUNT_CALLS").value_or("0") != "0";

     assert(gcInitialised);

@@ -343,9 +343,8 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)

     /* Initialise the Nix expression search path. */
     if (!evalSettings.pureEval) {
-        Strings paths = parseNixPath(getEnv("NIX_PATH", ""));
         for (auto & i : _searchPath) addToSearchPath(i);
-        for (auto & i : paths) addToSearchPath(i);
+        for (auto & i : evalSettings.nixPath.get()) addToSearchPath(i);
     }
     addToSearchPath("nix=" + canonPath(settings.nixDataDir + "/nix/corepkgs", true));

@@ -461,7 +460,7 @@ Path EvalState::toRealPath(const Path & path, const PathSet & context)
         !context.empty() && store->isInStore(path)
         ? store->toRealPath(path)
         : path;
-};
+}


 Value * EvalState::addConstant(const string & name, Value & v)
@@ -651,13 +650,9 @@ Value * EvalState::allocValue()

 Env & EvalState::allocEnv(size_t size)
 {
-    if (size > std::numeric_limits<decltype(Env::size)>::max())
-        throw Error("environment size %d is too big", size);
-
     nrEnvs++;
     nrValuesInEnvs += size;
     Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
-    env->size = (decltype(Env::size)) size;
     env->type = Env::Plain;

     /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */
@@ -917,7 +912,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
     if (hasOverrides) {
         Value * vOverrides = (*v.attrs)[overrides->second.displ].value;
         state.forceAttrs(*vOverrides);
-        Bindings * newBnds = state.allocBindings(v.attrs->size() + vOverrides->attrs->size());
+        Bindings * newBnds = state.allocBindings(v.attrs->capacity() + vOverrides->attrs->size());
         for (auto & i : *v.attrs)
             newBnds->push_back(i);
         for (auto & i : *vOverrides->attrs) {
@@ -1794,7 +1789,7 @@ bool EvalState::eqValues(Value & v1, Value & v2)

 void EvalState::printStats()
 {
-    bool showStats = getEnv("NIX_SHOW_STATS", "0") != "0";
+    bool showStats = getEnv("NIX_SHOW_STATS").value_or("0") != "0";

     struct rusage buf;
     getrusage(RUSAGE_SELF, &buf);
@@ -1810,7 +1805,7 @@ void EvalState::printStats()
     GC_get_heap_usage_safe(&heapSize, 0, 0, 0, &totalBytes);
 #endif
     if (showStats) {
-        auto outPath = getEnv("NIX_SHOW_STATS_PATH","-");
+        auto outPath = getEnv("NIX_SHOW_STATS_PATH").value_or("-");
         std::fstream fs;
         if (outPath != "-")
             fs.open(outPath, std::fstream::out);
@@ -1902,7 +1897,7 @@ void EvalState::printStats()
             }
         }

-        if (getEnv("NIX_SHOW_SYMBOLS", "0") != "0") {
+        if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") {
             auto list = topObj.list("symbols");
             symbols.dump([&](const std::string & s) { list.elem(s); });
         }
@@ -1910,93 +1905,6 @@ void EvalState::printStats()
 }


-size_t valueSize(Value & v)
-{
-    std::set<const void *> seen;
-
-    auto doString = [&](const char * s) -> size_t {
-        if (!seen.insert(s).second) return 0;
-        return strlen(s) + 1;
-    };
-
-    std::function<size_t(Value & v)> doValue;
-    std::function<size_t(Env & v)> doEnv;
-
-    doValue = [&](Value & v) -> size_t {
-        if (!seen.insert(&v).second) return 0;
-
-        size_t sz = sizeof(Value);
-
-        switch (v.type) {
-        case tString:
-            sz += doString(v.string.s);
-            if (v.string.context)
-                for (const char * * p = v.string.context; *p; ++p)
-                    sz += doString(*p);
-            break;
-        case tPath:
-            sz += doString(v.path);
-            break;
-        case tAttrs:
-            if (seen.insert(v.attrs).second) {
-                sz += sizeof(Bindings) + sizeof(Attr) * v.attrs->capacity();
-                for (auto & i : *v.attrs)
-                    sz += doValue(*i.value);
-            }
-            break;
-        case tList1:
-        case tList2:
-        case tListN:
-            if (seen.insert(v.listElems()).second) {
-                sz += v.listSize() * sizeof(Value *);
-                for (size_t n = 0; n < v.listSize(); ++n)
-                    sz += doValue(*v.listElems()[n]);
-            }
-            break;
-        case tThunk:
-            sz += doEnv(*v.thunk.env);
-            break;
-        case tApp:
-            sz += doValue(*v.app.left);
-            sz += doValue(*v.app.right);
-            break;
-        case tLambda:
-            sz += doEnv(*v.lambda.env);
-            break;
-        case tPrimOpApp:
-            sz += doValue(*v.primOpApp.left);
-            sz += doValue(*v.primOpApp.right);
-            break;
-        case tExternal:
-            if (!seen.insert(v.external).second) break;
-            sz += v.external->valueSize(seen);
-            break;
-        default:
-            ;
-        }
-
-        return sz;
-    };
-
-    doEnv = [&](Env & env) -> size_t {
-        if (!seen.insert(&env).second) return 0;
-
-        size_t sz = sizeof(Env) + sizeof(Value *) * env.size;
-
-        if (env.type != Env::HasWithExpr)
-            for (size_t i = 0; i < env.size; ++i)
-                if (env.values[i])
-                    sz += doValue(*env.values[i]);
-
-        if (env.up) sz += doEnv(*env.up);
-
-        return sz;
-    };
-
-    return doValue(v);
-}
-
-
 string ExternalValueBase::coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const
 {
     throw TypeError(format("cannot coerce %1% to a string, at %2%") %
@@ -2015,6 +1923,22 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) {
 }


+EvalSettings::EvalSettings()
+{
+    auto var = getEnv("NIX_PATH");
+    if (var) nixPath = parseNixPath(*var);
+}
+
+Strings EvalSettings::getDefaultNixPath()
+{
+    Strings res;
+    auto add = [&](const Path & p) { if (pathExists(p)) { res.push_back(p); } };
+    add(getHome() + "/.nix-defexpr/channels");
+    add("nixpkgs=" + settings.nixStateDir + "/nix/profiles/per-user/root/channels/nixpkgs");
+    add(settings.nixStateDir + "/nix/profiles/per-user/root/channels");
+    return res;
+}
+
 EvalSettings evalSettings;

 static GlobalConfig::Register r1(&evalSettings);

src/libexpr/eval.hh:

@@ -40,7 +40,6 @@ struct PrimOp
 struct Env
 {
     Env * up;
-    unsigned short size; // used by ‘valueSize’
     unsigned short prevWith:14; // nr of levels up to next `with' environment
     enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2;
     Value * values[0];
@@ -363,9 +362,16 @@ struct InvalidPathError : EvalError

 struct EvalSettings : Config
 {
+    EvalSettings();
+
+    static Strings getDefaultNixPath();
+
     Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
         "Whether builtin functions that allow executing native code should be enabled."};

+    Setting<Strings> nixPath{this, getDefaultNixPath(), "nix-path",
+        "List of directories to be searched for <...> file references."};
+
     Setting<bool> restrictEval{this, false, "restrict-eval",
         "Whether to restrict file system access to paths in $NIX_PATH, "
         "and network access to the URI prefixes listed in 'allowed-uris'."};

src/libexpr/json-to-value.hh:

@@ -6,7 +6,7 @@

 namespace nix {

-MakeError(JSONParseError, EvalError)
+MakeError(JSONParseError, EvalError);

 void parseJSON(EvalState & state, const string & s, Value & v);

src/libexpr/nixexpr.hh:

@@ -9,14 +9,14 @@
 namespace nix {


-MakeError(EvalError, Error)
-MakeError(ParseError, Error)
-MakeError(AssertionError, EvalError)
-MakeError(ThrownError, AssertionError)
-MakeError(Abort, EvalError)
-MakeError(TypeError, EvalError)
-MakeError(UndefinedVarError, Error)
-MakeError(RestrictedPathError, Error)
+MakeError(EvalError, Error);
+MakeError(ParseError, Error);
+MakeError(AssertionError, EvalError);
+MakeError(ThrownError, AssertionError);
+MakeError(Abort, EvalError);
+MakeError(TypeError, EvalError);
+MakeError(UndefinedVarError, Error);
+MakeError(RestrictedPathError, Error);


 /* Position objects. */

src/libexpr/parser.y:

@@ -20,6 +20,7 @@

 #include "nixexpr.hh"
 #include "eval.hh"
+#include "globals.hh"

 namespace nix {

@@ -401,7 +402,12 @@ expr_simple
           new ExprVar(data->symbols.create("__nixPath"))),
           new ExprString(data->symbols.create(path)));
   }
-  | URI { $$ = new ExprString(data->symbols.create($1)); }
+  | URI {
+      static bool noURLLiterals = settings.isExperimentalFeatureEnabled("no-url-literals");
+      if (noURLLiterals)
+          throw ParseError("URL literals are disabled, at %s", CUR_POS);
+      $$ = new ExprString(data->symbols.create($1));
+  }
   | '(' expr ')' { $$ = $2; }
   /* Let expressions `let {..., body = ...}' are just desugared
      into `(rec {..., body = ...}).body'. */

src/libexpr/primops.cc:

@@ -469,7 +469,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val
 static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     string name = state.forceStringNoCtx(*args[0], pos);
-    mkString(v, evalSettings.restrictEval || evalSettings.pureEval ? "" : getEnv(name));
+    mkString(v, evalSettings.restrictEval || evalSettings.pureEval ? "" : getEnv(name).value_or(""));
 }


@@ -506,13 +506,6 @@ static void prim_trace(EvalState & state, const Pos & pos, Value * * args, Value
 }


-void prim_valueSize(EvalState & state, const Pos & pos, Value * * args, Value & v)
-{
-    /* We're not forcing the argument on purpose. */
-    mkInt(v, valueSize(*args[0]));
-}
-
-
 /*************************************************************
  * Derivations
  *************************************************************/
@@ -2206,7 +2199,6 @@ void EvalState::createBaseEnv()

     // Debugging
     addPrimOp("__trace", 2, prim_trace);
-    addPrimOp("__valueSize", 1, prim_valueSize);

     // Paths
     addPrimOp("__toPath", 1, prim_toPath);

@ -5,6 +5,7 @@
|
||||||
#include "store-api.hh"
|
#include "store-api.hh"
|
||||||
#include "pathlocks.hh"
|
#include "pathlocks.hh"
|
||||||
#include "hash.hh"
|
#include "hash.hh"
|
||||||
|
#include "tarfile.hh"
|
||||||
|
|
||||||
#include <sys/time.h>
|
#include <sys/time.h>
|
||||||
|
|
||||||
|
@ -164,7 +165,6 @@ GitInfo exportGit(ref<Store> store, std::string uri,
|
||||||
isLocal = true;
|
isLocal = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, uri).to_string(Base32, false);
|
|
||||||
Path repoDir;
|
Path repoDir;
|
||||||
|
|
||||||
if (isLocal) {
|
if (isLocal) {
|
||||||
|
@ -172,13 +172,11 @@ GitInfo exportGit(ref<Store> store, std::string uri,
|
||||||
if (!rev)
|
if (!rev)
|
||||||
rev = Hash(chomp(runProgram("git", true, { "-C", uri, "rev-parse", *ref })), htSHA1);
|
rev = Hash(chomp(runProgram("git", true, { "-C", uri, "rev-parse", *ref })), htSHA1);
|
||||||
|
|
||||||
if (!pathExists(cacheDir))
|
|
||||||
createDirs(cacheDir);
|
|
||||||
|
|
||||||
repoDir = uri;
|
repoDir = uri;
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
|
Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, uri).to_string(Base32, false);
|
||||||
repoDir = cacheDir;
|
repoDir = cacheDir;
|
||||||
|
|
||||||
if (!pathExists(cacheDir)) {
|
if (!pathExists(cacheDir)) {
|
||||||
|
@ -256,12 +254,16 @@ GitInfo exportGit(ref<Store> store, std::string uri,
|
||||||
|
|
||||||
// FIXME: should pipe this, or find some better way to extract a
|
// FIXME: should pipe this, or find some better way to extract a
|
||||||
// revision.
|
// revision.
|
||||||
auto tar = runProgram("git", true, { "-C", repoDir, "archive", gitInfo.rev.gitRev() });
|
auto source = sinkToSource([&](Sink & sink) {
|
||||||
|
RunOptions gitOptions("git", { "-C", repoDir, "archive", gitInfo.rev.gitRev() });
|
||||||
|
gitOptions.standardOut = &sink;
|
||||||
|
runProgram2(gitOptions);
|
||||||
|
});
|
||||||
|
|
||||||
Path tmpDir = createTempDir();
|
Path tmpDir = createTempDir();
|
||||||
AutoDelete delTmpDir(tmpDir, true);
|
AutoDelete delTmpDir(tmpDir, true);
|
||||||
|
|
||||||
runProgram("tar", true, { "x", "-C", tmpDir }, tar);
|
unpackTarfile(*source, tmpDir);
|
||||||
|
|
||||||
gitInfo.storePath = store->addToStore(name, tmpDir);
|
gitInfo.storePath = store->addToStore(name, tmpDir);
|
||||||
|
|
||||||
|
|
|
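The hunk above replaces a fully buffered runProgram()/tar pipeline with a streamed one (sinkToSource feeding unpackTarfile). A rough standalone illustration of the same streaming idea, using plain POSIX popen() instead of Nix's helper classes:

    #include <cstdio>
    #include <functional>
    #include <stdexcept>
    #include <string>

    // Feed the stdout of `command` to `sink` in fixed-size chunks, so
    // the whole archive never has to be held in memory at once.
    void streamCommandOutput(const std::string & command,
        const std::function<void(const char *, size_t)> & sink)
    {
        FILE * pipe = popen(command.c_str(), "r");
        if (!pipe) throw std::runtime_error("cannot run: " + command);
        char buffer[8192];
        size_t n;
        while ((n = fread(buffer, 1, sizeof(buffer), pipe)) > 0)
            sink(buffer, n);
        if (pclose(pipe) != 0)
            throw std::runtime_error("command failed: " + command);
    }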
@@ -35,7 +35,6 @@ struct Env;
 struct Expr;
 struct ExprLambda;
 struct PrimOp;
-struct PrimOp;
 class Symbol;
 struct Pos;
 class EvalState;

@@ -63,9 +62,6 @@ class ExternalValueBase
     /* Return a string to be used in builtins.typeOf */
     virtual string typeOf() const = 0;
 
-    /* How much space does this value take up */
-    virtual size_t valueSize(std::set<const void *> & seen) const = 0;
-
     /* Coerce the value to a string. Defaults to uncoercable, i.e. throws an
      * error
      */

@@ -261,12 +257,6 @@ static inline void mkPathNoCopy(Value & v, const char * s)
 void mkPath(Value & v, const char * s);
 
 
-/* Compute the size in bytes of the given value, including all values
-   and environments reachable from it. Static expressions (Exprs) are
-   not included. */
-size_t valueSize(Value & v);
-
-
 #if HAVE_BOEHMGC
 typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
 typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap;

@@ -155,7 +155,7 @@ void initNix()
        sshd). This breaks build users because they don't have access
        to the TMPDIR, in particular in ‘nix-store --serve’. */
 #if __APPLE__
-    if (getuid() == 0 && hasPrefix(getEnv("TMPDIR"), "/var/folders/"))
+    if (getuid() == 0 && hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/"))
         unsetenv("TMPDIR");
 #endif
 }
@@ -14,6 +14,7 @@
 #include "nar-info.hh"
 #include "parsed-derivations.hh"
 #include "machines.hh"
+#include "daemon.hh"
 
 #include <algorithm>
 #include <iostream>

@@ -34,6 +35,7 @@
 #include <sys/select.h>
 #include <sys/resource.h>
 #include <sys/socket.h>
+#include <sys/un.h>
 #include <fcntl.h>
 #include <netdb.h>
 #include <unistd.h>

@@ -766,9 +768,6 @@ private:
        immediate input paths). */
     PathSet inputPaths;
 
-    /* Referenceable paths (i.e., input and output paths). */
-    PathSet allPaths;
-
     /* Outputs that are already valid. If we're repairing, these are
        the outputs that are valid *and* not corrupt. */
     PathSet validPaths;

@@ -806,9 +805,13 @@ private:
     /* Pipe for the builder's standard output/error. */
     Pipe builderOut;
 
-    /* Pipe for synchronising updates to the builder user namespace. */
+    /* Pipe for synchronising updates to the builder namespaces. */
     Pipe userNamespaceSync;
 
+    /* The mount namespace of the builder, used to add additional
+       paths to the sandbox as a result of recursive Nix calls. */
+    AutoCloseFD sandboxMountNamespace;
+
     /* The build hook. */
     std::unique_ptr<HookInstance> hook;
 

@@ -887,6 +890,29 @@ private:
     /* The remote machine on which we're building. */
     std::string machineName;
 
+    /* The recursive Nix daemon socket. */
+    AutoCloseFD daemonSocket;
+
+    /* The daemon main thread. */
+    std::thread daemonThread;
+
+    /* The daemon worker threads. */
+    std::vector<std::thread> daemonWorkerThreads;
+
+    /* Paths that were added via recursive Nix calls. */
+    PathSet addedPaths;
+
+    /* Recursive Nix calls are only allowed to build or realize paths
+       in the original input closure or added via a recursive Nix call
+       (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
+       /nix/store/<bla> is some arbitrary path in a binary cache). */
+    bool isAllowed(const Path & path)
+    {
+        return inputPaths.count(path) || addedPaths.count(path);
+    }
+
+    friend class RestrictedStore;
+
 public:
     DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
         Worker & worker, BuildMode buildMode = bmNormal);

@@ -940,9 +966,20 @@ private:
     /* Fill in the environment for the builder. */
     void initEnv();
 
+    /* Setup tmp dir location. */
+    void initTmpDir();
+
     /* Write a JSON file containing the derivation attributes. */
     void writeStructuredAttrs();
 
+    void startDaemon();
+
+    void stopDaemon();
+
+    /* Add 'path' to the set of paths that may be referenced by the
+       outputs, and make it appear in the sandbox. */
+    void addDependency(const Path & path);
+
     /* Make a file owned by the builder. */
     void chownToBuilder(const Path & path);
 

@@ -1044,6 +1081,7 @@ DerivationGoal::~DerivationGoal()
     /* Careful: we should never ever throw an exception from a
        destructor. */
     try { killChild(); } catch (...) { ignoreException(); }
+    try { stopDaemon(); } catch (...) { ignoreException(); }
     try { deleteTmpDir(false); } catch (...) { ignoreException(); }
     try { closeLogFile(); } catch (...) { ignoreException(); }
 }

@@ -1333,12 +1371,6 @@ void DerivationGoal::inputsRealised()
     /* Gather information necessary for computing the closure and/or
        running the build hook. */
 
-    /* The outputs are referenceable paths. */
-    for (auto & i : drv->outputs) {
-        debug(format("building path '%1%'") % i.second.path);
-        allPaths.insert(i.second.path);
-    }
-
     /* Determine the full set of input paths. */
 
     /* First, the input derivations. */

@@ -1363,8 +1395,6 @@ void DerivationGoal::inputsRealised()
 
     debug(format("added input paths %1%") % showPaths(inputPaths));
 
-    allPaths.insert(inputPaths.begin(), inputPaths.end());
-
     /* Is this a fixed-output derivation? */
     fixedOutput = drv->isFixedOutput();
 

@@ -1516,7 +1546,7 @@ void replaceValidPath(const Path & storePath, const Path tmpPath)
 }
 
 
-MakeError(NotDeterministic, BuildError)
+MakeError(NotDeterministic, BuildError);
 
 
 void DerivationGoal::buildDone()

@@ -1528,6 +1558,8 @@ void DerivationGoal::buildDone()
        uid and then messing around with our output. */
     Finally releaseBuildUser([&]() { buildUser.reset(); });
 
+    sandboxMountNamespace = -1;
+
     /* Since we got an EOF on the logger pipe, the builder is presumed
        to have terminated.  In fact, the builder could also have
        simply have closed its end of the pipe, so just to be sure,

@@ -1559,6 +1591,9 @@ void DerivationGoal::buildDone()
        root. */
     if (buildUser) buildUser->kill();
 
+    /* Terminate the recursive Nix daemon. */
+    stopDaemon();
+
     bool diskFull = false;
 
     try {

@@ -1957,13 +1992,6 @@ void DerivationGoal::startBuilder()
     auto drvName = storePathToName(drvPath);
     tmpDir = createTempDir("", "nix-build-" + drvName, false, false, 0700);
 
-    /* In a sandbox, for determinism, always use the same temporary
-       directory. */
-#if __linux__
-    tmpDirInSandbox = useChroot ? settings.sandboxBuildDir : tmpDir;
-#else
-    tmpDirInSandbox = tmpDir;
-#endif
     chownToBuilder(tmpDir);
 
     /* Substitute output placeholders with the actual output paths. */

@@ -2218,6 +2246,11 @@ void DerivationGoal::startBuilder()
         }
     }
 
+    /* Fire up a Nix daemon to process recursive Nix calls from the
+       builder. */
+    if (parsedDrv->getRequiredSystemFeatures().count("recursive-nix"))
+        startDaemon();
+
     /* Run the builder. */
     printMsg(lvlChatty, format("executing builder '%1%'") % drv->builder);
 

@@ -2367,7 +2400,7 @@ void DerivationGoal::startBuilder()
         int res = helper.wait();
         if (res != 0 && settings.sandboxFallback) {
             useChroot = false;
-            tmpDirInSandbox = tmpDir;
+            initTmpDir();
             goto fallback;
         } else if (res != 0)
             throw Error("unable to start build process");
@@ -2392,6 +2425,12 @@ void DerivationGoal::startBuilder()
         writeFile("/proc/" + std::to_string(pid) + "/gid_map",
             (format("%d %d 1") % sandboxGid % hostGid).str());
 
+        /* Save the mount namespace of the child. We have to do this
+           *before* the child does a chroot. */
+        sandboxMountNamespace = open(fmt("/proc/%d/ns/mnt", (pid_t) pid).c_str(), O_RDONLY);
+        if (sandboxMountNamespace.get() == -1)
+            throw SysError("getting sandbox mount namespace");
+
         /* Signal the builder that we've updated its user namespace. */
        writeFull(userNamespaceSync.writeSide.get(), "1");
         userNamespaceSync.writeSide = -1;
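The lines added above keep a file descriptor for /proc/<pid>/ns/mnt so the parent can later enter the builder's mount namespace. A stripped-down, Linux-only sketch of that technique (the function names here are invented for illustration):

    #include <fcntl.h>
    #include <sched.h>
    #include <stdexcept>
    #include <string>
    #include <unistd.h>

    // Open a handle to the mount namespace of `pid`; the namespace stays
    // reachable through this fd even after the target process chroots.
    int openMountNamespace(pid_t pid)
    {
        std::string path = "/proc/" + std::to_string(pid) + "/ns/mnt";
        int fd = open(path.c_str(), O_RDONLY);
        if (fd == -1) throw std::runtime_error("cannot open " + path);
        return fd;
    }

    // Join that namespace (needs CAP_SYS_ADMIN); afterwards mount() calls
    // made by this process happen inside the target namespace.
    void enterMountNamespace(int nsFd)
    {
        if (setns(nsFd, CLONE_NEWNS) == -1)
            throw std::runtime_error("setns failed");
    }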
@@ -2423,31 +2462,14 @@ void DerivationGoal::startBuilder()
 }
 
 
-void DerivationGoal::initEnv()
-{
-    env.clear();
-
-    /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
-       PATH is not set. We don't want this, so we fill it in with some dummy
-       value. */
-    env["PATH"] = "/path-not-set";
-
-    /* Set HOME to a non-existing path to prevent certain programs from using
-       /etc/passwd (or NIS, or whatever) to locate the home directory (for
-       example, wget looks for ~/.wgetrc). I.e., these tools use /etc/passwd
-       if HOME is not set, but they will just assume that the settings file
-       they are looking for does not exist if HOME is set but points to some
-       non-existing path. */
-    env["HOME"] = homeDir;
-
-    /* Tell the builder where the Nix store is. Usually they
-       shouldn't care, but this is useful for purity checking (e.g.,
-       the compiler or linker might only want to accept paths to files
-       in the store or in the build directory). */
-    env["NIX_STORE"] = worker.store.storeDir;
-
-    /* The maximum number of cores to utilize for parallel building. */
-    env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
+void DerivationGoal::initTmpDir() {
+    /* In a sandbox, for determinism, always use the same temporary
+       directory. */
+#if __linux__
+    tmpDirInSandbox = useChroot ? settings.sandboxBuildDir : tmpDir;
+#else
+    tmpDirInSandbox = tmpDir;
+#endif
 
     /* In non-structured mode, add all bindings specified in the
        derivation via the environment, except those listed in the

@@ -2486,6 +2508,35 @@ void DerivationGoal::initEnv()
        inode of the current directory doesn't appear in .. (because
        getdents returns the inode of the mount point). */
     env["PWD"] = tmpDirInSandbox;
+}
+
+void DerivationGoal::initEnv()
+{
+    env.clear();
+
+    /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
+       PATH is not set. We don't want this, so we fill it in with some dummy
+       value. */
+    env["PATH"] = "/path-not-set";
+
+    /* Set HOME to a non-existing path to prevent certain programs from using
+       /etc/passwd (or NIS, or whatever) to locate the home directory (for
+       example, wget looks for ~/.wgetrc). I.e., these tools use /etc/passwd
+       if HOME is not set, but they will just assume that the settings file
+       they are looking for does not exist if HOME is set but points to some
+       non-existing path. */
+    env["HOME"] = homeDir;
+
+    /* Tell the builder where the Nix store is. Usually they
+       shouldn't care, but this is useful for purity checking (e.g.,
+       the compiler or linker might only want to accept paths to files
+       in the store or in the build directory). */
+    env["NIX_STORE"] = worker.store.storeDir;
+
+    /* The maximum number of cores to utilize for parallel building. */
+    env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
+
+    initTmpDir();
+
     /* Compatibility hack with Nix <= 0.7: if this is a fixed-output
        derivation, tell the builder, so that for instance `fetchurl'

@@ -2504,7 +2555,7 @@ void DerivationGoal::initEnv()
        already know the cryptographic hash of the output). */
     if (fixedOutput) {
         for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings()))
-            env[i] = getEnv(i);
+            env[i] = getEnv(i).value_or("");
     }
 
     /* Currently structured log messages piggyback on stderr, but we
@@ -2622,6 +2673,319 @@ void DerivationGoal::writeStructuredAttrs()
 }
 
 
+/* A wrapper around LocalStore that only allows building/querying of
+   paths that are in the input closures of the build or were added via
+   recursive Nix calls. */
+struct RestrictedStore : public LocalFSStore
+{
+    ref<LocalStore> next;
+
+    DerivationGoal & goal;
+
+    RestrictedStore(const Params & params, ref<LocalStore> next, DerivationGoal & goal)
+        : Store(params), LocalFSStore(params), next(next), goal(goal)
+    { }
+
+    Path getRealStoreDir() override
+    { return next->realStoreDir; }
+
+    std::string getUri() override
+    { return next->getUri(); }
+
+    PathSet queryAllValidPaths() override
+    {
+        PathSet paths;
+        for (auto & p : goal.inputPaths) paths.insert(p);
+        for (auto & p : goal.addedPaths) paths.insert(p);
+        return paths;
+    }
+
+    void queryPathInfoUncached(const Path & path,
+        Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override
+    {
+        if (goal.isAllowed(path)) {
+            try {
+                /* Censor impure information. */
+                auto info = std::make_shared<ValidPathInfo>(*next->queryPathInfo(path));
+                info->deriver.clear();
+                info->registrationTime = 0;
+                info->ultimate = false;
+                info->sigs.clear();
+                callback(info);
+            } catch (InvalidPath &) {
+                callback(nullptr);
+            }
+        } else
+            callback(nullptr);
+    };
+
+    void queryReferrers(const Path & path, PathSet & referrers) override
+    { }
+
+    PathSet queryDerivationOutputs(const Path & path) override
+    { throw Error("queryDerivationOutputs"); }
+
+    StringSet queryDerivationOutputNames(const Path & path) override
+    { throw Error("queryDerivationOutputNames"); }
+
+    Path queryPathFromHashPart(const string & hashPart) override
+    { throw Error("queryPathFromHashPart"); }
+
+    Path addToStore(const string & name, const Path & srcPath,
+        bool recursive = true, HashType hashAlgo = htSHA256,
+        PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
+    { throw Error("addToStore"); }
+
+    void addToStore(const ValidPathInfo & info, Source & narSource,
+        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs,
+        std::shared_ptr<FSAccessor> accessor = 0) override
+    {
+        next->addToStore(info, narSource, repair, checkSigs, accessor);
+        goal.addDependency(info.path);
+    }
+
+    Path addToStoreFromDump(const string & dump, const string & name,
+        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
+    {
+        auto path = next->addToStoreFromDump(dump, name, recursive, hashAlgo, repair);
+        goal.addDependency(path);
+        return path;
+    }
+
+    Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, RepairFlag repair = NoRepair) override
+    {
+        auto path = next->addTextToStore(name, s, references, repair);
+        goal.addDependency(path);
+        return path;
+    }
+
+    void narFromPath(const Path & path, Sink & sink) override
+    {
+        if (!goal.isAllowed(path))
+            throw InvalidPath("cannot dump unknown path '%s' in recursive Nix", path);
+        LocalFSStore::narFromPath(path, sink);
+    }
+
+    void ensurePath(const Path & path) override
+    {
+        if (!goal.isAllowed(path))
+            throw InvalidPath("cannot substitute unknown path '%s' in recursive Nix", path);
+        /* Nothing to be done; 'path' must already be valid. */
+    }
+
+    void buildPaths(const PathSet & paths, BuildMode buildMode) override
+    {
+        if (buildMode != bmNormal) throw Error("unsupported build mode");
+
+        PathSet newPaths;
+
+        for (auto & path : paths) {
+            DrvPathWithOutputs i = parseDrvPathWithOutputs(path);
+            if (isDerivation(i.first)) {
+                if (!goal.isAllowed(i.first))
+                    throw InvalidPath("cannot build unknown path '%s' in recursive Nix", i.first);
+                auto drv = derivationFromPath(i.first);
+                for (auto & output : drv.outputs)
+                    if (wantOutput(output.first, i.second))
+                        newPaths.insert(output.second.path);
+            } else if (!goal.isAllowed(path))
+                throw InvalidPath("cannot build unknown path '%s' in recursive Nix", path);
+        }
+
+        next->buildPaths(paths, buildMode);
+
+        PathSet closure;
+        next->computeFSClosure(newPaths, closure);
+        for (auto & path : closure)
+            goal.addDependency(path);
+    }
+
+    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+        BuildMode buildMode = bmNormal) override
+    { unsupported("buildDerivation"); }
+
+    void addTempRoot(const Path & path)
+    { }
+
+    void addIndirectRoot(const Path & path)
+    { }
+
+    Roots findRoots()
+    { return Roots(); }
+
+    void collectGarbage(const GCOptions & options, GCResults & results)
+    { }
+
+    void addSignatures(const Path & storePath, const StringSet & sigs)
+    { unsupported("addSignatures"); }
+
+    void queryMissing(const PathSet & targets,
+        PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+        unsigned long long & downloadSize, unsigned long long & narSize)
+    {
+        /* This is slightly impure since it leaks information to the
+           client about what paths will be built/substituted or are
+           already present. Probably not a big deal. */
+
+        PathSet allowed;
+        for (auto & path : targets) {
+            DrvPathWithOutputs i = parseDrvPathWithOutputs(path);
+            if (goal.isAllowed(i.first))
+                allowed.insert(i.first);
+            else
+                unknown.insert(i.first);
+        }
+
+        next->queryMissing(allowed, willBuild, willSubstitute,
+            unknown, downloadSize, narSize);
+    }
+};
+
+
+void DerivationGoal::startDaemon()
+{
+    settings.requireExperimentalFeature("recursive-nix");
+
+    Store::Params params;
+    params["path-info-cache-size"] = "0";
+    params["store"] = worker.store.storeDir;
+    params["root"] = worker.store.rootDir;
+    params["state"] = "/no-such-path";
+    params["log"] = "/no-such-path";
+    auto store = make_ref<RestrictedStore>(params,
+        ref<LocalStore>(std::dynamic_pointer_cast<LocalStore>(worker.store.shared_from_this())),
+        *this);
+
+    addedPaths.clear();
+
+    auto socketName = ".nix-socket";
+    Path socketPath = tmpDir + "/" + socketName;
+    env["NIX_REMOTE"] = "unix://" + tmpDirInSandbox + "/" + socketName;
+
+    daemonSocket = createUnixDomainSocket(socketPath, 0600);
+
+    chownToBuilder(socketPath);
+
+    daemonThread = std::thread([this, store]() {
+
+        while (true) {
+
+            /* Accept a connection. */
+            struct sockaddr_un remoteAddr;
+            socklen_t remoteAddrLen = sizeof(remoteAddr);
+
+            AutoCloseFD remote = accept(daemonSocket.get(),
+                (struct sockaddr *) &remoteAddr, &remoteAddrLen);
+            if (!remote) {
+                if (errno == EINTR) continue;
+                if (errno == EINVAL) break;
+                throw SysError("accepting connection");
+            }
+
+            closeOnExec(remote.get());
+
+            debug("received daemon connection");
+
+            auto workerThread = std::thread([this, store, remote{std::move(remote)}]() {
+                FdSource from(remote.get());
+                FdSink to(remote.get());
+                try {
+                    daemon::processConnection(store, from, to,
+                        daemon::NotTrusted, daemon::Recursive, "nobody", 65535);
+                    debug("terminated daemon connection");
+                } catch (SysError &) {
+                    ignoreException();
+                }
+            });
+
+            daemonWorkerThreads.push_back(std::move(workerThread));
+        }
+
+        debug("daemon shutting down");
+    });
+}
+
+
+void DerivationGoal::stopDaemon()
+{
+    if (daemonSocket && shutdown(daemonSocket.get(), SHUT_RDWR) == -1)
+        throw SysError("shutting down daemon socket");
+
+    if (daemonThread.joinable())
+        daemonThread.join();
+
+    // FIXME: should prune worker threads more quickly.
+    // FIXME: shutdown the client socket to speed up worker termination.
+    for (auto & thread : daemonWorkerThreads)
+        thread.join();
+    daemonWorkerThreads.clear();
+
+    daemonSocket = -1;
+}
+
+
+void DerivationGoal::addDependency(const Path & path)
+{
+    worker.store.assertStorePath(path);
+
+    if (isAllowed(path)) return;
+
+    addedPaths.insert(path);
+
+    /* If we're doing a sandbox build, then we have to make the path
+       appear in the sandbox. */
+    if (useChroot) {
+
+        debug("materialising '%s' in the sandbox", path);
+
+#if __linux__
+
+        Path source = worker.store.toRealPath(path);
+        Path target = chrootRootDir + path;
+        debug("bind-mounting %s -> %s", target, source);
+
+        if (pathExists(target))
+            throw Error("store path '%s' already exists in the sandbox", path);
+
+        struct stat st;
+        if (lstat(source.c_str(), &st))
+            throw SysError("getting attributes of path '%s'", source);
+
+        if (S_ISDIR(st.st_mode)) {
+
+            /* Bind-mount the path into the sandbox. This requires
+               entering its mount namespace, which is not possible
+               in multithreaded programs. So we do this in a
+               child process.*/
+            Pid child(startProcess([&]() {
+
+                if (setns(sandboxMountNamespace.get(), 0) == -1)
+                    throw SysError("entering sandbox mount namespace");
+
+                createDirs(target);
+
+                if (mount(source.c_str(), target.c_str(), "", MS_BIND, 0) == -1)
+                    throw SysError("bind mount from '%s' to '%s' failed", source, target);
+
+                _exit(0);
+            }));
+
+            int status = child.wait();
+            if (status != 0)
+                throw Error("could not add path '%s' to sandbox", path);
+
+        } else
+            linkOrCopy(source, target);
+
+#else
+        throw Error("don't know how to make path '%s' (produced by a recursive Nix call) appear in the sandbox", path);
+#endif
+
+    }
+}
+
+
 void DerivationGoal::chownToBuilder(const Path & path)
 {
     if (!buildUser) return;
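The stopDaemon() teardown in the hunk above relies on the fact that shutting down the listening socket makes the blocked accept() in the daemon thread return (with EINVAL on Linux), so the thread can be joined. A small self-contained sketch of that teardown pattern, with error checks trimmed for brevity:

    #include <cstdio>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <thread>
    #include <unistd.h>

    int main()
    {
        int listener = socket(AF_UNIX, SOCK_STREAM, 0);
        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        std::snprintf(addr.sun_path, sizeof(addr.sun_path), "demo.sock");
        unlink(addr.sun_path);
        bind(listener, (sockaddr *) &addr, sizeof(addr));
        listen(listener, 5);

        std::thread acceptor([&]() {
            while (true) {
                int client = accept(listener, nullptr, nullptr);
                if (client == -1) break;   // the shutdown() below lands here
                close(client);
            }
        });

        shutdown(listener, SHUT_RDWR);     // unblocks the pending accept()
        acceptor.join();
        close(listener);
        unlink(addr.sun_path);
    }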
@@ -2757,15 +3121,30 @@ void DerivationGoal::runChild()
                outside of the namespace. Making a subtree private is
                local to the namespace, though, so setting MS_PRIVATE
                does not affect the outside world. */
-            if (mount(0, "/", 0, MS_REC|MS_PRIVATE, 0) == -1) {
-                throw SysError("unable to make '/' private mount");
-            }
+            if (mount(0, "/", 0, MS_PRIVATE | MS_REC, 0) == -1)
+                throw SysError("unable to make '/' private");
 
             /* Bind-mount chroot directory to itself, to treat it as a
                different filesystem from /, as needed for pivot_root. */
             if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), 0, MS_BIND, 0) == -1)
                 throw SysError(format("unable to bind mount '%1%'") % chrootRootDir);
 
+            /* Bind-mount the sandbox's Nix store onto itself so that
+               we can mark it as a "shared" subtree, allowing bind
+               mounts made in *this* mount namespace to be propagated
+               into the child namespace created by the
+               unshare(CLONE_NEWNS) call below.
+
+               Marking chrootRootDir as MS_SHARED causes pivot_root()
+               to fail with EINVAL. Don't know why. */
+            Path chrootStoreDir = chrootRootDir + worker.store.storeDir;
+
+            if (mount(chrootStoreDir.c_str(), chrootStoreDir.c_str(), 0, MS_BIND, 0) == -1)
+                throw SysError("unable to bind mount the Nix store", chrootStoreDir);
+
+            if (mount(0, chrootStoreDir.c_str(), 0, MS_SHARED, 0) == -1)
+                throw SysError("unable to make '%s' shared", chrootStoreDir);
+
             /* Set up a nearly empty /dev, unless the user asked to
                bind-mount the host /dev. */
             Strings ss;
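The comment block above describes marking the sandbox's store as a "shared" subtree so that bind mounts made later in the parent namespace propagate into the child. A compressed, Linux-only illustration of that trick (the helper name is invented):

    #include <stdexcept>
    #include <string>
    #include <sys/mount.h>

    // Turn `dir` into a shared mount point (requires CAP_SYS_ADMIN).
    // A directory only carries propagation flags if it is itself a
    // mount point, hence the self bind-mount first.
    void makeSharedSubtree(const std::string & dir)
    {
        if (mount(dir.c_str(), dir.c_str(), nullptr, MS_BIND, nullptr) == -1)
            throw std::runtime_error("bind-mounting " + dir + " onto itself failed");
        if (mount(nullptr, dir.c_str(), nullptr, MS_SHARED, nullptr) == -1)
            throw std::runtime_error("marking " + dir + " shared failed");
    }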
@@ -2867,6 +3246,19 @@ void DerivationGoal::runChild()
                     }
                 }
 
+                /* Unshare this mount namespace. This is necessary because
+                   pivot_root() below changes the root of the mount
+                   namespace. This means that the call to setns() in
+                   addDependency() would hide the host's filesystem,
+                   making it impossible to bind-mount paths from the host
+                   Nix store into the sandbox. Therefore, we save the
+                   pre-pivot_root namespace in
+                   sandboxMountNamespace. Since we made /nix/store a
+                   shared subtree above, this allows addDependency() to
+                   make paths appear in the sandbox. */
+                if (unshare(CLONE_NEWNS) == -1)
+                    throw SysError("unsharing mount namespace");
+
                 /* Do the chroot(). */
                 if (chdir(chrootRootDir.c_str()) == -1)
                     throw SysError(format("cannot change directory to '%1%'") % chrootRootDir);

@@ -3076,7 +3468,7 @@ void DerivationGoal::runChild()
 
         /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
            to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
-        Path globalTmpDir = canonPath(getEnv("TMPDIR", "/tmp"), true);
+        Path globalTmpDir = canonPath(getEnv("TMPDIR").value_or("/tmp"), true);
 
         /* They don't like trailing slashes on subpath directives */
         if (globalTmpDir.back() == '/') globalTmpDir.pop_back();

@@ -3121,6 +3513,8 @@ void DerivationGoal::runChild()
                 builtinFetchurl(drv2, netrcData);
             else if (drv->builder == "builtin:buildenv")
                 builtinBuildenv(drv2);
+            else if (drv->builder == "builtin:unpack-channel")
+                builtinUnpackChannel(drv2);
             else
                 throw Error(format("unsupported builtin function '%1%'") % string(drv->builder, 8));
             _exit(0);
@@ -3183,6 +3577,14 @@ void DerivationGoal::registerOutputs()
 
     std::exception_ptr delayedException;
 
+    /* The paths that can be referenced are the input closures, the
+       output paths, and any paths that have been built via recursive
+       Nix calls. */
+    PathSet referenceablePaths;
+    for (auto & p : inputPaths) referenceablePaths.insert(p);
+    for (auto & i : drv->outputs) referenceablePaths.insert(i.second.path);
+    for (auto & p : addedPaths) referenceablePaths.insert(p);
+
     /* Check whether the output paths were created, and grep each
        output path to determine what other paths it references. Also make all
        output paths read-only. */

@@ -3318,7 +3720,7 @@ void DerivationGoal::registerOutputs()
            verify later on whether nobody has messed with the store. */
         debug("scanning for references inside '%1%'", path);
         HashResult hash;
-        PathSet references = scanForReferences(actualPath, allPaths, hash);
+        PathSet references = scanForReferences(actualPath, referenceablePaths, hash);
 
         if (buildMode == bmCheck) {
             if (!worker.store.isValidPath(path)) continue;
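registerOutputs() above hands a set of referenceable paths to scanForReferences(), which detects references by searching the output's bytes for the candidates' distinguishing substrings (in Nix, the hash part of each store path). A deliberately naive toy version of that idea, far simpler than Nix's real scanner:

    #include <fstream>
    #include <set>
    #include <sstream>
    #include <string>

    // Return the subset of `candidates` whose marker string occurs
    // somewhere in `file`.
    std::set<std::string> naiveScanForReferences(
        const std::string & file,
        const std::set<std::string> & candidates)
    {
        std::ifstream in(file, std::ios::binary);
        std::ostringstream buf;
        buf << in.rdbuf();
        const std::string contents = buf.str();

        std::set<std::string> found;
        for (const auto & marker : candidates)
            if (contents.find(marker) != std::string::npos)
                found.insert(marker);
        return found;
    }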
@@ -6,5 +6,6 @@ namespace nix {
 
 // TODO: make pluggable.
 void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
+void builtinUnpackChannel(const BasicDerivation & drv);
 
 }

src/libstore/builtins/unpack-channel.cc (new file, 29 lines)
@@ -0,0 +1,29 @@
+#include "builtins.hh"
+#include "tarfile.hh"
+
+namespace nix {
+
+void builtinUnpackChannel(const BasicDerivation & drv)
+{
+    auto getAttr = [&](const string & name) {
+        auto i = drv.env.find(name);
+        if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
+        return i->second;
+    };
+
+    Path out = getAttr("out");
+    auto channelName = getAttr("channelName");
+    auto src = getAttr("src");
+
+    createDirs(out);
+
+    unpackTarfile(src, out);
+
+    auto entries = readDirectory(out);
+    if (entries.size() != 1)
+        throw Error("channel tarball '%s' contains more than one file", src);
+    if (rename((out + "/" + entries[0].name).c_str(), (out + "/" + channelName).c_str()) == -1)
+        throw SysError("renaming channel directory");
+}
+
+}
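The new builtin above unpacks the channel tarball and then renames its single top-level entry to the channel name. A rough std::filesystem rendering of that post-unpack step (not the builtin itself; the extraction is assumed to have already happened):

    #include <filesystem>
    #include <stdexcept>
    #include <string>
    #include <vector>

    namespace fs = std::filesystem;

    // Rename the single entry inside `out` to `channelName`, mirroring
    // the check that the tarball contains exactly one top-level entry.
    void renameUnpackedChannel(const fs::path & out, const std::string & channelName)
    {
        std::vector<fs::path> entries;
        for (const auto & entry : fs::directory_iterator(out))
            entries.push_back(entry.path());
        if (entries.size() != 1)
            throw std::runtime_error("expected exactly one entry in " + out.string());
        fs::rename(entries.front(), out / channelName);
    }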
@@ -186,8 +186,75 @@ struct RetrieveRegularNARSink : ParseSink
     }
 };
 
+struct ClientSettings
+{
+    bool keepFailed;
+    bool keepGoing;
+    bool tryFallback;
+    Verbosity verbosity;
+    unsigned int maxBuildJobs;
+    time_t maxSilentTime;
+    bool verboseBuild;
+    unsigned int buildCores;
+    bool useSubstitutes;
+    StringMap overrides;
+
+    void apply(TrustedFlag trusted)
+    {
+        settings.keepFailed = keepFailed;
+        settings.keepGoing = keepGoing;
+        settings.tryFallback = tryFallback;
+        nix::verbosity = verbosity;
+        settings.maxBuildJobs.assign(maxBuildJobs);
+        settings.maxSilentTime = maxSilentTime;
+        settings.verboseBuild = verboseBuild;
+        settings.buildCores = buildCores;
+        settings.useSubstitutes = useSubstitutes;
+
+        for (auto & i : overrides) {
+            auto & name(i.first);
+            auto & value(i.second);
+
+            auto setSubstituters = [&](Setting<Strings> & res) {
+                if (name != res.name && res.aliases.count(name) == 0)
+                    return false;
+                StringSet trusted = settings.trustedSubstituters;
+                for (auto & s : settings.substituters.get())
+                    trusted.insert(s);
+                Strings subs;
+                auto ss = tokenizeString<Strings>(value);
+                for (auto & s : ss)
+                    if (trusted.count(s))
+                        subs.push_back(s);
+                    else
+                        warn("ignoring untrusted substituter '%s'", s);
+                res = subs;
+                return true;
+            };
+
+            try {
+                if (name == "ssh-auth-sock") // obsolete
+                    ;
+                else if (trusted
+                    || name == settings.buildTimeout.name
+                    || name == "connect-timeout"
+                    || (name == "builders" && value == ""))
+                    settings.set(name, value);
+                else if (setSubstituters(settings.substituters))
+                    ;
+                else if (setSubstituters(settings.extraSubstituters))
+                    ;
+                else
+                    warn("ignoring the user-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
+            } catch (UsageError & e) {
+                warn(e.what());
+            }
+        }
+    }
+};
+
 static void performOp(TunnelLogger * logger, ref<Store> store,
-    bool trusted, unsigned int clientVersion,
+    TrustedFlag trusted, RecursiveFlag recursive, unsigned int clientVersion,
     Source & from, BufferedSink & to, unsigned int op)
 {
     switch (op) {
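ClientSettings above decouples reading client-supplied options from applying them, so untrusted or recursive clients can be filtered in one place. A boiled-down sketch of that buffer-then-apply pattern (the names here are invented and are not the daemon's real API):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    struct PendingSettings
    {
        std::map<std::string, std::string> overrides;

        // Copy the buffered overrides into the real configuration via
        // `set`, letting `allowed` decide which keys an untrusted
        // client may touch.
        void apply(bool trusted,
            const std::set<std::string> & allowed,
            const std::function<void(const std::string &, const std::string &)> & set) const
        {
            for (const auto & [name, value] : overrides) {
                if (trusted || allowed.count(name))
                    set(name, value);
                else
                    std::cerr << "ignoring untrusted setting '" << name << "'\n";
            }
        }
    };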
@@ -464,70 +531,37 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     }
 
     case wopSetOptions: {
-        settings.keepFailed = readInt(from);
-        settings.keepGoing = readInt(from);
-        settings.tryFallback = readInt(from);
-        verbosity = (Verbosity) readInt(from);
-        settings.maxBuildJobs.assign(readInt(from));
-        settings.maxSilentTime = readInt(from);
+
+        ClientSettings clientSettings;
+
+        clientSettings.keepFailed = readInt(from);
+        clientSettings.keepGoing = readInt(from);
+        clientSettings.tryFallback = readInt(from);
+        clientSettings.verbosity = (Verbosity) readInt(from);
+        clientSettings.maxBuildJobs = readInt(from);
+        clientSettings.maxSilentTime = readInt(from);
         readInt(from); // obsolete useBuildHook
-        settings.verboseBuild = lvlError == (Verbosity) readInt(from);
+        clientSettings.verboseBuild = lvlError == (Verbosity) readInt(from);
         readInt(from); // obsolete logType
         readInt(from); // obsolete printBuildTrace
-        settings.buildCores = readInt(from);
-        settings.useSubstitutes = readInt(from);
+        clientSettings.buildCores = readInt(from);
+        clientSettings.useSubstitutes = readInt(from);
 
-        StringMap overrides;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
             unsigned int n = readInt(from);
             for (unsigned int i = 0; i < n; i++) {
                 string name = readString(from);
                 string value = readString(from);
-                overrides.emplace(name, value);
+                clientSettings.overrides.emplace(name, value);
             }
         }
 
         logger->startWork();
 
-        for (auto & i : overrides) {
-            auto & name(i.first);
-            auto & value(i.second);
-
-            auto setSubstituters = [&](Setting<Strings> & res) {
-                if (name != res.name && res.aliases.count(name) == 0)
-                    return false;
-                StringSet trusted = settings.trustedSubstituters;
-                for (auto & s : settings.substituters.get())
-                    trusted.insert(s);
-                Strings subs;
-                auto ss = tokenizeString<Strings>(value);
-                for (auto & s : ss)
-                    if (trusted.count(s))
-                        subs.push_back(s);
-                    else
-                        warn("ignoring untrusted substituter '%s'", s);
-                res = subs;
-                return true;
-            };
-
-            try {
-                if (name == "ssh-auth-sock") // obsolete
-                    ;
-                else if (trusted
-                    || name == settings.buildTimeout.name
-                    || name == "connect-timeout"
-                    || (name == "builders" && value == ""))
-                    settings.set(name, value);
-                else if (setSubstituters(settings.substituters))
-                    ;
-                else if (setSubstituters(settings.extraSubstituters))
-                    ;
-                else
-                    warn("ignoring the user-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
-            } catch (UsageError & e) {
-                warn(e.what());
-            }
-        }
+        // FIXME: use some setting in recursive mode. Will need to use
+        // non-global variables.
+        if (!recursive)
+            clientSettings.apply(trusted);
 
         logger->stopWork();
         break;

@@ -694,11 +728,12 @@ void processConnection(
     ref<Store> store,
     FdSource & from,
     FdSink & to,
-    bool trusted,
+    TrustedFlag trusted,
+    RecursiveFlag recursive,
     const std::string & userName,
     uid_t userId)
 {
-    MonitorFdHup monitor(from.fd);
+    auto monitor = !recursive ? std::make_unique<MonitorFdHup>(from.fd) : nullptr;
 
     /* Exchange the greeting. */
     unsigned int magic = readInt(from);

@@ -712,7 +747,9 @@ void processConnection(
 
     auto tunnelLogger = new TunnelLogger(to, clientVersion);
     auto prevLogger = nix::logger;
-    logger = tunnelLogger;
+    // FIXME
+    if (!recursive)
+        logger = tunnelLogger;
 
     unsigned int opCount = 0;
 

@@ -721,8 +758,10 @@ void processConnection(
         prevLogger->log(lvlDebug, fmt("%d operations", opCount));
     });
 
-    if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
-        setAffinityTo(readInt(from));
+    if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from)) {
+        auto affinity = readInt(from);
+        setAffinityTo(affinity);
+    }
 
     readInt(from); // obsolete reserveSpace
 

@@ -760,7 +799,7 @@ void processConnection(
             opCount++;
 
             try {
-                performOp(tunnelLogger, store, trusted, clientVersion, from, to, op);
+                performOp(tunnelLogger, store, trusted, recursive, clientVersion, from, to, op);
             } catch (Error & e) {
                 /* If we're not in a state where we can send replies, then
                    something went wrong processing the input of the
@@ -3,11 +3,15 @@
 
 namespace nix::daemon {
 
+enum TrustedFlag : bool { NotTrusted = false, Trusted = true };
+enum RecursiveFlag : bool { NotRecursive = false, Recursive = true };
+
 void processConnection(
     ref<Store> store,
     FdSource & from,
     FdSink & to,
-    bool trusted,
+    TrustedFlag trusted,
+    RecursiveFlag recursive,
     const std::string & userName,
     uid_t userId);
 

@@ -8,6 +8,7 @@
 #include "compression.hh"
 #include "pathlocks.hh"
 #include "finally.hh"
+#include "tarfile.hh"
 
 #ifdef ENABLE_S3
 #include <aws/core/client/ClientConfiguration.h>

@@ -289,6 +290,7 @@ struct CurlDownloader : public Downloader
         }
 
         if (request.verifyTLS) {
+            debug("verify TLS: Nix CA file = '%s'", settings.caFile);
             if (settings.caFile != "")
                 curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
         } else {

@@ -905,11 +907,10 @@ CachedDownloadResult Downloader::downloadCached(
                 result.lastModified = lstat(unpackedLink).st_mtime;
             }
             if (unpackedStorePath.empty()) {
-                printInfo(format("unpacking '%1%'...") % url);
+                printInfo("unpacking '%s'...", url);
                 Path tmpDir = createTempDir();
                 AutoDelete autoDelete(tmpDir, true);
-                // FIXME: this requires GNU tar for decompression.
-                runProgram("tar", true, {"xf", store->toRealPath(storePath), "-C", tmpDir});
+                unpackTarfile(store->toRealPath(storePath), tmpDir, baseNameOf(url));
                 auto members = readDirectory(tmpDir);
                 if (members.size() != 1)
                     throw nix::Error("tarball '%s' contains an unexpected number of top-level files", url);
@ -870,11 +870,11 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||||
|
|
||||||
void LocalStore::autoGC(bool sync)
|
void LocalStore::autoGC(bool sync)
|
||||||
{
|
{
|
||||||
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE", "");
|
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE");
|
||||||
|
|
||||||
auto getAvail = [this]() -> uint64_t {
|
auto getAvail = [this]() -> uint64_t {
|
||||||
if (!fakeFreeSpaceFile.empty())
|
if (fakeFreeSpaceFile)
|
||||||
return std::stoll(readFile(fakeFreeSpaceFile));
|
return std::stoll(readFile(*fakeFreeSpaceFile));
|
||||||
|
|
||||||
struct statvfs st;
|
struct statvfs st;
|
||||||
if (statvfs(realStoreDir.c_str(), &st))
|
if (statvfs(realStoreDir.c_str(), &st))
|
||||||
|
|
|
@ -32,20 +32,20 @@ static GlobalConfig::Register r1(&settings);
|
||||||
|
|
||||||
Settings::Settings()
|
Settings::Settings()
|
||||||
: nixPrefix(NIX_PREFIX)
|
: nixPrefix(NIX_PREFIX)
|
||||||
, nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
|
, nixStore(canonPath(getEnv("NIX_STORE_DIR").value_or(getEnv("NIX_STORE").value_or(NIX_STORE_DIR))))
|
||||||
, nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
|
, nixDataDir(canonPath(getEnv("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
|
||||||
, nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
|
, nixLogDir(canonPath(getEnv("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
|
||||||
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)))
|
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
|
||||||
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)))
|
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
|
||||||
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)))
|
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR").value_or(NIX_LIBEXEC_DIR)))
|
||||||
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)))
|
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
|
||||||
, nixManDir(canonPath(NIX_MAN_DIR))
|
, nixManDir(canonPath(NIX_MAN_DIR))
|
||||||
, nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
|
, nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
|
||||||
{
|
{
|
||||||
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
|
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
|
||||||
lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
|
lockCPU = getEnv("NIX_AFFINITY_HACK") == "1";
|
||||||
|
|
||||||
caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", ""));
|
caFile = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
|
||||||
if (caFile == "") {
|
if (caFile == "") {
|
||||||
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
|
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
|
||||||
if (pathExists(fn)) {
|
if (pathExists(fn)) {
|
||||||
|
@ -56,9 +56,9 @@ Settings::Settings()
|
||||||
|
|
||||||
/* Backwards compatibility. */
|
/* Backwards compatibility. */
|
||||||
auto s = getEnv("NIX_REMOTE_SYSTEMS");
|
auto s = getEnv("NIX_REMOTE_SYSTEMS");
|
||||||
if (s != "") {
|
if (s) {
|
||||||
Strings ss;
|
Strings ss;
|
||||||
for (auto & p : tokenizeString<Strings>(s, ":"))
|
for (auto & p : tokenizeString<Strings>(*s, ":"))
|
||||||
ss.push_back("@" + p);
|
ss.push_back("@" + p);
|
||||||
builders = concatStringsSep(" ", ss);
|
builders = concatStringsSep(" ", ss);
|
||||||
}
|
}
|
||||||
|
@ -95,7 +95,7 @@ StringSet Settings::getDefaultSystemFeatures()
|
||||||
/* For backwards compatibility, accept some "features" that are
|
/* For backwards compatibility, accept some "features" that are
|
||||||
used in Nixpkgs to route builds to certain machines but don't
|
used in Nixpkgs to route builds to certain machines but don't
|
||||||
actually require anything special on the machines. */
|
actually require anything special on the machines. */
|
||||||
StringSet features{"nixos-test", "benchmark", "big-parallel"};
|
StringSet features{"nixos-test", "benchmark", "big-parallel", "recursive-nix"};
|
||||||
|
|
||||||
#if __linux__
|
#if __linux__
|
||||||
if (access("/dev/kvm", R_OK | W_OK) == 0)
|
if (access("/dev/kvm", R_OK | W_OK) == 0)
|
||||||
|
@ -105,10 +105,15 @@ StringSet Settings::getDefaultSystemFeatures()
|
||||||
return features;
|
return features;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Settings::requireExperimentalFeature(const std::string & name)
|
bool Settings::isExperimentalFeatureEnabled(const std::string & name)
|
||||||
{
|
{
|
||||||
auto & f = experimentalFeatures.get();
|
auto & f = experimentalFeatures.get();
|
||||||
if (std::find(f.begin(), f.end(), name) == f.end())
|
return std::find(f.begin(), f.end(), name) != f.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
void Settings::requireExperimentalFeature(const std::string & name)
|
||||||
|
{
|
||||||
|
if (!isExperimentalFeatureEnabled(name))
|
||||||
throw Error("experimental Nix feature '%s' is disabled", name);
|
throw Error("experimental Nix feature '%s' is disabled", name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -66,7 +66,7 @@ public:
|
||||||
/* File name of the socket the daemon listens to. */
|
/* File name of the socket the daemon listens to. */
|
||||||
Path nixDaemonSocketFile;
|
Path nixDaemonSocketFile;
|
||||||
|
|
||||||
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE", "auto"), "store",
|
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), "store",
|
||||||
"The default Nix store to use."};
|
"The default Nix store to use."};
|
||||||
|
|
||||||
Setting<bool> keepFailed{this, false, "keep-failed",
|
Setting<bool> keepFailed{this, false, "keep-failed",
|
||||||
|
@@ -319,7 +319,7 @@ public:
         "A program to run just before a build to set derivation-specific build settings."};

     Setting<std::string> postBuildHook{this, "", "post-build-hook",
-        "A program to run just after each succesful build."};
+        "A program to run just after each successful build."};

     Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
         "Path to the netrc file used to obtain usernames/passwords for downloads."};
@@ -360,6 +360,8 @@ public:
     Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
         "Experimental Nix features to enable."};

+    bool isExperimentalFeatureEnabled(const std::string & name);
+
     void requireExperimentalFeature(const std::string & name);
 };
@@ -54,6 +54,7 @@ LocalStore::LocalStore(const Params & params)
     , trashDir(realStoreDir + "/trash")
     , tempRootsDir(stateDir + "/temproots")
     , fnTempRoots(fmt("%s/%d", tempRootsDir, getpid()))
+    , locksHeld(tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS").value_or("")))
 {
     auto state(_state.lock());
@@ -577,6 +578,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
 uint64_t LocalStore::addValidPath(State & state,
     const ValidPathInfo & info, bool checkOutputs)
 {
+    checkStoreName(storePathToName(info.path));
+
     if (info.ca != "" && !info.isContentAddressed(*this))
         throw Error("cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't", info.path);
@@ -1231,7 +1234,29 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)

     /* Optionally, check the content hashes (slow). */
     if (checkContents) {
-        printInfo("checking hashes...");
+
+        printInfo("checking link hashes...");
+
+        for (auto & link : readDirectory(linksDir)) {
+            printMsg(lvlTalkative, "checking contents of '%s'", link.name);
+            Path linkPath = linksDir + "/" + link.name;
+            string hash = hashPath(htSHA256, linkPath).first.to_string(Base32, false);
+            if (hash != link.name) {
+                printError(
+                    "link '%s' was modified! expected hash '%s', got '%s'",
+                    linkPath, link.name, hash);
+                if (repair) {
+                    if (unlink(linkPath.c_str()) == 0)
+                        printError("removed link '%s'", linkPath);
+                    else
+                        throw SysError("removing corrupt link '%s'", linkPath);
+                } else {
+                    errors = true;
+                }
+            }
+        }
+
+        printInfo("checking store hashes...");

         Hash nullHash(htSHA256);
@@ -107,7 +107,7 @@ private:
 public:

     // Hack for build-remote.cc.
-    PathSet locksHeld = tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS"));
+    PathSet locksHeld;

     /* Initialise the local store, upgrading the schema if
        necessary. */
@@ -148,7 +148,7 @@ public:

     std::string getUri() override;

-    bool sameMachine()
+    bool sameMachine() override
     { return true; }

 private:
@@ -35,7 +35,7 @@ public:
         return uriScheme + host;
     }

-    bool sameMachine()
+    bool sameMachine() override
     { return false; }

     void narFromPath(const Path & path, Sink & sink) override;
@@ -16,7 +16,7 @@ SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, bool

 void SSHMaster::addCommonSSHOpts(Strings & args)
 {
-    for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
+    for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS").value_or("")))
         args.push_back(i);
     if (!keyFile.empty())
         args.insert(args.end(), {"-i", keyFile});
@@ -90,17 +90,22 @@ void checkStoreName(const string & name)
         "Path names are alphanumeric and can include the symbols %1% "
         "and must not begin with a period. "
         "Note: If '%2%' is a source file and you cannot rename it on "
-        "disk, builtins.path { name = ... } can be used to give it an "
+        "disk, 'builtins.path { name = ... }' can be used to give it an "
         "alternative name.") % validChars % name;

+    if (name.empty())
+        throw Error(baseError % "it is an empty string");
+
     /* Disallow names starting with a dot for possible security
        reasons (e.g., "." and ".."). */
-    if (string(name, 0, 1) == ".")
+    if (name[0] == '.')
         throw Error(baseError % "it is illegal to start the name with a period");

     /* Disallow names longer than 211 characters. ext4’s max is 256,
        but we need extra space for the hash and .chroot extensions. */
     if (name.length() > 211)
         throw Error(baseError % "name must be less than 212 characters");

     for (auto & i : name)
         if (!((i >= 'A' && i <= 'Z') ||
               (i >= 'a' && i <= 'z') ||
@@ -211,7 +216,7 @@ static std::string makeType(string && type, const PathSet & references)
         type += ":";
         type += i;
     }
-    return type;
+    return std::move(type);
 }
@@ -20,13 +20,13 @@
 namespace nix {


-MakeError(SubstError, Error)
-MakeError(BuildError, Error) /* denotes a permanent build failure */
-MakeError(InvalidPath, Error)
-MakeError(Unsupported, Error)
-MakeError(SubstituteGone, Error)
-MakeError(SubstituterDisabled, Error)
-MakeError(NotInStore, Error)
+MakeError(SubstError, Error);
+MakeError(BuildError, Error); // denotes a permanent build failure
+MakeError(InvalidPath, Error);
+MakeError(Unsupported, Error);
+MakeError(SubstituteGone, Error);
+MakeError(SubstituterDisabled, Error);
+MakeError(NotInStore, Error);


 struct BasicDerivation;
@@ -570,7 +570,7 @@ public:
         unsigned long long & downloadSize, unsigned long long & narSize);

     /* Sort a set of paths topologically under the references
-       relation. If p refers to q, then p preceeds q in this list. */
+       relation. If p refers to q, then p precedes q in this list. */
     Paths topoSortPaths(const PathSet & paths);

     /* Export multiple paths in the format expected by ‘nix-store
@@ -70,23 +70,23 @@ public:
         Args & args;
         Flag::ptr flag;
         friend class Args;
-        FlagMaker(Args & args) : args(args), flag(std::make_shared<Flag>()) { };
+        FlagMaker(Args & args) : args(args), flag(std::make_shared<Flag>()) { }
     public:
         ~FlagMaker();
-        FlagMaker & longName(const std::string & s) { flag->longName = s; return *this; };
-        FlagMaker & shortName(char s) { flag->shortName = s; return *this; };
-        FlagMaker & description(const std::string & s) { flag->description = s; return *this; };
-        FlagMaker & label(const std::string & l) { flag->arity = 1; flag->labels = {l}; return *this; };
-        FlagMaker & labels(const Strings & ls) { flag->arity = ls.size(); flag->labels = ls; return *this; };
-        FlagMaker & arity(size_t arity) { flag->arity = arity; return *this; };
-        FlagMaker & handler(std::function<void(std::vector<std::string>)> handler) { flag->handler = handler; return *this; };
-        FlagMaker & handler(std::function<void()> handler) { flag->handler = [handler](std::vector<std::string>) { handler(); }; return *this; };
+        FlagMaker & longName(const std::string & s) { flag->longName = s; return *this; }
+        FlagMaker & shortName(char s) { flag->shortName = s; return *this; }
+        FlagMaker & description(const std::string & s) { flag->description = s; return *this; }
+        FlagMaker & label(const std::string & l) { flag->arity = 1; flag->labels = {l}; return *this; }
+        FlagMaker & labels(const Strings & ls) { flag->arity = ls.size(); flag->labels = ls; return *this; }
+        FlagMaker & arity(size_t arity) { flag->arity = arity; return *this; }
+        FlagMaker & handler(std::function<void(std::vector<std::string>)> handler) { flag->handler = handler; return *this; }
+        FlagMaker & handler(std::function<void()> handler) { flag->handler = [handler](std::vector<std::string>) { handler(); }; return *this; }
         FlagMaker & handler(std::function<void(std::string)> handler) {
             flag->arity = 1;
             flag->handler = [handler](std::vector<std::string> ss) { handler(std::move(ss[0])); };
             return *this;
-        };
-        FlagMaker & category(const std::string & s) { flag->category = s; return *this; };
+        }
+        FlagMaker & category(const std::string & s) { flag->category = s; return *this; }

         template<class T>
         FlagMaker & dest(T * dest)
@@ -94,7 +94,7 @@ public:
             flag->arity = 1;
             flag->handler = [=](std::vector<std::string> ss) { *dest = ss[0]; };
             return *this;
-        };
+        }

         template<class T>
         FlagMaker & set(T * dest, const T & val)
@@ -102,7 +102,7 @@ public:
             flag->arity = 0;
             flag->handler = [=](std::vector<std::string> ss) { *dest = val; };
             return *this;
-        };
+        }

         FlagMaker & mkHashTypeFlag(HashType * ht);
     };
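
FlagMaker in the hunks above is a fluent builder: every setter mutates the shared Flag and returns *this so calls chain, and the edit merely drops the stray semicolons after the inline bodies. A reduced sketch of that builder style, using stand-in Flag/FlagMaker types rather than the real Nix classes:

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-ins for the real Flag/FlagMaker pair; only the chaining style matters.
    struct Flag
    {
        std::string longName, description;
        std::size_t arity = 0;
        std::function<void(std::vector<std::string>)> handler;
    };

    class FlagMaker
    {
        Flag flag;
    public:
        FlagMaker & longName(const std::string & s) { flag.longName = s; return *this; }
        FlagMaker & description(const std::string & s) { flag.description = s; return *this; }
        FlagMaker & handler(std::function<void(std::string)> h)
        {
            flag.arity = 1;
            flag.handler = [h](std::vector<std::string> ss) { h(std::move(ss[0])); };
            return *this;
        }
        Flag build() { return flag; }
    };

    int main()
    {
        std::string store;
        auto flag = FlagMaker()
            .longName("store")
            .description("The default Nix store to use.")
            .handler([&](std::string s) { store = s; })
            .build();

        flag.handler({"daemon"});                              // simulate parsing "--store daemon"
        std::cout << flag.longName << " = " << store << "\n";  // store = daemon
    }
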
@@ -7,3 +7,5 @@ libutil_DIR := $(d)
 libutil_SOURCES := $(wildcard $(d)/*.cc)

 libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(BOOST_LDFLAGS) -lboost_context
+
+libutil_LIBS = libnixrust
src/libutil/rust-ffi.cc (new file, 12 lines)
@@ -0,0 +1,12 @@
+#include "logging.hh"
+#include "rust-ffi.hh"
+
+namespace nix {
+
+extern "C" std::exception_ptr * make_error(rust::StringSlice s)
+{
+    // FIXME: leak
+    return new std::exception_ptr(std::make_exception_ptr(Error(std::string(s.ptr, s.size))));
+}
+
+}
src/libutil/rust-ffi.hh (new file, 84 lines)
@@ -0,0 +1,84 @@
+#include "serialise.hh"
+
+namespace rust {
+
+// Depending on the internal representation of Rust slices is slightly
+// evil...
+template<typename T>
+struct Slice
+{
+    T * ptr;
+    size_t size;
+
+    Slice(T * ptr, size_t size) : ptr(ptr), size(size)
+    {
+        assert(ptr);
+    }
+};
+
+struct StringSlice : Slice<char>
+{
+    StringSlice(const std::string & s): Slice((char *) s.data(), s.size()) {}
+};
+
+struct Source
+{
+    size_t (*fun)(void * source_this, rust::Slice<uint8_t> data);
+    nix::Source * _this;
+
+    Source(nix::Source & _this)
+        : fun(sourceWrapper), _this(&_this)
+    {}
+
+    // FIXME: how to propagate exceptions?
+    static size_t sourceWrapper(void * _this, rust::Slice<uint8_t> data)
+    {
+        auto n = ((nix::Source *) _this)->read(data.ptr, data.size);
+        return n;
+    }
+};
+
+/* C++ representation of Rust's Result<T, CppException>. */
+template<typename T>
+struct Result
+{
+    unsigned int tag;
+
+    union {
+        T data;
+        std::exception_ptr * exc;
+    };
+
+    /* Rethrow the wrapped exception or return the wrapped value. */
+    T unwrap()
+    {
+        if (tag == 0)
+            return data;
+        else if (tag == 1)
+            std::rethrow_exception(*exc);
+        else
+            abort();
+    }
+};
+
+template<typename T>
+struct CBox
+{
+    T * ptr;
+
+    T * operator ->()
+    {
+        return ptr;
+    }
+
+    CBox(T * ptr) : ptr(ptr) { }
+    CBox(const CBox &) = delete;
+    CBox(CBox &&) = delete;
+
+    ~CBox()
+    {
+        free(ptr);
+    }
+};
+
+}
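
The Result/CBox pair above mirrors Rust's Result<T, CppException> on the C++ side: tag 0 carries a value, tag 1 carries a heap-allocated std::exception_ptr that unwrap() rethrows. A standalone sketch of that control flow (no actual Rust involved; the types here are illustrative copies, not the library):

    #include <cstdlib>
    #include <exception>
    #include <iostream>
    #include <stdexcept>

    template<typename T>
    struct Result
    {
        unsigned int tag;
        union {
            T data;
            std::exception_ptr * exc;
        };

        T unwrap()
        {
            if (tag == 0) return data;
            else if (tag == 1) std::rethrow_exception(*exc);
            else abort();
        }
    };

    int main()
    {
        Result<int> ok;
        ok.tag = 0;
        ok.data = 42;
        std::cout << ok.unwrap() << "\n";              // prints 42

        Result<int> err;
        err.tag = 1;
        // Mirrors make_error() above, leak and all.
        err.exc = new std::exception_ptr(
            std::make_exception_ptr(std::runtime_error("unpacking failed")));
        try {
            err.unwrap();
        } catch (std::exception & e) {
            std::cout << "caught: " << e.what() << "\n";   // caught: unpacking failed
        }
    }
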
@@ -77,7 +77,6 @@ struct BufferedSource : Source

     size_t read(unsigned char * data, size_t len) override;

-
     bool hasData();

 protected:
@@ -274,7 +273,7 @@ Sink & operator << (Sink & sink, const Strings & s);
 Sink & operator << (Sink & sink, const StringSet & s);


-MakeError(SerialisationError, Error)
+MakeError(SerialisationError, Error);


 template<typename T>
src/libutil/tarfile.cc (new file, 36 lines)
@@ -0,0 +1,36 @@
+#include "rust-ffi.hh"
+#include "compression.hh"
+
+extern "C" {
+    rust::Result<std::tuple<>> *
+    unpack_tarfile(rust::Source source, rust::StringSlice dest_dir);
+}
+
+namespace nix {
+
+void unpackTarfile(Source & source, const Path & destDir)
+{
+    rust::Source source2(source);
+    rust::CBox(unpack_tarfile(source2, destDir))->unwrap();
+}
+
+void unpackTarfile(const Path & tarFile, const Path & destDir,
+    std::optional<std::string> baseName)
+{
+    if (!baseName) baseName = baseNameOf(tarFile);
+
+    auto source = sinkToSource([&](Sink & sink) {
+        // FIXME: look at first few bytes to determine compression type.
+        auto decompressor =
+            // FIXME: add .gz support
+            hasSuffix(*baseName, ".bz2") ? makeDecompressionSink("bzip2", sink) :
+            hasSuffix(*baseName, ".xz") ? makeDecompressionSink("xz", sink) :
+            makeDecompressionSink("none", sink);
+        readFile(tarFile, *decompressor);
+        decompressor->finish();
+    });
+
+    unpackTarfile(*source, destDir);
+}
+
+}
src/libutil/tarfile.hh (new file, 10 lines)
@@ -0,0 +1,10 @@
+#include "serialise.hh"
+
+namespace nix {
+
+void unpackTarfile(Source & source, const Path & destDir);
+
+void unpackTarfile(const Path & tarFile, const Path & destDir,
+    std::optional<std::string> baseName = {});
+
+}
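
Together these two files let callers unpack a (possibly bzip2- or xz-compressed) tarball without shelling out to GNU tar; nix-prefetch-url switches to them further down. A hedged usage sketch against the declarations above, only meaningful inside the Nix source tree:

    #include "tarfile.hh"   // the header introduced above

    // Unpack a downloaded tarball into destDir. The file-based overload picks
    // bzip2/xz decompression from the base name and falls back to plain tar.
    void fetchAndUnpack(const nix::Path & tarball, const nix::Path & destDir)
    {
        nix::unpackTarfile(tarball, destDir);

        // Or, when the data is already flowing through a Source:
        // nix::unpackTarfile(someSource, destDir);
    }
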
@@ -11,7 +11,7 @@

 namespace nix {

-MakeError(ThreadPoolShutDown, Error)
+MakeError(ThreadPoolShutDown, Error);

 /* A simple thread pool that executes a queue of work items
    (lambdas). */
@@ -116,9 +116,9 @@ public:
     { \
     public: \
         using superClass::superClass; \
-    };
+    }

-MakeError(Error, BaseError)
+MakeError(Error, BaseError);

 class SysError : public Error
 {
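
With the semicolon gone from the macro body, every MakeError(...) use now supplies its own, which is what the sweep of `MakeError(Foo, Error);` edits elsewhere in this commit is doing. A reduced, self-contained sketch of the pattern (BaseError here is a stand-in built on std::runtime_error, not the real Nix class):

    #include <stdexcept>

    // Stand-in for Nix's BaseError hierarchy.
    struct BaseError : std::runtime_error
    {
        using std::runtime_error::runtime_error;
    };

    // The macro now ends at the closing brace; the ';' comes from each use site,
    // so declarations read like ordinary statements.
    #define MakeError(newClass, superClass)     \
        class newClass : public superClass      \
        {                                       \
        public:                                 \
            using superClass::superClass;       \
        }

    MakeError(Error, BaseError);
    MakeError(FormatError, Error);

    int main()
    {
        try {
            throw FormatError("bad format string");
        } catch (BaseError &) {
            return 0;   // caught via the common base class
        }
    }
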
@@ -59,10 +59,11 @@ std::string SysError::addErrno(const std::string & s)
 }


-string getEnv(const string & key, const string & def)
+std::optional<std::string> getEnv(const std::string & key)
 {
     char * value = getenv(key.c_str());
-    return value ? string(value) : def;
+    if (!value) return {};
+    return std::string(value);
 }
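
getEnv now distinguishes "unset" from "set to the empty string" by returning std::optional<std::string>; the rest of the commit converts callers to value_or() or to an explicit presence check. A minimal standalone sketch of the new calling patterns (the local getEnv mirrors the hunk above; variable names are illustrative):

    #include <cstdio>
    #include <cstdlib>
    #include <optional>
    #include <string>

    // Local mirror of the new-style helper: an empty optional means "not set".
    std::optional<std::string> getEnv(const std::string & key)
    {
        char * value = std::getenv(key.c_str());
        if (!value) return {};
        return std::string(value);
    }

    int main()
    {
        // Fall back to a default when the variable is unset.
        auto tmpDir = getEnv("TMPDIR").value_or("/tmp");

        // Branch on presence instead of comparing against "".
        if (auto shell = getEnv("NIX_BUILD_SHELL"))
            std::printf("using shell %s\n", shell->c_str());
        else
            std::printf("no shell configured, temp dir is %s\n", tmpDir.c_str());
    }
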
@@ -438,7 +439,7 @@ void deletePath(const Path & path, unsigned long long & bytesFreed)
 static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
     int & counter)
 {
-    tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR", "/tmp") : tmpRoot, true);
+    tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
     if (includePid)
         return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str();
     else
@@ -479,7 +480,7 @@ Path createTempDir(const Path & tmpRoot, const Path & prefix,

 std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
 {
-    Path tmpl(getEnv("TMPDIR", "/tmp") + "/" + prefix + ".XXXXXX");
+    Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX");
     // Strictly speaking, this is UB, but who cares...
     AutoCloseFD fd(mkstemp((char *) tmpl.c_str()));
     if (!fd)
@@ -491,7 +492,7 @@ std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
 std::string getUserName()
 {
     auto pw = getpwuid(geteuid());
-    std::string name = pw ? pw->pw_name : getEnv("USER", "");
+    std::string name = pw ? pw->pw_name : getEnv("USER").value_or("");
     if (name.empty())
         throw Error("cannot figure out user name");
     return name;
@@ -499,8 +500,8 @@ std::string getUserName()


 static Lazy<Path> getHome2([]() {
-    Path homeDir = getEnv("HOME");
-    if (homeDir.empty()) {
+    auto homeDir = getEnv("HOME");
+    if (!homeDir) {
         std::vector<char> buf(16384);
         struct passwd pwbuf;
         struct passwd * pw;
@@ -509,7 +510,7 @@ static Lazy<Path> getHome2([]() {
             throw Error("cannot determine user's home directory");
         homeDir = pw->pw_dir;
     }
-    return homeDir;
+    return *homeDir;
 });

 Path getHome() { return getHome2(); }
@@ -517,25 +518,21 @@ Path getHome() { return getHome2(); }

 Path getCacheDir()
 {
-    Path cacheDir = getEnv("XDG_CACHE_HOME");
-    if (cacheDir.empty())
-        cacheDir = getHome() + "/.cache";
-    return cacheDir;
+    auto cacheDir = getEnv("XDG_CACHE_HOME");
+    return cacheDir ? *cacheDir : getHome() + "/.cache";
 }

 Path getConfigDir()
 {
-    Path configDir = getEnv("XDG_CONFIG_HOME");
-    if (configDir.empty())
-        configDir = getHome() + "/.config";
-    return configDir;
+    auto configDir = getEnv("XDG_CONFIG_HOME");
+    return configDir ? *configDir : getHome() + "/.config";
 }

 std::vector<Path> getConfigDirs()
 {
     Path configHome = getConfigDir();
-    string configDirs = getEnv("XDG_CONFIG_DIRS");
+    string configDirs = getEnv("XDG_CONFIG_DIRS").value_or("");
     std::vector<Path> result = tokenizeString<std::vector<string>>(configDirs, ":");
     result.insert(result.begin(), configHome);
     return result;
@@ -544,10 +541,8 @@ std::vector<Path> getConfigDirs()

 Path getDataDir()
 {
-    Path dataDir = getEnv("XDG_DATA_HOME");
-    if (dataDir.empty())
-        dataDir = getHome() + "/.local/share";
-    return dataDir;
+    auto dataDir = getEnv("XDG_DATA_HOME");
+    return dataDir ? *dataDir : getHome() + "/.local/share";
 }
@@ -35,7 +35,7 @@ extern const std::string nativeSystem;


 /* Return an environment variable. */
-string getEnv(const string & key, const string & def = "");
+std::optional<std::string> getEnv(const std::string & key);

 /* Get the entire environment. */
 std::map<std::string, std::string> getEnv();
@@ -158,7 +158,7 @@ void readFull(int fd, unsigned char * buf, size_t count);
 void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts = true);
 void writeFull(int fd, const string & s, bool allowInterrupts = true);

-MakeError(EndOfFile, Error)
+MakeError(EndOfFile, Error);


 /* Read a file descriptor until EOF occurs. */
@@ -339,10 +339,10 @@ void inline checkInterrupt()
         _interrupted();
 }

-MakeError(Interrupted, BaseError)
+MakeError(Interrupted, BaseError);


-MakeError(FormatError, Error)
+MakeError(FormatError, Error);


 /* String tokenizer. */
@@ -344,9 +344,9 @@ static void _main(int argc, char * * argv)
     /* Figure out what bash shell to use. If $NIX_BUILD_SHELL
        is not set, then build bashInteractive from
        <nixpkgs>. */
-    auto shell = getEnv("NIX_BUILD_SHELL", "");
+    auto shell = getEnv("NIX_BUILD_SHELL");

-    if (shell == "") {
+    if (!shell) {

         try {
             auto expr = state->parseExprFromString("(import <nixpkgs> {}).bashInteractive", absPath("."));
@@ -382,7 +382,8 @@ static void _main(int argc, char * * argv)
     // Set the environment.
     auto env = getEnv();

-    auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp"));
+    auto tmp = getEnv("TMPDIR");
+    if (!tmp) tmp = getEnv("XDG_RUNTIME_DIR").value_or("/tmp");

     if (pure) {
         decltype(env) newEnv;
|
||||||
env["__ETC_PROFILE_SOURCED"] = "1";
|
env["__ETC_PROFILE_SOURCED"] = "1";
|
||||||
}
|
}
|
||||||
|
|
||||||
env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmp;
|
env["NIX_BUILD_TOP"] = env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = *tmp;
|
||||||
env["NIX_STORE"] = store->storeDir;
|
env["NIX_STORE"] = store->storeDir;
|
||||||
env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores);
|
env["NIX_BUILD_CORES"] = std::to_string(settings.buildCores);
|
||||||
|
|
||||||
|
@@ -439,8 +440,8 @@ static void _main(int argc, char * * argv)
             (Path) tmpDir,
             (pure ? "" : "p=$PATH; "),
             (pure ? "" : "PATH=$PATH:$p; unset p; "),
-            dirOf(shell),
-            shell,
+            dirOf(*shell),
+            *shell,
             (getenv("TZ") ? (string("export TZ='") + getenv("TZ") + "'; ") : ""),
             envCommand));
@@ -460,9 +461,9 @@ static void _main(int argc, char * * argv)

         restoreSignals();

-        execvp(shell.c_str(), argPtrs.data());
+        execvp(shell->c_str(), argPtrs.data());

-        throw SysError("executing shell '%s'", shell);
+        throw SysError("executing shell '%s'", *shell);
     }

     else {
@@ -161,8 +161,9 @@ static void daemonLoop(char * * argv)
         AutoCloseFD fdSocket;

         /* Handle socket-based activation by systemd. */
-        if (getEnv("LISTEN_FDS") != "") {
-            if (getEnv("LISTEN_PID") != std::to_string(getpid()) || getEnv("LISTEN_FDS") != "1")
+        auto listenFds = getEnv("LISTEN_FDS");
+        if (listenFds) {
+            if (getEnv("LISTEN_PID") != std::to_string(getpid()) || listenFds != "1")
                 throw Error("unexpected systemd environment variables");
             fdSocket = SD_LISTEN_FDS_START;
             closeOnExec(fdSocket.get());
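
Socket activation hands the daemon an already-bound socket as fd 3 (SD_LISTEN_FDS_START) and announces it via LISTEN_PID/LISTEN_FDS; the hunk above just moves the detection onto the optional-returning getEnv. A rough standalone sketch of the same handshake; the helper and constant are illustrative, not the real Nix code:

    #include <cstdlib>
    #include <optional>
    #include <stdexcept>
    #include <string>
    #include <unistd.h>

    constexpr int SD_LISTEN_FDS_START = 3;

    static std::optional<std::string> env(const char * key)
    {
        char * v = std::getenv(key);
        if (!v) return {};
        return std::string(v);
    }

    // Return the systemd-passed socket, or -1 if we should bind our own.
    int inheritedSocket()
    {
        auto listenFds = env("LISTEN_FDS");
        if (!listenFds) return -1;

        // systemd tells us which process the fds were meant for and how many.
        if (env("LISTEN_PID") != std::to_string(getpid()) || *listenFds != "1")
            throw std::runtime_error("unexpected systemd environment variables");

        return SD_LISTEN_FDS_START;   // the single passed socket is always fd 3
    }

    int main()
    {
        return inheritedSocket() >= 0 ? 0 : 1;
    }
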
@@ -192,7 +193,7 @@ static void daemonLoop(char * * argv)

             closeOnExec(remote.get());

-            bool trusted = false;
+            TrustedFlag trusted = NotTrusted;
             PeerInfo peer = getPeerInfo(remote.get());

             struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0;
@@ -205,7 +206,7 @@ static void daemonLoop(char * * argv)
             Strings allowedUsers = settings.allowedUsers;

             if (matchUser(user, group, trustedUsers))
-                trusted = true;
+                trusted = Trusted;

             if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup)
                 throw Error(format("user '%1%' is not allowed to connect to the Nix daemon") % user);
@@ -239,7 +240,7 @@ static void daemonLoop(char * * argv)
                 /* Handle the connection. */
                 FdSource from(remote.get());
                 FdSink to(remote.get());
-                processConnection(openUncachedStore(), from, to, trusted, user, peer.uid);
+                processConnection(openUncachedStore(), from, to, trusted, NotRecursive, user, peer.uid);

                 exit(0);
             }, options);
@@ -321,7 +322,7 @@ static int _main(int argc, char * * argv)
         } else {
             FdSource from(STDIN_FILENO);
             FdSink to(STDOUT_FILENO);
-            processConnection(openUncachedStore(), from, to, true, "root", 0);
+            processConnection(openUncachedStore(), from, to, Trusted, NotRecursive, "root", 0);
         }
     } else {
         daemonLoop(argv);
@@ -1425,7 +1425,7 @@ static int _main(int argc, char * * argv)
     globals.instSource.autoArgs = myArgs.getAutoArgs(*globals.state);

     if (globals.profile == "")
-        globals.profile = getEnv("NIX_PROFILE", "");
+        globals.profile = getEnv("NIX_PROFILE").value_or("");

     if (globals.profile == "")
         globals.profile = getDefaultProfile();
@@ -9,6 +9,7 @@
 #include "legacy.hh"
 #include "finally.hh"
 #include "progress-bar.hh"
+#include "tarfile.hh"

 #include <iostream>
@@ -192,8 +193,7 @@ static int _main(int argc, char * * argv)
             if (hasSuffix(baseNameOf(uri), ".zip"))
                 runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked});
             else
-                // FIXME: this requires GNU tar for decompression.
-                runProgram("tar", true, {"xf", tmpFile, "-C", unpacked});
+                unpackTarfile(tmpFile, unpacked, baseNameOf(uri));

             /* If the archive unpacks to a single file/directory, then use
                that as the top-level. */
@@ -85,7 +85,7 @@ void StorePathCommand::run(ref<Store> store)

 Strings editorFor(const Pos & pos)
 {
-    auto editor = getEnv("EDITOR", "cat");
+    auto editor = getEnv("EDITOR").value_or("cat");
     auto args = tokenizeString<Strings>(editor);
     if (pos.line > 0 && (
         editor.find("emacs") != std::string::npos ||
@@ -213,4 +213,4 @@ struct MixEnvironment : virtual Args {
     void setEnviron();
 };

 }
@@ -63,7 +63,7 @@ struct CmdDoctor : StoreCommand
     {
         PathSet dirs;

-        for (auto & dir : tokenizeString<Strings>(getEnv("PATH"), ":"))
+        for (auto & dir : tokenizeString<Strings>(getEnv("PATH").value_or(""), ":"))
             if (pathExists(dir + "/nix-env"))
                 dirs.insert(dirOf(canonPath(dir + "/nix-env", true)));
@@ -82,7 +82,7 @@ struct CmdDoctor : StoreCommand
     {
         PathSet dirs;

-        for (auto & dir : tokenizeString<Strings>(getEnv("PATH"), ":")) {
+        for (auto & dir : tokenizeString<Strings>(getEnv("PATH").value_or(""), ":")) {
             Path profileDir = dirOf(dir);
             try {
                 Path userEnv = canonPath(profileDir, true);
@@ -115,7 +115,7 @@ struct CmdRun : InstallablesCommand, RunCommon, MixEnvironment

         setEnviron();

-        auto unixPath = tokenizeString<Strings>(getEnv("PATH"), ":");
+        auto unixPath = tokenizeString<Strings>(getEnv("PATH").value_or(""), ":");

         while (!todo.empty()) {
             Path path = todo.front();
@@ -275,7 +275,7 @@ struct CmdDevShell : Common, MixEnvironment

         stopProgressBar();

-        auto shell = getEnv("SHELL", "bash");
+        auto shell = getEnv("SHELL").value_or("bash");

         setEnviron();
@@ -101,7 +101,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand
     {
         Path where;

-        for (auto & dir : tokenizeString<Strings>(getEnv("PATH"), ":"))
+        for (auto & dir : tokenizeString<Strings>(getEnv("PATH").value_or(""), ":"))
             if (pathExists(dir + "/nix-env")) {
                 where = dir;
                 break;
@@ -1,9 +1,7 @@
-with import <nix/config.nix>;
-
 rec {
-  inherit shell;
+  shell = "@bash@";

-  path = coreutils;
+  path = "@coreutils@";

   system = "@system@";
@@ -35,7 +35,7 @@ clearStore
 hash=$(nix hash-file --type sha512 --base64 ./fetchurl.sh)
 hash32=$(nix hash-file --type sha512 --base16 ./fetchurl.sh)

-mirror=$TMPDIR/hashed-mirror
+mirror=$TEST_ROOT/hashed-mirror
 rm -rf $mirror
 mkdir -p $mirror/sha512
 ln -s $(pwd)/fetchurl.sh $mirror/sha512/$hash32
tests/lang/eval-okay-attrs6.exp (new file, 1 line)
@@ -0,0 +1 @@
+{ __overrides = { bar = "qux"; }; bar = "qux"; foo = "bar"; }

tests/lang/eval-okay-attrs6.nix (new file, 4 lines)
@@ -0,0 +1,4 @@
+rec {
+  "${"foo"}" = "bar";
+  __overrides = { bar = "qux"; };
+}
@@ -31,6 +31,7 @@ nix_tests = \
   nix-copy-ssh.sh \
   post-hook.sh \
   function-trace.sh \
+  recursive.sh \
   flakes.sh
   # parallel.sh
@@ -41,4 +42,3 @@ tests-environment = NIX_REMOTE= $(bash) -e
 clean-files += $(d)/common.sh $(d)/config.nix
-
 installcheck: $(d)/common.sh $(d)/plugins/libplugintest.$(SO_EXT) $(d)/config.nix
tests/recursive.sh (new file, 72 lines)
@@ -0,0 +1,72 @@
+source common.sh
+
+# FIXME
+if [[ $(uname) != Linux ]]; then exit; fi
+
+clearStore
+
+export unreachable=$(nix add-to-store ./recursive.sh)
+
+nix --experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
+  with import ./config.nix;
+  with import <nix/config.nix>;
+  mkDerivation {
+    name = "recursive";
+    dummy = builtins.toFile "dummy" "bla bla";
+    SHELL = shell;
+
+    # Note: this is a string without context.
+    unreachable = builtins.getEnv "unreachable";
+
+    requiredSystemFeatures = [ "recursive-nix" ];
+
+    buildCommand = '\'\''
+      mkdir $out
+      PATH=${nixBinDir}:$PATH
+      opts="--experimental-features nix-command"
+
+      # Check that we can query/build paths in our input closure.
+      nix $opts path-info $dummy
+      nix $opts build $dummy
+
+      # Make sure we cannot query/build paths not in out input closure.
+      [[ -e $unreachable ]]
+      (! nix $opts path-info $unreachable)
+      (! nix $opts build $unreachable)
+
+      # Add something to the store.
+      echo foobar > foobar
+      foobar=$(nix $opts add-to-store ./foobar)
+
+      nix $opts path-info $foobar
+      nix $opts build $foobar
+
+      # Add it to our closure.
+      ln -s $foobar $out/foobar
+
+      [[ $(nix $opts path-info --all | wc -l) -eq 3 ]]
+
+      # Build a derivation.
+      nix $opts build -L --impure --expr '\''
+        derivation {
+          name = "inner1";
+          builder = builtins.getEnv "SHELL";
+          system = builtins.getEnv "system";
+          fnord = builtins.toFile "fnord" "fnord";
+          args = [ "-c" "echo $fnord blaat > $out" ];
+        }
+      '\''
+
+      [[ $(nix $opts path-info --json ./result) =~ fnord ]]
+
+      ln -s $(nix $opts path-info ./result) $out/inner1
+    '\'\'';
+  }
+'
+
+[[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]]
+
+# Make sure the recursively created paths are in the closure.
+nix path-info -r $TEST_ROOT/result | grep foobar
+nix path-info -r $TEST_ROOT/result | grep fnord
+nix path-info -r $TEST_ROOT/result | grep inner1
@@ -4,9 +4,9 @@ clearStore

 max=500

-reference=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+reference=$NIX_STORE_DIR/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bla
 touch $reference
 (echo $reference && echo && echo 0) | nix-store --register-validity

 echo "making registration..."