Merge branch 'no-manifests'
This commit is contained in:
commit
15e1b2c223
4
.gitignore
vendored
4
.gitignore
vendored
|
@ -35,8 +35,11 @@ Makefile.in
|
|||
|
||||
# /doc/manual/
|
||||
/doc/manual/manual.html
|
||||
/doc/manual/manual.xmli
|
||||
/doc/manual/manual.pdf
|
||||
/doc/manual/manual.is-valid
|
||||
/doc/manual/*.1
|
||||
/doc/manual/*.5
|
||||
/doc/manual/*.8
|
||||
/doc/manual/images
|
||||
/doc/manual/version.txt
|
||||
|
@ -60,6 +63,7 @@ Makefile.in
|
|||
/scripts/GeneratePatches.pm
|
||||
/scripts/download-using-manifests.pl
|
||||
/scripts/copy-from-other-stores.pl
|
||||
/scripts/download-from-binary-cache.pl
|
||||
/scripts/find-runtime-roots.pl
|
||||
/scripts/build-remote.pl
|
||||
/scripts/nix-reduce-build
|
||||
|
|
36
configure.ac
36
configure.ac
|
@ -25,12 +25,12 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
|
|||
|
||||
case "$host_os" in
|
||||
linux-gnu*)
|
||||
# For backward compatibility, strip the `-gnu' part.
|
||||
system="$machine_name-linux";;
|
||||
# For backward compatibility, strip the `-gnu' part.
|
||||
system="$machine_name-linux";;
|
||||
*)
|
||||
# Strip the version number from names such as `gnu0.3',
|
||||
# `darwin10.2.0', etc.
|
||||
system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
|
||||
system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
|
||||
esac])
|
||||
|
||||
sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
|
||||
|
@ -40,7 +40,7 @@ case $sys_name in
|
|||
sys_name=cygwin
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
AC_MSG_RESULT($system)
|
||||
AC_SUBST(system)
|
||||
AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
|
||||
|
@ -178,6 +178,7 @@ NEED_PROG(perl, perl)
|
|||
NEED_PROG(sed, sed)
|
||||
NEED_PROG(tar, tar)
|
||||
NEED_PROG(bzip2, bzip2)
|
||||
NEED_PROG(xz, xz)
|
||||
AC_PATH_PROG(dot, dot)
|
||||
AC_PATH_PROG(dblatex, dblatex)
|
||||
AC_PATH_PROG(gzip, gzip)
|
||||
|
@ -266,7 +267,7 @@ if test "$gc" = yes; then
|
|||
fi
|
||||
|
||||
|
||||
# Check for the required Perl dependencies (DBI and DBD::SQLite).
|
||||
# Check for the required Perl dependencies (DBI, DBD::SQLite and WWW::Curl).
|
||||
perlFlags="-I$perllibdir"
|
||||
|
||||
AC_ARG_WITH(dbi, AC_HELP_STRING([--with-dbi=PATH],
|
||||
|
@ -277,13 +278,24 @@ AC_ARG_WITH(dbd-sqlite, AC_HELP_STRING([--with-dbd-sqlite=PATH],
|
|||
[prefix of the Perl DBD::SQLite library]),
|
||||
perlFlags="$perlFlags -I$withval")
|
||||
|
||||
AC_ARG_WITH(www-curl, AC_HELP_STRING([--with-www-curl=PATH],
|
||||
[prefix of the Perl WWW::Curl library]),
|
||||
perlFlags="$perlFlags -I$withval")
|
||||
|
||||
AC_MSG_CHECKING([whether DBD::SQLite works])
|
||||
if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then
|
||||
AC_MSG_RESULT(no)
|
||||
AC_MSG_FAILURE([The Perl modules DBI and/or DBD::SQLite are missing.])
|
||||
fi
|
||||
AC_MSG_RESULT(yes)
|
||||
|
||||
|
||||
AC_MSG_CHECKING([whether WWW::Curl works])
|
||||
if ! $perl $perlFlags -e 'use WWW::Curl;' 2>&5; then
|
||||
AC_MSG_RESULT(no)
|
||||
AC_MSG_FAILURE([The Perl module WWW::Curl is missing.])
|
||||
fi
|
||||
AC_MSG_RESULT(yes)
|
||||
|
||||
AC_SUBST(perlFlags)
|
||||
|
||||
|
||||
|
@ -327,6 +339,18 @@ eval dynlib_suffix=$shrext_cmds
|
|||
AC_SUBST(dynlib_suffix)
|
||||
|
||||
|
||||
# Do we have GNU tar?
|
||||
AC_MSG_CHECKING([if you have GNU tar])
|
||||
if $tar --version 2> /dev/null | grep -q GNU; then
|
||||
AC_MSG_RESULT(yes)
|
||||
tarFlags="--warning=no-timestamp"
|
||||
else
|
||||
AC_MSG_RESULT(no)
|
||||
fi
|
||||
AC_SUBST(tarFlags)
|
||||
|
||||
|
||||
|
||||
AM_CONFIG_HEADER([config.h])
|
||||
AC_CONFIG_FILES([Makefile
|
||||
src/Makefile
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
all-local: config.nix
|
||||
|
||||
files = nar.nix buildenv.nix buildenv.pl unpack-channel.nix unpack-channel.sh derivation.nix fetchurl.nix \
|
||||
files = nar.nix buildenv.nix buildenv.pl unpack-channel.nix derivation.nix fetchurl.nix \
|
||||
imported-drv-to-derivation.nix
|
||||
|
||||
install-exec-local:
|
||||
|
|
|
@ -6,8 +6,10 @@ in {
|
|||
perl = "@perl@";
|
||||
shell = "@shell@";
|
||||
coreutils = "@coreutils@";
|
||||
bzip2 = fromEnv "NIX_BZIP2" "@bzip2@";
|
||||
bzip2 = "@bzip2@";
|
||||
xz = "@xz@";
|
||||
tar = "@tar@";
|
||||
tarFlags = "@tarFlags@";
|
||||
tr = "@tr@";
|
||||
curl = "@curl@";
|
||||
nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@";
|
||||
|
|
|
@ -6,28 +6,37 @@ let
|
|||
''
|
||||
export PATH=${nixBinDir}:${coreutils}
|
||||
|
||||
if [ $compressionType = "xz" ]; then
|
||||
ext=xz
|
||||
compressor="${xz} -9"
|
||||
else
|
||||
ext=bz2
|
||||
compressor="${bzip2}"
|
||||
fi
|
||||
|
||||
echo "packing ‘$storePath’..."
|
||||
mkdir $out
|
||||
dst=$out/tmp.nar.bz2
|
||||
dst=$out/tmp.nar.$ext
|
||||
|
||||
set -o pipefail
|
||||
nix-store --dump "$storePath" | ${bzip2} > $dst
|
||||
nix-store --dump "$storePath" | $compressor > $dst
|
||||
|
||||
nix-hash --flat --type $hashAlgo --base32 $dst > $out/narbz2-hash
|
||||
hash=$(nix-hash --flat --type $hashAlgo --base32 $dst)
|
||||
echo -n $hash > $out/nar-compressed-hash
|
||||
|
||||
mv $out/tmp.nar.bz2 $out/$(cat $out/narbz2-hash).nar.bz2
|
||||
mv $dst $out/$hash.nar.$ext
|
||||
'';
|
||||
|
||||
in
|
||||
|
||||
{ storePath, hashAlgo }:
|
||||
{ storePath, hashAlgo, compressionType }:
|
||||
|
||||
derivation {
|
||||
name = "nar";
|
||||
system = builtins.currentSystem;
|
||||
builder = shell;
|
||||
args = [ "-e" builder ];
|
||||
inherit storePath hashAlgo;
|
||||
inherit storePath hashAlgo compressionType;
|
||||
|
||||
# Don't build in a chroot because Nix's dependencies may not be there.
|
||||
__noChroot = true;
|
||||
|
|
|
@ -1,14 +1,31 @@
|
|||
with import <nix/config.nix>;
|
||||
|
||||
{ name, channelName, src }:
|
||||
let
|
||||
|
||||
builder = builtins.toFile "unpack-channel.sh"
|
||||
''
|
||||
mkdir $out
|
||||
cd $out
|
||||
${bzip2} -d < $src | ${tar} xf - --warning=no-timestamp
|
||||
mv * $out/$channelName
|
||||
if [ -n "$binaryCacheURL" ]; then
|
||||
mkdir $out/binary-caches
|
||||
echo -n "$binaryCacheURL" > $out/binary-caches/$channelName
|
||||
fi
|
||||
'';
|
||||
|
||||
in
|
||||
|
||||
{ name, channelName, src, binaryCacheURL ? "" }:
|
||||
|
||||
derivation {
|
||||
system = builtins.currentSystem;
|
||||
builder = shell;
|
||||
args = [ "-e" ./unpack-channel.sh ];
|
||||
inherit name channelName src bzip2 tar tr;
|
||||
args = [ "-e" builder ];
|
||||
inherit name channelName src binaryCacheURL;
|
||||
|
||||
PATH = "${nixBinDir}:${coreutils}";
|
||||
|
||||
|
||||
# No point in doing this remotely.
|
||||
preferLocalBuild = true;
|
||||
|
||||
|
|
|
@ -1,4 +0,0 @@
|
|||
mkdir $out
|
||||
cd $out
|
||||
$bzip2 -d < $src | $tar xf -
|
||||
mv * $out/$channelName
|
|
@ -30,6 +30,9 @@ gc-keep-derivations = true # Idem
|
|||
env-keep-derivations = false
|
||||
</programlisting>
|
||||
|
||||
<para>You can override settings using the <option>--option</option>
|
||||
flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
|
||||
|
||||
<para>The following settings are currently available:
|
||||
|
||||
<variablelist>
|
||||
|
@ -243,6 +246,16 @@ env-keep-derivations = false
|
|||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><literal>build-fallback</literal></term>
|
||||
|
||||
<listitem><para>If set to <literal>true</literal>, Nix will fall
|
||||
back to building from source if a binary substitute fails. This
|
||||
is equivalent to the <option>--fallback</option> flag. The
|
||||
default is <literal>false</literal>.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry xml:id="conf-build-chroot-dirs"><term><literal>build-chroot-dirs</literal></term>
|
||||
|
||||
<listitem><para>When builds are performed in a chroot environment,
|
||||
|
@ -307,6 +320,50 @@ build-use-chroot = /dev /proc /bin</programlisting>
|
|||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><literal>binary-caches</literal></term>
|
||||
|
||||
<listitem><para>A list of URLs of binary caches, separated by
|
||||
whitespace. The default is empty.<!-- The default is
|
||||
<literal>http://nixos.org/binary-cache</literal>. --></para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><literal>binary-caches-files</literal></term>
|
||||
|
||||
<listitem><para>A list of names of files that will be read to
|
||||
obtain additional binary cache URLs. The default is
|
||||
<literal>/nix/var/nix/profiles/per-user/root/channels/binary-caches/*</literal>,
|
||||
which ensures that Nix will use the binary caches corresponding to
|
||||
the channels installed by root. Do not set this option to read
|
||||
files created by untrusted users!</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><literal>trusted-binary-caches</literal></term>
|
||||
|
||||
<listitem><para>A list of URLs of binary caches, separated by
|
||||
whitespace. These are not used by default, but can be enabled by
|
||||
users of the Nix daemon by specifying <literal>--option
|
||||
binary-caches <replaceable>urls</replaceable></literal> on the
|
||||
command line. Daemon users are only allowed to pass a subset of
|
||||
the URLs listed in <literal>binary-caches</literal> and
|
||||
<literal>trusted-binary-caches</literal>.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><literal>binary-caches-parallel-connections</literal></term>
|
||||
|
||||
<listitem><para>The maximum number of parallel HTTP connections
|
||||
used by the binary cache substituter to get NAR info files. This
|
||||
number should be high to minimise latency. It defaults to
|
||||
150.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
|
||||
<varlistentry><term><literal>system</literal></term>
|
||||
|
||||
<listitem><para>This option specifies the canonical Nix system
|
||||
|
|
|
@ -94,6 +94,11 @@
|
|||
<citerefentry><refentrytitle>nix.conf</refentrytitle><manvolnum>5</manvolnum></citerefentry>.</para>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>When using the Nix daemon, the <option>-s</option> flag in
|
||||
<command>nix-env -qa</command> is now much faster.</para>
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
|
||||
</section>
|
||||
|
|
|
@ -16,6 +16,7 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
|
|||
%endif
|
||||
BuildRequires: perl(DBD::SQLite)
|
||||
BuildRequires: perl(DBI)
|
||||
BuildRequires: perl(WWW::Curl)
|
||||
BuildRequires: perl(ExtUtils::ParseXS)
|
||||
Requires: /usr/bin/perl
|
||||
Requires: curl
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in
|
||||
PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in lib/Nix/Utils.pm
|
||||
|
||||
all: $(PERL_MODULES:.in=)
|
||||
|
||||
|
|
|
@ -1,27 +1,39 @@
|
|||
package Nix::Config;
|
||||
|
||||
$version = "@version@";
|
||||
|
||||
$binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
|
||||
$libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
|
||||
$stateDir = $ENV{"NIX_STATE_DIR"} || "@localstatedir@/nix";
|
||||
$manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests";
|
||||
$logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
|
||||
$confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
|
||||
$storeDir = $ENV{"NIX_STORE_DIR"} || "@storedir@";
|
||||
|
||||
$bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@";
|
||||
$bzip2 = "@bzip2@";
|
||||
$xz = "@xz@";
|
||||
$curl = "@curl@";
|
||||
|
||||
$useBindings = "@perlbindings@" eq "yes";
|
||||
|
||||
%config = ();
|
||||
|
||||
sub readConfig {
|
||||
my %config;
|
||||
my $config = "@sysconfdir@/nix/nix.conf";
|
||||
if (defined $ENV{'_NIX_OPTIONS'}) {
|
||||
foreach my $s (split '\n', $ENV{'_NIX_OPTIONS'}) {
|
||||
my ($n, $v) = split '=', $s, 2;
|
||||
$config{$n} = $v;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
my $config = "$confDir/nix.conf";
|
||||
return unless -f $config;
|
||||
|
||||
|
||||
open CONFIG, "<$config" or die "cannot open `$config'";
|
||||
while (<CONFIG>) {
|
||||
/^\s*([\w|-]+)\s*=\s*(.*)$/ or next;
|
||||
$config{$1} = $2;
|
||||
print "|$1| -> |$2|\n";
|
||||
}
|
||||
close CONFIG;
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@ void doInit()
|
|||
{
|
||||
if (!store) {
|
||||
try {
|
||||
setDefaultsFromEnvironment();
|
||||
settings.processEnvironment();
|
||||
store = openStore();
|
||||
} catch (Error & e) {
|
||||
croak(e.what());
|
||||
|
|
19
perl/lib/Nix/Utils.pm
Normal file
19
perl/lib/Nix/Utils.pm
Normal file
|
@ -0,0 +1,19 @@
|
|||
package Nix::Utils;
|
||||
|
||||
$urlRE = "(?: [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*]+ )";
|
||||
|
||||
sub checkURL {
|
||||
my ($url) = @_;
|
||||
die "invalid URL ‘$url’\n" unless $url =~ /^ $urlRE $ /x;
|
||||
}
|
||||
|
||||
sub uniq {
|
||||
my %seen;
|
||||
my @res;
|
||||
foreach my $name (@_) {
|
||||
next if $seen{$name};
|
||||
$seen{$name} = 1;
|
||||
push @res, $name;
|
||||
}
|
||||
return @res;
|
||||
}
|
23
release.nix
23
release.nix
|
@ -29,6 +29,7 @@ let
|
|||
--with-xml-flags=--nonet
|
||||
--with-dbi=${perlPackages.DBI}/${perl.libPrefix}
|
||||
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
|
||||
--with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
|
||||
'';
|
||||
|
||||
postUnpack = ''
|
||||
|
@ -43,7 +44,7 @@ let
|
|||
|
||||
preDist = ''
|
||||
make -C doc/manual install prefix=$out
|
||||
|
||||
|
||||
make -C doc/manual manual.pdf prefix=$out
|
||||
cp doc/manual/manual.pdf $out/manual.pdf
|
||||
|
||||
|
@ -54,7 +55,7 @@ let
|
|||
# to Windows and Macs, so there should be no Linux binaries
|
||||
# in the closure).
|
||||
nuke-refs $out/manual.pdf
|
||||
|
||||
|
||||
echo "doc manual $out/share/doc/nix/manual" >> $out/nix-support/hydra-build-products
|
||||
echo "doc-pdf manual $out/manual.pdf" >> $out/nix-support/hydra-build-products
|
||||
echo "doc release-notes $out/share/doc/nix/release-notes" >> $out/nix-support/hydra-build-products
|
||||
|
@ -77,6 +78,7 @@ let
|
|||
--disable-init-state
|
||||
--with-dbi=${perlPackages.DBI}/${perl.libPrefix}
|
||||
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
|
||||
--with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
|
||||
--enable-gc
|
||||
'';
|
||||
|
||||
|
@ -134,12 +136,13 @@ let
|
|||
--disable-init-state
|
||||
--with-dbi=${perlPackages.DBI}/${perl.libPrefix}
|
||||
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
|
||||
--with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
|
||||
'';
|
||||
|
||||
dontInstall = false;
|
||||
|
||||
doInstallCheck = true;
|
||||
|
||||
|
||||
lcovFilter = [ "*/boost/*" "*-tab.*" ];
|
||||
|
||||
# We call `dot', and even though we just use it to
|
||||
|
@ -148,16 +151,16 @@ let
|
|||
FONTCONFIG_FILE = texFunctions.fontsConf;
|
||||
};
|
||||
|
||||
|
||||
|
||||
rpm_fedora13i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora13i386) 50;
|
||||
rpm_fedora13x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora13x86_64) 50;
|
||||
rpm_fedora16i386 = makeRPM_i686 (diskImageFuns: diskImageFuns.fedora16i386) 50;
|
||||
rpm_fedora16x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora16x86_64) 50;
|
||||
|
||||
|
||||
|
||||
deb_debian60i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian60i386) 50;
|
||||
deb_debian60x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian60x86_64) 50;
|
||||
|
||||
|
||||
deb_ubuntu1004i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1004i386) 50;
|
||||
deb_ubuntu1004x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1004x86_64) 50;
|
||||
deb_ubuntu1010i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1010i386) 50;
|
||||
|
@ -183,7 +186,7 @@ let
|
|||
makeRPM_i686 = makeRPM "i686-linux";
|
||||
makeRPM_x86_64 = makeRPM "x86_64-linux";
|
||||
|
||||
makeRPM =
|
||||
makeRPM =
|
||||
system: diskImageFun: prio:
|
||||
|
||||
with import nixpkgs { inherit system; };
|
||||
|
@ -192,7 +195,7 @@ let
|
|||
name = "nix-rpm-${diskImage.name}";
|
||||
src = jobs.tarball;
|
||||
diskImage = (diskImageFun vmTools.diskImageFuns)
|
||||
{ extraPackages = [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" ]; };
|
||||
{ extraPackages = [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "perl-WWW-Curl" ]; };
|
||||
memSize = 1024;
|
||||
meta.schedulingPriority = prio;
|
||||
postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck";
|
||||
|
@ -201,7 +204,7 @@ let
|
|||
|
||||
makeDeb_i686 = makeDeb "i686-linux";
|
||||
makeDeb_x86_64 = makeDeb "x86_64-linux";
|
||||
|
||||
|
||||
makeDeb =
|
||||
system: diskImageFun: prio:
|
||||
|
||||
|
@ -211,7 +214,7 @@ let
|
|||
name = "nix-deb";
|
||||
src = jobs.tarball;
|
||||
diskImage = (diskImageFun vmTools.diskImageFuns)
|
||||
{ extraPackages = [ "libdbd-sqlite3-perl" "libsqlite3-dev" "libbz2-dev" ]; };
|
||||
{ extraPackages = [ "libdbd-sqlite3-perl" "libsqlite3-dev" "libbz2-dev" "libwww-curl-perl" ]; };
|
||||
memSize = 1024;
|
||||
meta.schedulingPriority = prio;
|
||||
configureFlags = "--sysconfdir=/etc";
|
||||
|
|
|
@ -7,17 +7,14 @@ noinst_SCRIPTS = nix-profile.sh \
|
|||
find-runtime-roots.pl build-remote.pl nix-reduce-build \
|
||||
copy-from-other-stores.pl nix-http-export.cgi
|
||||
|
||||
nix-pull nix-push: download-using-manifests.pl
|
||||
|
||||
install-exec-local: download-using-manifests.pl copy-from-other-stores.pl find-runtime-roots.pl
|
||||
install-exec-local: download-using-manifests.pl copy-from-other-stores.pl download-from-binary-cache.pl find-runtime-roots.pl
|
||||
$(INSTALL) -d $(DESTDIR)$(sysconfdir)/profile.d
|
||||
$(INSTALL_DATA) nix-profile.sh $(DESTDIR)$(sysconfdir)/profile.d/nix.sh
|
||||
$(INSTALL) -d $(DESTDIR)$(libexecdir)/nix
|
||||
$(INSTALL_PROGRAM) find-runtime-roots.pl $(DESTDIR)$(libexecdir)/nix
|
||||
$(INSTALL_PROGRAM) build-remote.pl $(DESTDIR)$(libexecdir)/nix
|
||||
$(INSTALL) -d $(DESTDIR)$(libexecdir)/nix/substituters
|
||||
$(INSTALL_PROGRAM) download-using-manifests.pl $(DESTDIR)$(libexecdir)/nix/substituters
|
||||
$(INSTALL_PROGRAM) copy-from-other-stores.pl $(DESTDIR)$(libexecdir)/nix/substituters
|
||||
$(INSTALL_PROGRAM) download-using-manifests.pl copy-from-other-stores.pl download-from-binary-cache.pl $(DESTDIR)$(libexecdir)/nix/substituters
|
||||
$(INSTALL) -d $(DESTDIR)$(sysconfdir)/nix
|
||||
|
||||
include ../substitute.mk
|
||||
|
@ -29,6 +26,7 @@ EXTRA_DIST = nix-collect-garbage.in \
|
|||
nix-build.in \
|
||||
download-using-manifests.pl.in \
|
||||
copy-from-other-stores.pl.in \
|
||||
download-from-binary-cache.pl.in \
|
||||
nix-copy-closure.in \
|
||||
find-runtime-roots.pl.in \
|
||||
build-remote.pl.in \
|
||||
|
|
|
@ -36,42 +36,45 @@ sub findStorePath {
|
|||
if ($ARGV[0] eq "--query") {
|
||||
|
||||
while (<STDIN>) {
|
||||
my $cmd = $_; chomp $cmd;
|
||||
chomp;
|
||||
my ($cmd, @args) = split " ", $_;
|
||||
|
||||
if ($cmd eq "have") {
|
||||
my $storePath = <STDIN>; chomp $storePath;
|
||||
print STDOUT (defined findStorePath($storePath) ? "1\n" : "0\n");
|
||||
foreach my $storePath (@args) {
|
||||
print "$storePath\n" if defined findStorePath($storePath);
|
||||
}
|
||||
print "\n";
|
||||
}
|
||||
|
||||
elsif ($cmd eq "info") {
|
||||
my $storePath = <STDIN>; chomp $storePath;
|
||||
my ($store, $sourcePath) = findStorePath($storePath);
|
||||
if (!defined $store) {
|
||||
print "0\n";
|
||||
next; # not an error
|
||||
foreach my $storePath (@args) {
|
||||
my ($store, $sourcePath) = findStorePath($storePath);
|
||||
next unless defined $store;
|
||||
|
||||
$ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
|
||||
|
||||
my $deriver = `@bindir@/nix-store --query --deriver $storePath`;
|
||||
die "cannot query deriver of `$storePath'" if $? != 0;
|
||||
chomp $deriver;
|
||||
$deriver = "" if $deriver eq "unknown-deriver";
|
||||
|
||||
my @references = split "\n",
|
||||
`@bindir@/nix-store --query --references $storePath`;
|
||||
die "cannot query references of `$storePath'" if $? != 0;
|
||||
|
||||
my $narSize = `@bindir@/nix-store --query --size $storePath`;
|
||||
die "cannot query size of `$storePath'" if $? != 0;
|
||||
chomp $narSize;
|
||||
|
||||
print "$storePath\n";
|
||||
print "$deriver\n";
|
||||
print scalar @references, "\n";
|
||||
print "$_\n" foreach @references;
|
||||
print "$narSize\n";
|
||||
print "$narSize\n";
|
||||
}
|
||||
print "1\n";
|
||||
|
||||
$ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
|
||||
|
||||
my $deriver = `@bindir@/nix-store --query --deriver $storePath`;
|
||||
die "cannot query deriver of `$storePath'" if $? != 0;
|
||||
chomp $deriver;
|
||||
$deriver = "" if $deriver eq "unknown-deriver";
|
||||
|
||||
my @references = split "\n",
|
||||
`@bindir@/nix-store --query --references $storePath`;
|
||||
die "cannot query references of `$storePath'" if $? != 0;
|
||||
|
||||
my $narSize = `@bindir@/nix-store --query --size $storePath`;
|
||||
die "cannot query size of `$storePath'" if $? != 0;
|
||||
chomp $narSize;
|
||||
|
||||
print "$deriver\n";
|
||||
print scalar @references, "\n";
|
||||
print "$_\n" foreach @references;
|
||||
print "$narSize\n";
|
||||
print "$narSize\n";
|
||||
print "\n";
|
||||
}
|
||||
|
||||
else { die "unknown command `$cmd'"; }
|
||||
|
@ -84,9 +87,10 @@ elsif ($ARGV[0] eq "--substitute") {
|
|||
my $storePath = $ARGV[1];
|
||||
my ($store, $sourcePath) = findStorePath $storePath;
|
||||
die unless $store;
|
||||
print "\n*** Copying `$storePath' from `$sourcePath'\n\n";
|
||||
print STDERR "\n*** Copying `$storePath' from `$sourcePath'\n\n";
|
||||
system("$binDir/nix-store --dump $sourcePath | $binDir/nix-store --restore $storePath") == 0
|
||||
or die "cannot copy `$sourcePath' to `$storePath'";
|
||||
print "\n"; # no hash to verify
|
||||
}
|
||||
|
||||
|
||||
|
|
537
scripts/download-from-binary-cache.pl.in
Normal file
537
scripts/download-from-binary-cache.pl.in
Normal file
|
@ -0,0 +1,537 @@
|
|||
#! @perl@ -w @perlFlags@
|
||||
|
||||
use DBI;
|
||||
use File::Basename;
|
||||
use IO::Select;
|
||||
use Nix::Config;
|
||||
use Nix::Store;
|
||||
use Nix::Utils;
|
||||
use WWW::Curl::Easy;
|
||||
use WWW::Curl::Multi;
|
||||
use strict;
|
||||
|
||||
|
||||
Nix::Config::readConfig;
|
||||
|
||||
my @caches;
|
||||
my $gotCaches = 0;
|
||||
|
||||
my $maxParallelRequests = int($Nix::Config::config{"binary-caches-parallel-connections"} // 150);
|
||||
$maxParallelRequests = 1 if $maxParallelRequests < 1;
|
||||
|
||||
my $debug = ($ENV{"NIX_DEBUG_SUBST"} // "") eq 1;
|
||||
|
||||
my ($dbh, $queryCache, $insertNAR, $queryNAR, $insertNARExistence, $queryNARExistence);
|
||||
|
||||
my $curlm = WWW::Curl::Multi->new;
|
||||
my $activeRequests = 0;
|
||||
my $curlIdCount = 1;
|
||||
my %requests;
|
||||
my %scheduled;
|
||||
my $caBundle = $ENV{"CURL_CA_BUNDLE"} // $ENV{"OPENSSL_X509_CERT_FILE"};
|
||||
|
||||
|
||||
sub addRequest {
|
||||
my ($storePath, $url, $head) = @_;
|
||||
|
||||
my $curl = WWW::Curl::Easy->new;
|
||||
my $curlId = $curlIdCount++;
|
||||
$requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "", type => $head ? "HEAD" : "GET" };
|
||||
|
||||
$curl->setopt(CURLOPT_PRIVATE, $curlId);
|
||||
$curl->setopt(CURLOPT_URL, $url);
|
||||
$curl->setopt(CURLOPT_WRITEDATA, \$requests{$curlId}->{content});
|
||||
$curl->setopt(CURLOPT_FOLLOWLOCATION, 1);
|
||||
$curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle;
|
||||
$curl->setopt(CURLOPT_USERAGENT, "Nix/$Nix::Config::version");
|
||||
$curl->setopt(CURLOPT_NOBODY, 1) if $head;
|
||||
$curl->setopt(CURLOPT_FAILONERROR, 1);
|
||||
|
||||
if ($activeRequests >= $maxParallelRequests) {
|
||||
$scheduled{$curlId} = 1;
|
||||
} else {
|
||||
$curlm->add_handle($curl);
|
||||
$activeRequests++;
|
||||
}
|
||||
|
||||
return $requests{$curlId};
|
||||
}
|
||||
|
||||
|
||||
sub processRequests {
|
||||
while ($activeRequests) {
|
||||
my ($rfds, $wfds, $efds) = $curlm->fdset();
|
||||
#print STDERR "R = @{$rfds}, W = @{$wfds}, E = @{$efds}\n";
|
||||
|
||||
# Sleep until we can read or write some data.
|
||||
if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) {
|
||||
IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 0.1);
|
||||
}
|
||||
|
||||
if ($curlm->perform() != $activeRequests) {
|
||||
while (my ($id, $result) = $curlm->info_read) {
|
||||
if ($id) {
|
||||
my $request = $requests{$id} or die;
|
||||
my $handle = $request->{handle};
|
||||
$request->{result} = $result;
|
||||
$request->{httpStatus} = $handle->getinfo(CURLINFO_RESPONSE_CODE);
|
||||
|
||||
print STDERR "$request->{type} on $request->{url} [$request->{result}, $request->{httpStatus}]\n" if $debug;
|
||||
|
||||
$activeRequests--;
|
||||
delete $request->{handle};
|
||||
|
||||
if (scalar(keys %scheduled) > 0) {
|
||||
my $id2 = (keys %scheduled)[0];
|
||||
$curlm->add_handle($requests{$id2}->{handle});
|
||||
$activeRequests++;
|
||||
delete $scheduled{$id2};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sub initCache {
|
||||
my $dbPath = "$Nix::Config::stateDir/binary-cache-v1.sqlite";
|
||||
|
||||
# Open/create the database.
|
||||
$dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
|
||||
or die "cannot open database `$dbPath'";
|
||||
$dbh->{RaiseError} = 1;
|
||||
$dbh->{PrintError} = 0;
|
||||
|
||||
$dbh->do("pragma synchronous = off"); # we can always reproduce the cache
|
||||
$dbh->do("pragma journal_mode = truncate");
|
||||
|
||||
# Initialise the database schema, if necessary.
|
||||
$dbh->do(<<EOF);
|
||||
create table if not exists BinaryCaches (
|
||||
id integer primary key autoincrement not null,
|
||||
url text unique not null,
|
||||
timestamp integer not null,
|
||||
storeDir text not null,
|
||||
wantMassQuery integer not null
|
||||
);
|
||||
EOF
|
||||
|
||||
$dbh->do(<<EOF);
|
||||
create table if not exists NARs (
|
||||
cache integer not null,
|
||||
storePath text not null,
|
||||
url text not null,
|
||||
compression text not null,
|
||||
fileHash text,
|
||||
fileSize integer,
|
||||
narHash text,
|
||||
narSize integer,
|
||||
refs text,
|
||||
deriver text,
|
||||
system text,
|
||||
timestamp integer not null,
|
||||
primary key (cache, storePath),
|
||||
foreign key (cache) references BinaryCaches(id) on delete cascade
|
||||
);
|
||||
EOF
|
||||
|
||||
$dbh->do(<<EOF);
|
||||
create table if not exists NARExistence (
|
||||
cache integer not null,
|
||||
storePath text not null,
|
||||
exist integer not null,
|
||||
timestamp integer not null,
|
||||
primary key (cache, storePath),
|
||||
foreign key (cache) references BinaryCaches(id) on delete cascade
|
||||
);
|
||||
EOF
|
||||
|
||||
$queryCache = $dbh->prepare("select id, storeDir, wantMassQuery from BinaryCaches where url = ?") or die;
|
||||
|
||||
$insertNAR = $dbh->prepare(
|
||||
"insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " .
|
||||
"narSize, refs, deriver, system, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
|
||||
|
||||
$queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die;
|
||||
|
||||
$insertNARExistence = $dbh->prepare(
|
||||
"insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)") or die;
|
||||
|
||||
$queryNARExistence = $dbh->prepare("select exist from NARExistence where cache = ? and storePath = ?") or die;
|
||||
}
|
||||
|
||||
|
||||
# Discover the set of binary caches to use and record them in @caches.
# Sources, in order: the ‘binary-caches’ option in nix.conf, any
# per-channel binary-cache files, and — for untrusted daemon clients —
# the ‘untrusted-binary-caches’ option, which must be a subset of the
# trusted caches.  Each cache's metadata (/nix-cache-info) is fetched
# once and memoised in the BinaryCaches table.  Idempotent: only the
# first call does any work.
sub getAvailableCaches {
    return if $gotCaches;
    $gotCaches = 1;

    # Split a space-separated list of URLs, stripping trailing slashes.
    sub strToList {
        my ($s) = @_;
        return map { s/\/+$//; $_ } split(/ /, $s);
    }

    my @urls = strToList ($Nix::Config::config{"binary-caches"} // "");
    # // ($Nix::Config::storeDir eq "/nix/store" ? "http://nixos.org/binary-cache" : ""));

    # Also read cache URLs advertised by subscribed channels (one URL
    # per file, first line only).
    my $urlsFiles = $Nix::Config::config{"binary-cache-files"}
        // "/nix/var/nix/profiles/per-user/root/channels/binary-caches/*";
    foreach my $urlFile (glob $urlsFiles) {
        next unless -f $urlFile;
        open FILE, "<$urlFile" or die "cannot open ‘$urlFile’\n";
        my $url = <FILE>; chomp $url;
        close FILE;
        push @urls, strToList($url);
    }

    # Allow Nix daemon users to override the binary caches to a subset
    # of those listed in the config file.  Note that ‘untrusted-*’
    # denotes options passed by the client.
    if (defined $Nix::Config::config{"untrusted-binary-caches"}) {
        my @untrustedUrls = strToList $Nix::Config::config{"untrusted-binary-caches"};
        my @trustedUrls = (@urls, strToList($Nix::Config::config{"trusted-binary-caches"} // ""));
        @urls = ();
        foreach my $url (@untrustedUrls) {
            # Fix: ‘scalar(grep ...)’ is required here.  Without it,
            # list-operator precedence makes ‘@trustedUrls > 0’ the
            # *list argument* of grep (a single boolean), so the
            # membership test never inspected @trustedUrls at all.
            die "binary cache ‘$url’ is not trusted (please add it to ‘trusted-binary-caches’ in $Nix::Config::confDir/nix.conf)\n"
                unless scalar(grep { $url eq $_ } @trustedUrls) > 0;
            push @urls, $url;
        }
    }

    foreach my $url (Nix::Utils::uniq @urls) {

        # FIXME: not atomic.
        $queryCache->execute($url);
        my $res = $queryCache->fetchrow_hashref();
        if (defined $res) {
            # Known cache: only usable if it serves our store dir.
            next if $res->{storeDir} ne $Nix::Config::storeDir;
            push @caches, { id => $res->{id}, url => $url, wantMassQuery => $res->{wantMassQuery} };
            next;
        }

        # Get the cache info file.
        my $request = addRequest(undef, $url . "/nix-cache-info");
        processRequests;

        if ($request->{result} != 0) {
            print STDERR "could not download ‘$request->{url}’ (" .
                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
            next;
        }

        my $storeDir = "/nix/store";
        my $wantMassQuery = 0;
        foreach my $line (split "\n", $request->{content}) {
            unless ($line =~ /^(.*): (.*)$/) {
                print STDERR "bad cache info file ‘$request->{url}’\n";
                # NOTE(review): this aborts discovery of *all* caches;
                # skipping just this cache may be intended — confirm.
                return undef;
            }
            if ($1 eq "StoreDir") { $storeDir = $2; }
            elsif ($1 eq "WantMassQuery") { $wantMassQuery = int($2); }
        }

        $dbh->do("insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery) values (?, ?, ?, ?)",
                 {}, $url, time(), $storeDir, $wantMassQuery);
        my $id = $dbh->last_insert_id("", "", "", "");
        next if $storeDir ne $Nix::Config::storeDir;
        push @caches, { id => $id, url => $url, wantMassQuery => $wantMassQuery };
    }
}
|
||||
|
||||
|
||||
# Parse a downloaded .narinfo file for $storePath and memoise the
# result in the NARs table.  Returns a hash ref describing the NAR
# (url, compression, hashes, sizes, references, deriver, system), or
# undef if the download failed or the file is malformed.  A 404 (or
# curl error 37, "file not found" for file:// URLs) is recorded as a
# negative existence hit.
sub processNARInfo {
    my ($storePath, $cache, $request) = @_;

    if ($request->{result} != 0) {
        if ($request->{result} != 37 && $request->{httpStatus} != 404) {
            print STDERR "could not download ‘$request->{url}’ (" .
                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
        } else {
            # Remember that this cache lacks the path — except for
            # file:// caches, which are cheap to re-check.
            $insertNARExistence->execute($cache->{id}, basename($storePath), 0, time())
                unless $request->{url} =~ /^file:/;
        }
        return undef;
    }

    my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver, $system);
    my $compression = "bzip2"; # default when no Compression field is present
    my @refs;
    foreach my $line (split "\n", $request->{content}) {
        unless ($line =~ /^(.*): (.*)$/) {
            print STDERR "bad NAR info file ‘$request->{url}’\n";
            return undef;
        }
        if ($1 eq "StorePath") { $storePath2 = $2; }
        elsif ($1 eq "URL") { $url = $2; }
        elsif ($1 eq "Compression") { $compression = $2; }
        elsif ($1 eq "FileHash") { $fileHash = $2; }
        elsif ($1 eq "FileSize") { $fileSize = int($2); }
        elsif ($1 eq "NarHash") { $narHash = $2; }
        elsif ($1 eq "NarSize") { $narSize = int($2); }
        elsif ($1 eq "References") { @refs = split / /, $2; }
        elsif ($1 eq "Deriver") { $deriver = $2; }
        elsif ($1 eq "System") { $system = $2; }
    }

    # Validate the mandatory fields.  (Fix: the former early
    # ‘return undef if $storePath ne $storePath2’ duplicated this
    # check, bypassed the error message below, and emitted an
    # uninitialised-value warning when StorePath was absent; it is now
    # folded into this single defined-aware check.)
    if (!defined $storePath2 || $storePath2 ne $storePath || !defined $url || !defined $narHash) {
        print STDERR "bad NAR info file ‘$request->{url}’\n";
        return undef;
    }

    # Cache the result, except for file:// caches.
    $insertNAR->execute(
        $cache->{id}, basename($storePath), $url, $compression, $fileHash, $fileSize,
        $narHash, $narSize, join(" ", @refs), $deriver, $system, time())
        unless $request->{url} =~ /^file:/;

    return
        { url => $url
        , compression => $compression
        , fileHash => $fileHash
        , fileSize => $fileSize
        , narHash => $narHash
        , narSize => $narSize
        , refs => [ @refs ]
        , deriver => $deriver
        , system => $system
        };
}
|
||||
|
||||
|
||||
# Look up previously memoised .narinfo data for $storePath in the
# given cache.  Returns a hash ref in the same shape as
# processNARInfo(), or undef when nothing is cached.
sub getCachedInfoFrom {
    my ($storePath, $cache) = @_;

    $queryNAR->execute($cache->{id}, basename($storePath));
    my $res = $queryNAR->fetchrow_hashref();
    return undef unless defined $res;

    # Fix: the trailing ‘if defined $res’ on the original return was
    # dead code (undef is already returned above), and the ‘system’
    # column stored in the NARs table was silently dropped; it is now
    # included for parity with processNARInfo().
    return
        { url => $res->{url}
        , compression => $res->{compression}
        , fileHash => $res->{fileHash}
        , fileSize => $res->{fileSize}
        , narHash => $res->{narHash}
        , narSize => $res->{narSize}
        , refs => [ split " ", $res->{refs} ]
        , deriver => $res->{deriver}
        , system => $res->{system}
        };
}
|
||||
|
||||
|
||||
# Return true iff this cache is recorded as definitely *not* having
# $storePath (a cached negative existence hit).
sub negativeHit {
    my ($storePath, $cache) = @_;
    $queryNARExistence->execute($cache->{id}, basename($storePath));
    my $row = $queryNARExistence->fetchrow_hashref();
    return defined($row) && $row->{exist} == 0;
}
|
||||
|
||||
|
||||
# Return true iff $storePath is known to exist in this cache, either
# via memoised NAR info or via a cached positive existence hit.
sub positiveHit {
    my ($storePath, $cache) = @_;
    return 1 if defined getCachedInfoFrom($storePath, $cache);
    $queryNARExistence->execute($cache->{id}, basename($storePath));
    my $row = $queryNARExistence->fetchrow_hashref();
    return defined($row) && $row->{exist} == 1;
}
|
||||
|
||||
|
||||
# Emit one substituter "info" record on stdout: path, deriver (or an
# empty line), reference count, references, file size, NAR size.
sub printInfo {
    my ($storePath, $info) = @_;
    print "$storePath\n";
    my $deriverLine = $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "";
    print $deriverLine, "\n";
    my @refs = @{$info->{refs}};
    print scalar(@refs), "\n";
    print "$Nix::Config::storeDir/$_\n" foreach @refs;
    # Sizes may be unknown (undef); report 0 in that case.
    print $info->{fileSize} || 0, "\n";
    print $info->{narSize} || 0, "\n";
}
|
||||
|
||||
|
||||
# Construct the URL of the .narinfo file for $storePath in the given
# binary cache: "<cache>/<hash-part>.narinfo", where the hash part is
# the first 32 characters of the store path's base name.
sub infoUrl {
    my ($binaryCacheUrl, $storePath) = @_;
    my $pathHash = substr(basename($storePath), 0, 32);
    # Fix: return the URL directly instead of assigning it to an
    # unused lexical and relying on implicit last-expression return.
    return "$binaryCacheUrl/$pathHash.narinfo";
}
|
||||
|
||||
|
||||
# Print substituter "info" records for each of the given store paths,
# fetching missing .narinfo files from all caches in parallel via the
# addRequest/processRequests machinery.  Paths not found in any cache
# produce no output.
sub printInfoParallel {
    my @paths = @_;

    # First print all paths for which we have cached info.
    my @left;
    foreach my $storePath (@paths) {
        my $found = 0;
        foreach my $cache (@caches) {
            my $info = getCachedInfoFrom($storePath, $cache);
            if (defined $info) {
                printInfo($storePath, $info);
                $found = 1;
                last;
            }
        }
        push @left, $storePath if !$found;
    }

    return if scalar @left == 0;

    # For the remainder, try each cache in turn, batching one HTTP
    # request per still-unresolved path.
    foreach my $cache (@caches) {

        my @left2;
        # %requests is the global batch shared with addRequest /
        # processRequests; reset it for this cache's round.
        %requests = ();
        foreach my $storePath (@left) {
            # A cached negative hit means this cache can't help;
            # defer the path to the next cache.
            if (negativeHit($storePath, $cache)) {
                push @left2, $storePath;
                next;
            }
            addRequest($storePath, infoUrl($cache->{url}, $storePath));
        }

        processRequests;

        foreach my $request (values %requests) {
            my $info = processNARInfo($request->{storePath}, $cache, $request);
            if (defined $info) {
                printInfo($request->{storePath}, $info);
            } else {
                # Download or parse failure: let a later cache try.
                push @left2, $request->{storePath};
            }
        }

        @left = @left2;
    }
}
|
||||
|
||||
|
||||
# Print the subset of the given store paths that some binary cache can
# supply (the substituter "have" query).  Only caches with
# wantMassQuery set are consulted; unknown paths are probed with
# parallel HEAD requests and the outcome is memoised.
sub printSubstitutablePaths {
    my @paths = @_;

    # First look for paths that have cached info.
    my @left;
    foreach my $storePath (@paths) {
        my $found = 0;
        foreach my $cache (@caches) {
            next unless $cache->{wantMassQuery};
            if (positiveHit($storePath, $cache)) {
                print "$storePath\n";
                $found = 1;
                last;
            }
        }
        push @left, $storePath if !$found;
    }

    return if scalar @left == 0;

    # For remaining paths, do HEAD requests.
    foreach my $cache (@caches) {
        next unless $cache->{wantMassQuery};
        my @left2;
        # %requests is the global batch shared with addRequest /
        # processRequests; reset it for this cache's round.
        %requests = ();
        foreach my $storePath (@left) {
            # A cached negative hit: defer to the next cache.
            if (negativeHit($storePath, $cache)) {
                push @left2, $storePath;
                next;
            }
            # Third argument requests a HEAD rather than a GET.
            addRequest($storePath, infoUrl($cache->{url}, $storePath), 1);
        }

        processRequests;

        foreach my $request (values %requests) {
            if ($request->{result} != 0) {
                if ($request->{result} != 37 && $request->{httpStatus} != 404) {
                    print STDERR "could not check ‘$request->{url}’ (" .
                        ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
                } else {
                    # Record a negative hit, except for file:// caches
                    # which are cheap to re-check.
                    $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 0, time())
                        unless $request->{url} =~ /^file:/;
                }
                push @left2, $request->{storePath};
            } else {
                # Record a positive hit, then report the path.
                $insertNARExistence->execute($cache->{id}, basename($request->{storePath}), 1, time())
                    unless $request->{url} =~ /^file:/;
                print "$request->{storePath}\n";
            }
        }

        @left = @left2;
    }
}
|
||||
|
||||
|
||||
# Download and unpack the NAR for $storePath from the first binary
# cache that has it, restoring it into the store via ‘nix-store
# --restore’, and print the expected NAR hash on stdout so that Nix
# can verify the result.
sub downloadBinary {
    my ($storePath) = @_;

    foreach my $cache (@caches) {
        # Use memoised .narinfo data when available; otherwise fetch it.
        my $info = getCachedInfoFrom($storePath, $cache);

        unless (defined $info) {
            next if negativeHit($storePath, $cache);
            my $request = addRequest($storePath, infoUrl($cache->{url}, $storePath));
            processRequests;
            $info = processNARInfo($storePath, $cache, $request);
        }

        next unless defined $info;

        # Select the decompressor matching the NAR's compression method.
        my $decompressor;
        if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; }
        elsif ($info->{compression} eq "xz") { $decompressor = "$Nix::Config::xz -d"; }
        else {
            print STDERR "unknown compression method ‘$info->{compression}’\n";
            next;
        }
        my $url = "$cache->{url}/$info->{url}"; # FIXME: handle non-relative URLs
        print STDERR "\n*** Downloading ‘$url’ into ‘$storePath’...\n";
        Nix::Utils::checkURL $url;
        # NOTE(review): $url is interpolated into a shell command; it
        # has passed checkURL, but list-form execution would be safer.
        if (system("$Nix::Config::curl --fail --location --insecure '$url' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) {
            # NOTE(review): when system() returns non-zero, $? is also
            # non-zero, so the ‘unless $? == 0’ guard always dies and
            # the ‘next’ below is unreachable — confirm whether
            # falling through to the next cache was intended.
            die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0;
            next;
        }

        # Tell Nix about the expected hash so it can verify it.
        print "$info->{narHash}\n";

        print STDERR "\n";
        return;
    }

    print STDERR "could not download ‘$storePath’ from any binary cache\n";
}
|
||||
|
||||
|
||||
initCache();


# Command dispatch: this script speaks the Nix substituter protocol.
# ‘--query’ reads "have"/"info" commands from stdin; ‘--substitute’
# downloads a single store path.
if ($ARGV[0] eq "--query") {

    while (<STDIN>) {
        # Cache discovery is deferred until the first query arrives.
        getAvailableCaches;
        chomp;
        my ($cmd, @args) = split " ", $_;

        if ($cmd eq "have") {
            # Print the subset of @args available from some cache,
            # terminated by an empty line.
            printSubstitutablePaths(@args);
            print "\n";
        }

        elsif ($cmd eq "info") {
            # Print an info record per available path, terminated by
            # an empty line.
            printInfoParallel(@args);
            print "\n";
        }

        else { die "unknown command `$cmd'"; }

        # The daemon waits for each reply; flush eagerly.
        flush STDOUT;
    }

}

elsif ($ARGV[0] eq "--substitute") {
    my $storePath = $ARGV[1] or die;
    getAvailableCaches;
    downloadBinary($storePath);
}

else {
    die;
}
|
|
@ -4,6 +4,7 @@ use strict;
|
|||
use Nix::Config;
|
||||
use Nix::Manifest;
|
||||
use Nix::Store;
|
||||
use Nix::Utils;
|
||||
use POSIX qw(strftime);
|
||||
use File::Temp qw(tempdir);
|
||||
|
||||
|
@ -15,6 +16,9 @@ my $logFile = "$Nix::Config::logDir/downloads";
|
|||
# estimating the expected download size.
|
||||
my $fast = 1;
|
||||
|
||||
# ‘--insecure’ is fine because Nix verifies the hash of the result.
|
||||
my $curl = "$Nix::Config::curl --fail --location --insecure";
|
||||
|
||||
|
||||
# Open the manifest cache and update it if necessary.
|
||||
my $dbh = updateManifestDB();
|
||||
|
@ -38,7 +42,7 @@ sub parseHash {
|
|||
# given path.
|
||||
sub computeSmallestDownload {
|
||||
my $targetPath = shift;
|
||||
|
||||
|
||||
# Build a graph of all store paths that might contribute to the
|
||||
# construction of $targetPath, and the special node "start". The
|
||||
# edges are either patch operations, or downloads of full NAR
|
||||
|
@ -93,7 +97,7 @@ sub computeSmallestDownload {
|
|||
my $patchList = $dbh->selectall_arrayref(
|
||||
"select * from Patches where storePath = ?",
|
||||
{ Slice => {} }, $u);
|
||||
|
||||
|
||||
foreach my $patch (@{$patchList}) {
|
||||
if (isValidPath($patch->{basePath})) {
|
||||
my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
|
||||
|
@ -106,7 +110,7 @@ sub computeSmallestDownload {
|
|||
$hash =~ s/.*://;
|
||||
$hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash;
|
||||
}
|
||||
|
||||
|
||||
next if $hash ne $baseHash;
|
||||
}
|
||||
push @queue, $patch->{basePath};
|
||||
|
@ -117,7 +121,7 @@ sub computeSmallestDownload {
|
|||
my $narFileList = $dbh->selectall_arrayref(
|
||||
"select * from NARs where storePath = ?",
|
||||
{ Slice => {} }, $u);
|
||||
|
||||
|
||||
foreach my $narFile (@{$narFileList}) {
|
||||
# !!! how to handle files whose size is not known in advance?
|
||||
# For now, assume some arbitrary size (1 GB).
|
||||
|
@ -173,58 +177,56 @@ sub computeSmallestDownload {
|
|||
if ($ARGV[0] eq "--query") {
|
||||
|
||||
while (<STDIN>) {
|
||||
my $cmd = $_; chomp $cmd;
|
||||
chomp;
|
||||
my ($cmd, @args) = split " ", $_;
|
||||
|
||||
if ($cmd eq "have") {
|
||||
my $storePath = <STDIN>; chomp $storePath;
|
||||
print STDOUT (
|
||||
scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0
|
||||
? "1\n" : "0\n");
|
||||
foreach my $storePath (@args) {
|
||||
print "$storePath\n" if scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0;
|
||||
}
|
||||
print "\n";
|
||||
}
|
||||
|
||||
elsif ($cmd eq "info") {
|
||||
my $storePath = <STDIN>; chomp $storePath;
|
||||
foreach my $storePath (@args) {
|
||||
|
||||
my $infos = $dbh->selectall_arrayref(
|
||||
"select * from NARs where storePath = ?",
|
||||
{ Slice => {} }, $storePath);
|
||||
|
||||
my $info;
|
||||
if (scalar @{$infos} > 0) {
|
||||
$info = @{$infos}[0];
|
||||
}
|
||||
else {
|
||||
print "0\n";
|
||||
next; # not an error
|
||||
}
|
||||
my $infos = $dbh->selectall_arrayref(
|
||||
"select * from NARs where storePath = ?",
|
||||
{ Slice => {} }, $storePath);
|
||||
|
||||
print "1\n";
|
||||
print "$info->{deriver}\n";
|
||||
my @references = split " ", $info->{refs};
|
||||
print scalar @references, "\n";
|
||||
print "$_\n" foreach @references;
|
||||
next unless scalar @{$infos} > 0;
|
||||
my $info = @{$infos}[0];
|
||||
|
||||
my @path = computeSmallestDownload $storePath;
|
||||
print "$storePath\n";
|
||||
print "$info->{deriver}\n";
|
||||
my @references = split " ", $info->{refs};
|
||||
print scalar @references, "\n";
|
||||
print "$_\n" foreach @references;
|
||||
|
||||
my $downloadSize = 0;
|
||||
while (scalar @path > 0) {
|
||||
my $edge = pop @path;
|
||||
my $u = $edge->{start};
|
||||
my $v = $edge->{end};
|
||||
if ($edge->{type} eq "patch") {
|
||||
$downloadSize += $edge->{info}->{size} || 0;
|
||||
}
|
||||
elsif ($edge->{type} eq "narfile") {
|
||||
$downloadSize += $edge->{info}->{size} || 0;
|
||||
my @path = computeSmallestDownload $storePath;
|
||||
|
||||
my $downloadSize = 0;
|
||||
while (scalar @path > 0) {
|
||||
my $edge = pop @path;
|
||||
my $u = $edge->{start};
|
||||
my $v = $edge->{end};
|
||||
if ($edge->{type} eq "patch") {
|
||||
$downloadSize += $edge->{info}->{size} || 0;
|
||||
}
|
||||
elsif ($edge->{type} eq "narfile") {
|
||||
$downloadSize += $edge->{info}->{size} || 0;
|
||||
}
|
||||
}
|
||||
|
||||
print "$downloadSize\n";
|
||||
|
||||
my $narSize = $info->{narSize} || 0;
|
||||
print "$narSize\n";
|
||||
}
|
||||
|
||||
print "$downloadSize\n";
|
||||
|
||||
my $narSize = $info->{narSize} || 0;
|
||||
print "$narSize\n";
|
||||
print "\n";
|
||||
}
|
||||
|
||||
|
||||
else { die "unknown command `$cmd'"; }
|
||||
}
|
||||
|
||||
|
@ -273,16 +275,6 @@ $dbh->disconnect;
|
|||
my $curStep = 1;
|
||||
my $maxStep = scalar @path;
|
||||
|
||||
sub downloadFile {
|
||||
my $url = shift;
|
||||
$ENV{"PRINT_PATH"} = 1;
|
||||
$ENV{"QUIET"} = 1;
|
||||
my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`;
|
||||
die "download of `$url' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0;
|
||||
chomp $path;
|
||||
return $path;
|
||||
}
|
||||
|
||||
my $finalNarHash;
|
||||
|
||||
while (scalar @path > 0) {
|
||||
|
@ -314,13 +306,16 @@ while (scalar @path > 0) {
|
|||
|
||||
# Download the patch.
|
||||
print STDERR " downloading patch...\n";
|
||||
my $patchPath = downloadFile "$patch->{url}";
|
||||
my $patchPath = "$tmpDir/patch";
|
||||
Nix::Utils::checkURL $patch->{url};
|
||||
system("$curl '$patch->{url}' -o $patchPath") == 0
|
||||
or die "cannot download patch `$patch->{url}'\n";
|
||||
|
||||
# Apply the patch to the NAR archive produced in step 1 (for
|
||||
# the already present path) or a later step (for patch sequences).
|
||||
print STDERR " applying patch...\n";
|
||||
system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0
|
||||
or die "cannot apply patch `$patchPath' to $tmpNar";
|
||||
or die "cannot apply patch `$patchPath' to $tmpNar\n";
|
||||
|
||||
if ($curStep < $maxStep) {
|
||||
# The archive will be used as the base of the next patch.
|
||||
|
@ -330,7 +325,7 @@ while (scalar @path > 0) {
|
|||
# into the target path.
|
||||
print STDERR " unpacking patched archive...\n";
|
||||
system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0
|
||||
or die "cannot unpack $tmpNar2 into `$v'";
|
||||
or die "cannot unpack $tmpNar2 into `$v'\n";
|
||||
}
|
||||
|
||||
$finalNarHash = $patch->{narHash};
|
||||
|
@ -342,20 +337,16 @@ while (scalar @path > 0) {
|
|||
|
||||
my $size = $narFile->{size} || -1;
|
||||
print LOGFILE "$$ narfile $narFile->{url} $size $v\n";
|
||||
|
||||
# Download the archive.
|
||||
print STDERR " downloading archive...\n";
|
||||
my $narFilePath = downloadFile "$narFile->{url}";
|
||||
|
||||
Nix::Utils::checkURL $narFile->{url};
|
||||
if ($curStep < $maxStep) {
|
||||
# The archive will be used a base to a patch.
|
||||
system("$Nix::Config::bzip2 -d < '$narFilePath' > $tmpNar") == 0
|
||||
or die "cannot unpack `$narFilePath' into `$v'";
|
||||
system("$curl '$narFile->{url}' | $Nix::Config::bzip2 -d > $tmpNar") == 0
|
||||
or die "cannot download and unpack `$narFile->{url}' into `$v'\n";
|
||||
} else {
|
||||
# Unpack the archive into the target path.
|
||||
print STDERR " unpacking archive...\n";
|
||||
system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0
|
||||
or die "cannot unpack `$narFilePath' into `$v'";
|
||||
system("$curl '$narFile->{url}' | $Nix::Config::bzip2 -d | $Nix::Config::binDir/nix-store --restore '$v'") == 0
|
||||
or die "cannot download and unpack `$narFile->{url}' into `$v'\n";
|
||||
}
|
||||
|
||||
$finalNarHash = $narFile->{narHash};
|
||||
|
@ -365,21 +356,10 @@ while (scalar @path > 0) {
|
|||
}
|
||||
|
||||
|
||||
# Make sure that the hash declared in the manifest matches what we
|
||||
# downloaded and unpacked.
|
||||
|
||||
if (defined $finalNarHash) {
|
||||
my ($hashAlgo, $hash) = parseHash $finalNarHash;
|
||||
|
||||
# The hash in the manifest can be either in base-16 or base-32.
|
||||
# Handle both.
|
||||
my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath);
|
||||
|
||||
die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n"
|
||||
if $hash ne $hash2;
|
||||
} else {
|
||||
die "cannot check integrity of the downloaded path since its hash is not known\n";
|
||||
}
|
||||
# Tell Nix about the expected hash so it can verify it.
|
||||
die "cannot check integrity of the downloaded path since its hash is not known\n"
|
||||
unless defined $finalNarHash;
|
||||
print "$finalNarHash\n";
|
||||
|
||||
|
||||
print STDERR "\n";
|
||||
|
|
|
@ -58,6 +58,11 @@ EOF
|
|||
# '` hack
|
||||
}
|
||||
|
||||
elsif ($arg eq "--version") {
|
||||
print "nix-build (Nix) $Nix::Config::version\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
elsif ($arg eq "--add-drv-link") {
|
||||
$drvLink = "./derivation";
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ my $nixDefExpr = "$home/.nix-defexpr";
|
|||
my $userName = getpwuid($<) or die "cannot figure out user name";
|
||||
my $profile = "$Nix::Config::stateDir/profiles/per-user/$userName/channels";
|
||||
mkpath(dirname $profile, 0, 0755);
|
||||
|
||||
|
||||
my %channels;
|
||||
|
||||
|
||||
|
@ -77,20 +77,14 @@ sub removeChannel {
|
|||
# channels.
|
||||
sub update {
|
||||
my @channelNames = @_;
|
||||
|
||||
|
||||
readChannels;
|
||||
|
||||
# Create the manifests directory if it doesn't exist.
|
||||
mkdir $manifestDir, 0755 unless -e $manifestDir;
|
||||
|
||||
# Do we have write permission to the manifests directory?
|
||||
die "$0: you do not have write permission to `$manifestDir'!\n" unless -W $manifestDir;
|
||||
|
||||
# Download each channel.
|
||||
my $exprs = "";
|
||||
foreach my $name (keys %channels) {
|
||||
next if scalar @channelNames > 0 && ! grep { $_ eq $name } @{channelNames};
|
||||
|
||||
|
||||
my $url = $channels{$name};
|
||||
my $origUrl = "$url/MANIFEST";
|
||||
|
||||
|
@ -101,11 +95,20 @@ sub update {
|
|||
die "$0: unable to check `$url'\n" if $? != 0;
|
||||
$headers =~ s/\r//g;
|
||||
$url = $1 if $headers =~ /^Location:\s*(.*)\s*$/m;
|
||||
|
||||
# Pull the channel manifest.
|
||||
$ENV{'NIX_ORIG_URL'} = $origUrl;
|
||||
system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0
|
||||
or die "cannot pull manifest from `$url'\n";
|
||||
|
||||
# Check if the channel advertises a binary cache.
|
||||
my $binaryCacheURL = `$Nix::Config::curl --silent '$url'/binary-cache-url`;
|
||||
my $extraAttrs = "";
|
||||
if ($? == 0 && $binaryCacheURL ne "") {
|
||||
$extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; ";
|
||||
} else {
|
||||
# No binary cache, so pull the channel manifest.
|
||||
mkdir $manifestDir, 0755 unless -e $manifestDir;
|
||||
die "$0: you do not have write permission to `$manifestDir'!\n" unless -W $manifestDir;
|
||||
$ENV{'NIX_ORIG_URL'} = $origUrl;
|
||||
system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0
|
||||
or die "cannot pull manifest from `$url'\n";
|
||||
}
|
||||
|
||||
# Download the channel tarball.
|
||||
my $fullURL = "$url/nixexprs.tar.bz2";
|
||||
|
@ -120,7 +123,7 @@ sub update {
|
|||
my $cname = $name;
|
||||
$cname .= $1 if basename($url) =~ /(-\d.*)$/;
|
||||
|
||||
$exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; }' ";
|
||||
$exprs .= "'f: f { name = \"$cname\"; channelName = \"$name\"; src = builtins.storePath \"$path\"; $extraAttrs }' ";
|
||||
}
|
||||
|
||||
# Unpack the channel tarballs into the Nix store and install them
|
||||
|
@ -189,11 +192,16 @@ while (scalar @ARGV) {
|
|||
update(@ARGV);
|
||||
last;
|
||||
}
|
||||
|
||||
|
||||
elsif ($arg eq "--help") {
|
||||
usageError;
|
||||
}
|
||||
|
||||
elsif ($arg eq "--version") {
|
||||
print "nix-channel (Nix) $Nix::Config::version\n";
|
||||
exit 0;
|
||||
}
|
||||
|
||||
else {
|
||||
die "unknown argument `$arg'; try `--help'";
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
use strict;
|
||||
use File::Temp qw(tempdir);
|
||||
use Nix::Config;
|
||||
use Nix::Utils;
|
||||
|
||||
|
||||
sub usageError {
|
||||
|
@ -72,7 +73,7 @@ my $tmpDir = tempdir("nix-install-package.XXXXXX", CLEANUP => 1, TMPDIR => 1)
|
|||
|
||||
sub barf {
|
||||
my $msg = shift;
|
||||
print "$msg\n";
|
||||
print "\nInstallation failed: $msg\n";
|
||||
<STDIN> if $interactive;
|
||||
exit 1;
|
||||
}
|
||||
|
@ -92,7 +93,6 @@ open PKGFILE, "<$pkgFile" or barf "cannot open `$pkgFile': $!";
|
|||
my $contents = <PKGFILE>;
|
||||
close PKGFILE;
|
||||
|
||||
my $urlRE = "(?: [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ )";
|
||||
my $nameRE = "(?: [A-Za-z0-9\+\-\.\_\?\=]+ )"; # see checkStoreName()
|
||||
my $systemRE = "(?: [A-Za-z0-9\+\-\_]+ )";
|
||||
my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )";
|
||||
|
@ -101,7 +101,7 @@ my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )";
|
|||
# store path. We'll let nix-env do that.
|
||||
|
||||
$contents =~
|
||||
/ ^ \s* (\S+) \s+ ($urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) /x
|
||||
/ ^ \s* (\S+) \s+ ($Nix::Utils::urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )? /x
|
||||
or barf "invalid package contents";
|
||||
my $version = $1;
|
||||
my $manifestURL = $2;
|
||||
|
@ -109,6 +109,7 @@ my $drvName = $3;
|
|||
my $system = $4;
|
||||
my $drvPath = $5;
|
||||
my $outPath = $6;
|
||||
my $binaryCacheURL = $8;
|
||||
|
||||
barf "invalid package version `$version'" unless $version eq "NIXPKG1";
|
||||
|
||||
|
@ -122,17 +123,25 @@ if ($interactive) {
|
|||
}
|
||||
|
||||
|
||||
# Store the manifest in the temporary directory so that we don't
|
||||
# pollute /nix/var/nix/manifests. This also requires that we don't
|
||||
# use the Nix daemon (because otherwise download-using-manifests won't
|
||||
# see our NIX_MANIFESTS_DIRS environment variable).
|
||||
$ENV{NIX_MANIFESTS_DIR} = $tmpDir;
|
||||
$ENV{NIX_REMOTE} = "";
|
||||
if (defined $binaryCacheURL) {
|
||||
|
||||
push @extraNixEnvArgs, "--option", "binary-caches", $binaryCacheURL;
|
||||
|
||||
print "\nPulling manifests...\n";
|
||||
system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0
|
||||
or barf "nix-pull failed: $?";
|
||||
} else {
|
||||
|
||||
# Store the manifest in the temporary directory so that we don't
|
||||
# pollute /nix/var/nix/manifests. This also requires that we
|
||||
# don't use the Nix daemon (because otherwise
|
||||
# download-using-manifests won't see our NIX_MANIFESTS_DIRS
|
||||
# environment variable).
|
||||
$ENV{NIX_MANIFESTS_DIR} = $tmpDir;
|
||||
$ENV{NIX_REMOTE} = "";
|
||||
|
||||
print "\nPulling manifests...\n";
|
||||
system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0
|
||||
or barf "nix-pull failed: $?";
|
||||
|
||||
}
|
||||
|
||||
|
||||
print "\nInstalling package...\n";
|
||||
|
|
|
@ -1,85 +1,85 @@
|
|||
#! @perl@ -w @perlFlags@
|
||||
|
||||
use strict;
|
||||
use File::Basename;
|
||||
use File::Temp qw(tempdir);
|
||||
use File::Path qw(mkpath);
|
||||
use File::stat;
|
||||
use File::Copy;
|
||||
use Nix::Config;
|
||||
use Nix::Store;
|
||||
use Nix::Manifest;
|
||||
|
||||
my $hashAlgo = "sha256";
|
||||
|
||||
my $tmpDir = tempdir("nix-push.XXXXXX", CLEANUP => 1, TMPDIR => 1)
|
||||
or die "cannot create a temporary directory";
|
||||
|
||||
my $nixExpr = "$tmpDir/create-nars.nix";
|
||||
my $manifest = "$tmpDir/MANIFEST";
|
||||
|
||||
my $curl = "$Nix::Config::curl --fail --silent";
|
||||
my $extraCurlFlags = ${ENV{'CURL_FLAGS'}};
|
||||
$curl = "$curl $extraCurlFlags" if defined $extraCurlFlags;
|
||||
|
||||
|
||||
# Parse the command line.
|
||||
my $localCopy;
|
||||
my $localArchivesDir;
|
||||
my $localManifestFile;
|
||||
|
||||
my $targetArchivesUrl;
|
||||
|
||||
my $archivesPutURL;
|
||||
my $archivesGetURL;
|
||||
my $manifestPutURL;
|
||||
my $compressionType = "xz";
|
||||
my $force = 0;
|
||||
my $destDir;
|
||||
my $writeManifest = 0;
|
||||
my $archivesURL;
|
||||
my @roots;
|
||||
|
||||
sub showSyntax {
|
||||
print STDERR <<EOF
|
||||
Usage: nix-push --copy ARCHIVES_DIR MANIFEST_FILE PATHS...
|
||||
or: nix-push ARCHIVES_PUT_URL ARCHIVES_GET_URL MANIFEST_PUT_URL PATHS...
|
||||
Usage: nix-push --dest DIR [--manifest] [--url-prefix URL] PATHS...
|
||||
|
||||
`nix-push' copies or uploads the closure of PATHS to the given
|
||||
destination.
|
||||
`nix-push' packs the closure of PATHS into a set of NAR files stored
|
||||
in DIR. Optionally generate a manifest.
|
||||
EOF
|
||||
; # `
|
||||
exit 1;
|
||||
}
|
||||
|
||||
showSyntax if scalar @ARGV < 1;
|
||||
for (my $n = 0; $n < scalar @ARGV; $n++) {
|
||||
my $arg = $ARGV[$n];
|
||||
|
||||
if ($ARGV[0] eq "--copy") {
|
||||
showSyntax if scalar @ARGV < 3;
|
||||
$localCopy = 1;
|
||||
shift @ARGV;
|
||||
$localArchivesDir = shift @ARGV;
|
||||
$localManifestFile = shift @ARGV;
|
||||
if ($ARGV[0] eq "--target") {
|
||||
shift @ARGV;
|
||||
$targetArchivesUrl = shift @ARGV;
|
||||
}
|
||||
else {
|
||||
$targetArchivesUrl = "file://$localArchivesDir";
|
||||
if ($arg eq "--help") {
|
||||
showSyntax;
|
||||
} elsif ($arg eq "--bzip2") {
|
||||
$compressionType = "bzip2";
|
||||
} elsif ($arg eq "--force") {
|
||||
$force = 1;
|
||||
} elsif ($arg eq "--dest") {
|
||||
$n++;
|
||||
die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV;
|
||||
$destDir = $ARGV[$n];
|
||||
mkpath($destDir, 0, 0755);
|
||||
} elsif ($arg eq "--manifest") {
|
||||
$writeManifest = 1;
|
||||
} elsif ($arg eq "--url-prefix") {
|
||||
$n++;
|
||||
die "$0: `$arg' requires an argument\n" unless $n < scalar @ARGV;
|
||||
$archivesURL = $ARGV[$n];
|
||||
} elsif (substr($arg, 0, 1) eq "-") {
|
||||
showSyntax;
|
||||
} else {
|
||||
push @roots, $arg;
|
||||
}
|
||||
}
|
||||
else {
|
||||
showSyntax if scalar @ARGV < 3;
|
||||
$localCopy = 0;
|
||||
$archivesPutURL = shift @ARGV;
|
||||
$archivesGetURL = shift @ARGV;
|
||||
$manifestPutURL = shift @ARGV;
|
||||
}
|
||||
|
||||
showSyntax if !defined $destDir;
|
||||
|
||||
$archivesURL = "file://$destDir" unless defined $archivesURL;
|
||||
|
||||
|
||||
# From the given store paths, determine the set of requisite store
|
||||
# paths, i.e, the paths required to realise them.
|
||||
my %storePaths;
|
||||
|
||||
foreach my $path (@ARGV) {
|
||||
foreach my $path (@roots) {
|
||||
die unless $path =~ /^\//;
|
||||
|
||||
# Get all paths referenced by the normalisation of the given
|
||||
# Get all paths referenced by the normalisation of the given
|
||||
# Nix expression.
|
||||
my $pid = open(READ,
|
||||
"$Nix::Config::binDir/nix-store --query --requisites --force-realise " .
|
||||
"--include-outputs '$path'|") or die;
|
||||
|
||||
|
||||
while (<READ>) {
|
||||
chomp;
|
||||
die "bad: $_" unless /^\//;
|
||||
|
@ -92,8 +92,8 @@ foreach my $path (@ARGV) {
|
|||
my @storePaths = keys %storePaths;
|
||||
|
||||
|
||||
# For each path, create a Nix expression that turns the path into
|
||||
# a Nix archive.
|
||||
# Create a list of Nix derivations that turn each path into a Nix
|
||||
# archive.
|
||||
open NIX, ">$nixExpr";
|
||||
print NIX "[";
|
||||
|
||||
|
@ -101,10 +101,10 @@ foreach my $storePath (@storePaths) {
|
|||
die unless ($storePath =~ /\/[0-9a-z]{32}[^\"\\\$]*$/);
|
||||
|
||||
# Construct a Nix expression that creates a Nix archive.
|
||||
my $nixexpr =
|
||||
my $nixexpr =
|
||||
"(import <nix/nar.nix> " .
|
||||
"{ storePath = builtins.storePath \"$storePath\"; hashAlgo = \"$hashAlgo\"; }) ";
|
||||
|
||||
"{ storePath = builtins.storePath \"$storePath\"; hashAlgo = \"sha256\"; compressionType = \"$compressionType\"; }) ";
|
||||
|
||||
print NIX $nixexpr;
|
||||
}
|
||||
|
||||
|
@ -112,172 +112,132 @@ print NIX "]";
|
|||
close NIX;
|
||||
|
||||
|
||||
# Instantiate store derivations from the Nix expression.
|
||||
my @storeExprs;
|
||||
print STDERR "instantiating store derivations...\n";
|
||||
my $pid = open(READ, "$Nix::Config::binDir/nix-instantiate $nixExpr|")
|
||||
or die "cannot run nix-instantiate";
|
||||
# Build the Nix expression.
|
||||
print STDERR "building compressed archives...\n";
|
||||
my @narPaths;
|
||||
my $pid = open(READ, "$Nix::Config::binDir/nix-build $nixExpr -o $tmpDir/result |")
|
||||
or die "cannot run nix-build";
|
||||
while (<READ>) {
|
||||
chomp;
|
||||
die unless /^\//;
|
||||
push @storeExprs, $_;
|
||||
push @narPaths, $_;
|
||||
}
|
||||
close READ or die "nix-instantiate failed: $?";
|
||||
close READ or die "nix-build failed: $?";
|
||||
|
||||
|
||||
# Build the derivations.
|
||||
print STDERR "creating archives...\n";
|
||||
|
||||
my @narPaths;
|
||||
|
||||
my @tmp = @storeExprs;
|
||||
while (scalar @tmp > 0) {
|
||||
my $n = scalar @tmp;
|
||||
if ($n > 256) { $n = 256 };
|
||||
my @tmp2 = @tmp[0..$n - 1];
|
||||
@tmp = @tmp[$n..scalar @tmp - 1];
|
||||
|
||||
my $pid = open(READ, "$Nix::Config::binDir/nix-store --realise @tmp2|")
|
||||
or die "cannot run nix-store";
|
||||
while (<READ>) {
|
||||
chomp;
|
||||
die unless (/^\//);
|
||||
push @narPaths, "$_";
|
||||
}
|
||||
close READ or die "nix-store failed: $?";
|
||||
# Write the cache info file.
|
||||
my $cacheInfoFile = "$destDir/nix-cache-info";
|
||||
if (! -e $cacheInfoFile) {
|
||||
open FILE, ">$cacheInfoFile" or die "cannot create $cacheInfoFile: $!";
|
||||
print FILE "StoreDir: $Nix::Config::storeDir\n";
|
||||
print FILE "WantMassQuery: 0\n"; # by default, don't hit this cache for "nix-env -qas"
|
||||
close FILE;
|
||||
}
|
||||
|
||||
|
||||
# Create the manifest.
|
||||
print STDERR "creating manifest...\n";
|
||||
# Copy the archives and the corresponding NAR info files.
|
||||
print STDERR "copying archives...\n";
|
||||
|
||||
my $totalNarSize = 0;
|
||||
my $totalCompressedSize = 0;
|
||||
|
||||
my %narFiles;
|
||||
my %patches;
|
||||
|
||||
my @narArchives;
|
||||
for (my $n = 0; $n < scalar @storePaths; $n++) {
|
||||
my $storePath = $storePaths[$n];
|
||||
my $narDir = $narPaths[$n];
|
||||
|
||||
$storePath =~ /\/([^\/]*)$/;
|
||||
my $basename = $1;
|
||||
defined $basename or die;
|
||||
my $baseName = basename $storePath;
|
||||
|
||||
open HASH, "$narDir/narbz2-hash" or die "cannot open narbz2-hash";
|
||||
my $narbz2Hash = <HASH>;
|
||||
chomp $narbz2Hash;
|
||||
$narbz2Hash =~ /^[0-9a-z]+$/ or die "invalid hash";
|
||||
close HASH;
|
||||
|
||||
my $narName = "$narbz2Hash.nar.bz2";
|
||||
|
||||
my $narFile = "$narDir/$narName";
|
||||
(-f $narFile) or die "narfile for $storePath not found";
|
||||
push @narArchives, $narFile;
|
||||
|
||||
my $narbz2Size = stat($narFile)->size;
|
||||
|
||||
my $references = `$Nix::Config::binDir/nix-store --query --references '$storePath'`;
|
||||
die "cannot query references for `$storePath'" if $? != 0;
|
||||
$references = join(" ", split(" ", $references));
|
||||
|
||||
my $deriver = `$Nix::Config::binDir/nix-store --query --deriver '$storePath'`;
|
||||
die "cannot query deriver for `$storePath'" if $? != 0;
|
||||
chomp $deriver;
|
||||
$deriver = "" if $deriver eq "unknown-deriver";
|
||||
|
||||
my $narHash = `$Nix::Config::binDir/nix-store --query --hash '$storePath'`;
|
||||
die "cannot query hash for `$storePath'" if $? != 0;
|
||||
chomp $narHash;
|
||||
# Get info about the store path.
|
||||
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
|
||||
|
||||
# In some exceptional cases (such as VM tests that use the Nix
|
||||
# store of the host), the database doesn't contain the hash. So
|
||||
# compute it.
|
||||
if ($narHash =~ /^sha256:0*$/) {
|
||||
$narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 '$storePath'`;
|
||||
die "cannot hash `$storePath'" if $? != 0;
|
||||
my $nar = "$tmpDir/nar";
|
||||
system("$Nix::Config::binDir/nix-store --dump $storePath > $nar") == 0
|
||||
or die "cannot dump $storePath\n";
|
||||
$narHash = `$Nix::Config::binDir/nix-hash --type sha256 --base32 --flat $nar`;
|
||||
die "cannot hash `$nar'" if $? != 0;
|
||||
chomp $narHash;
|
||||
$narHash = "sha256:$narHash";
|
||||
$narSize = stat("$nar")->size;
|
||||
unlink $nar or die;
|
||||
}
|
||||
|
||||
my $narSize = `$Nix::Config::binDir/nix-store --query --size '$storePath'`;
|
||||
die "cannot query size for `$storePath'" if $? != 0;
|
||||
chomp $narSize;
|
||||
$totalNarSize += $narSize;
|
||||
|
||||
my $url;
|
||||
if ($localCopy) {
|
||||
$url = "$targetArchivesUrl/$narName";
|
||||
} else {
|
||||
$url = "$archivesGetURL/$narName";
|
||||
# Get info about the compressed NAR.
|
||||
open HASH, "$narDir/nar-compressed-hash" or die "cannot open nar-compressed-hash";
|
||||
my $compressedHash = <HASH>;
|
||||
chomp $compressedHash;
|
||||
$compressedHash =~ /^[0-9a-z]+$/ or die "invalid hash";
|
||||
close HASH;
|
||||
|
||||
my $narName = "$compressedHash.nar." . ($compressionType eq "xz" ? "xz" : "bz2");
|
||||
|
||||
my $narFile = "$narDir/$narName";
|
||||
(-f $narFile) or die "NAR file for $storePath not found";
|
||||
|
||||
my $compressedSize = stat($narFile)->size;
|
||||
$totalCompressedSize += $compressedSize;
|
||||
|
||||
printf STDERR "%s [%.2f MiB, %.1f%%]\n", $storePath,
|
||||
$compressedSize / (1024 * 1024), $compressedSize / $narSize * 100;
|
||||
|
||||
# Copy the compressed NAR.
|
||||
my $dst = "$destDir/$narName";
|
||||
if (! -f $dst) {
|
||||
my $tmp = "$destDir/.tmp.$$.$narName";
|
||||
copy($narFile, $tmp) or die "cannot copy $narFile to $tmp: $!\n";
|
||||
rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n";
|
||||
}
|
||||
|
||||
# Write the info file.
|
||||
my $info;
|
||||
$info .= "StorePath: $storePath\n";
|
||||
$info .= "URL: $narName\n";
|
||||
$info .= "Compression: $compressionType\n";
|
||||
$info .= "FileHash: sha256:$compressedHash\n";
|
||||
$info .= "FileSize: $compressedSize\n";
|
||||
$info .= "NarHash: $narHash\n";
|
||||
$info .= "NarSize: $narSize\n";
|
||||
$info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
|
||||
if (defined $deriver) {
|
||||
$info .= "Deriver: " . basename $deriver . "\n";
|
||||
if (isValidPath($deriver)) {
|
||||
my $drv = derivationFromPath($deriver);
|
||||
$info .= "System: $drv->{platform}\n";
|
||||
}
|
||||
}
|
||||
|
||||
my $pathHash = substr(basename($storePath), 0, 32);
|
||||
|
||||
$dst = "$destDir/$pathHash.narinfo";
|
||||
if ($force || ! -f $dst) {
|
||||
my $tmp = "$destDir/.tmp.$$.$pathHash.narinfo";
|
||||
open INFO, ">$tmp" or die;
|
||||
print INFO "$info" or die;
|
||||
close INFO or die;
|
||||
rename($tmp, $dst) or die "cannot rename $tmp to $dst: $!\n";
|
||||
}
|
||||
|
||||
$narFiles{$storePath} = [
|
||||
{ url => $url
|
||||
, hash => "$hashAlgo:$narbz2Hash"
|
||||
, size => $narbz2Size
|
||||
{ url => "$archivesURL/$narName"
|
||||
, hash => "sha256:$compressedHash"
|
||||
, size => $compressedSize
|
||||
, narHash => "$narHash"
|
||||
, narSize => $narSize
|
||||
, references => $references
|
||||
, references => join(" ", @{$refs})
|
||||
, deriver => $deriver
|
||||
}
|
||||
];
|
||||
] if $writeManifest;
|
||||
}
|
||||
|
||||
writeManifest $manifest, \%narFiles, \%patches;
|
||||
printf STDERR "total compressed size %.2f MiB, %.1f%%\n",
|
||||
$totalCompressedSize / (1024 * 1024), $totalCompressedSize / $totalNarSize * 100;
|
||||
|
||||
|
||||
sub copyFile {
|
||||
my $src = shift;
|
||||
my $dst = shift;
|
||||
my $tmp = "$dst.tmp.$$";
|
||||
system("@coreutils@/cp", $src, $tmp) == 0 or die "cannot copy file";
|
||||
rename($tmp, $dst) or die "cannot rename file: $!";
|
||||
}
|
||||
|
||||
|
||||
# Upload/copy the archives.
|
||||
print STDERR "uploading/copying archives...\n";
|
||||
|
||||
sub archiveExists {
|
||||
my $name = shift;
|
||||
print STDERR " HEAD on $archivesGetURL/$name\n";
|
||||
return system("$curl --head $archivesGetURL/$name > /dev/null") == 0;
|
||||
}
|
||||
|
||||
foreach my $narArchive (@narArchives) {
|
||||
|
||||
$narArchive =~ /\/([^\/]*)$/;
|
||||
my $basename = $1;
|
||||
|
||||
if ($localCopy) {
|
||||
# Since nix-push creates $dst atomically, if it exists we
|
||||
# don't have to copy again.
|
||||
my $dst = "$localArchivesDir/$basename";
|
||||
if (! -f "$localArchivesDir/$basename") {
|
||||
print STDERR " $narArchive\n";
|
||||
copyFile $narArchive, $dst;
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (!archiveExists("$basename")) {
|
||||
print STDERR " $narArchive\n";
|
||||
system("$curl --show-error --upload-file " .
|
||||
"'$narArchive' '$archivesPutURL/$basename' > /dev/null") == 0 or
|
||||
die "curl failed on $narArchive: $?";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Upload the manifest.
|
||||
print STDERR "uploading manifest...\n";
|
||||
if ($localCopy) {
|
||||
copyFile $manifest, $localManifestFile;
|
||||
copyFile "$manifest.bz2", "$localManifestFile.bz2";
|
||||
} else {
|
||||
system("$curl --show-error --upload-file " .
|
||||
"'$manifest' '$manifestPutURL' > /dev/null") == 0 or
|
||||
die "curl failed on $manifest: $?";
|
||||
system("$curl --show-error --upload-file " .
|
||||
"'$manifest'.bz2 '$manifestPutURL'.bz2 > /dev/null") == 0 or
|
||||
die "curl failed on $manifest: $?";
|
||||
}
|
||||
# Optionally write a manifest.
|
||||
writeManifest "$destDir/MANIFEST", \%narFiles, \() if $writeManifest;
|
||||
|
|
|
@ -181,7 +181,7 @@ EvalState::EvalState()
|
|||
searchPathInsertionPoint = searchPath.end();
|
||||
Strings paths = tokenizeString(getEnv("NIX_PATH", ""), ":");
|
||||
foreach (Strings::iterator, i, paths) addToSearchPath(*i);
|
||||
addToSearchPath("nix=" + nixDataDir + "/nix/corepkgs");
|
||||
addToSearchPath("nix=" + settings.nixDataDir + "/nix/corepkgs");
|
||||
searchPathInsertionPoint = searchPath.begin();
|
||||
|
||||
createBaseEnv();
|
||||
|
@ -1091,7 +1091,7 @@ string EvalState::coerceToString(Value & v, PathSet & context,
|
|||
if (srcToStore[path] != "")
|
||||
dstPath = srcToStore[path];
|
||||
else {
|
||||
dstPath = readOnlyMode
|
||||
dstPath = settings.readOnlyMode
|
||||
? computeStorePathForPath(path).first
|
||||
: store->addToStore(path);
|
||||
srcToStore[path] = dstPath;
|
||||
|
|
|
@ -31,6 +31,8 @@ private:
|
|||
|
||||
bool metaInfoRead;
|
||||
MetaInfo meta;
|
||||
|
||||
bool failed; // set if we get an AssertionError
|
||||
|
||||
public:
|
||||
string name;
|
||||
|
@ -40,7 +42,7 @@ public:
|
|||
/* !!! make this private */
|
||||
Bindings * attrs;
|
||||
|
||||
DrvInfo() : metaInfoRead(false), attrs(0) { };
|
||||
DrvInfo() : metaInfoRead(false), failed(false), attrs(0) { };
|
||||
|
||||
string queryDrvPath(EvalState & state) const;
|
||||
string queryOutPath(EvalState & state) const;
|
||||
|
@ -58,6 +60,9 @@ public:
|
|||
}
|
||||
|
||||
void setMetaInfo(const MetaInfo & meta);
|
||||
|
||||
void setFailed() { failed = true; };
|
||||
bool hasFailed() { return failed; };
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -51,6 +51,12 @@ static void prim_import(EvalState & state, Value * * args, Value & v)
|
|||
% path % ctx);
|
||||
if (isDerivation(ctx))
|
||||
try {
|
||||
/* For performance, prefetch all substitute info. */
|
||||
PathSet willBuild, willSubstitute, unknown;
|
||||
unsigned long long downloadSize, narSize;
|
||||
queryMissing(*store, singleton<PathSet>(ctx),
|
||||
willBuild, willSubstitute, unknown, downloadSize, narSize);
|
||||
|
||||
/* !!! If using a substitute, we only need to fetch
|
||||
the selected output of this derivation. */
|
||||
store->buildPaths(singleton<PathSet>(ctx));
|
||||
|
@ -617,7 +623,7 @@ static void prim_toFile(EvalState & state, Value * * args, Value & v)
|
|||
refs.insert(path);
|
||||
}
|
||||
|
||||
Path storePath = readOnlyMode
|
||||
Path storePath = settings.readOnlyMode
|
||||
? computeStorePathForText(name, contents, refs)
|
||||
: store->addTextToStore(name, contents, refs);
|
||||
|
||||
|
@ -681,7 +687,7 @@ static void prim_filterSource(EvalState & state, Value * * args, Value & v)
|
|||
|
||||
FilterFromExpr filter(state, *args[0]);
|
||||
|
||||
Path dstPath = readOnlyMode
|
||||
Path dstPath = settings.readOnlyMode
|
||||
? computeStorePathForPath(path, true, htSHA256, filter).first
|
||||
: store->addToStore(path, true, htSHA256, filter);
|
||||
|
||||
|
@ -1134,7 +1140,7 @@ void EvalState::createBaseEnv()
|
|||
mkInt(v, time(0));
|
||||
addConstant("__currentTime", v);
|
||||
|
||||
mkString(v, thisSystem.c_str());
|
||||
mkString(v, settings.thisSystem.c_str());
|
||||
addConstant("__currentSystem", v);
|
||||
|
||||
// Miscellaneous
|
||||
|
|
|
@ -36,7 +36,7 @@ static void sigintHandler(int signo)
|
|||
void printGCWarning()
|
||||
{
|
||||
static bool haveWarned = false;
|
||||
warnOnce(haveWarned,
|
||||
warnOnce(haveWarned,
|
||||
"you did not specify `--add-root'; "
|
||||
"the result might be removed by the garbage collector");
|
||||
}
|
||||
|
@ -64,7 +64,7 @@ void printMissing(StoreAPI & store, const PathSet & paths)
|
|||
|
||||
if (!unknown.empty()) {
|
||||
printMsg(lvlInfo, format("don't know how to build these paths%1%:")
|
||||
% (readOnlyMode ? " (may be caused by read-only store access)" : ""));
|
||||
% (settings.readOnlyMode ? " (may be caused by read-only store access)" : ""));
|
||||
foreach (PathSet::iterator, i, unknown)
|
||||
printMsg(lvlInfo, format(" %1%") % *i);
|
||||
}
|
||||
|
@ -83,12 +83,21 @@ static void setLogType(string lt)
|
|||
static bool showTrace = false;
|
||||
|
||||
|
||||
string getArg(const string & opt,
|
||||
Strings::iterator & i, const Strings::iterator & end)
|
||||
{
|
||||
++i;
|
||||
if (i == end) throw UsageError(format("`%1%' requires an argument") % opt);
|
||||
return *i;
|
||||
}
|
||||
|
||||
/* Initialize and reorder arguments, then call the actual argument
|
||||
processor. */
|
||||
static void initAndRun(int argc, char * * argv)
|
||||
{
|
||||
setDefaultsFromEnvironment();
|
||||
|
||||
settings.processEnvironment();
|
||||
settings.loadConfFile();
|
||||
|
||||
/* Catch SIGINT. */
|
||||
struct sigaction act;
|
||||
act.sa_handler = sigintHandler;
|
||||
|
@ -127,7 +136,7 @@ static void initAndRun(int argc, char * * argv)
|
|||
Strings args, remaining;
|
||||
while (argc--) args.push_back(*argv++);
|
||||
args.erase(args.begin());
|
||||
|
||||
|
||||
/* Expand compound dash options (i.e., `-qlf' -> `-q -l -f'), and
|
||||
ignore options for the ATerm library. */
|
||||
for (Strings::iterator i = args.begin(); i != args.end(); ++i) {
|
||||
|
@ -146,20 +155,19 @@ static void initAndRun(int argc, char * * argv)
|
|||
remaining.clear();
|
||||
|
||||
/* Process default options. */
|
||||
int verbosityDelta = 0;
|
||||
int verbosityDelta = lvlInfo;
|
||||
for (Strings::iterator i = args.begin(); i != args.end(); ++i) {
|
||||
string arg = *i;
|
||||
if (arg == "--verbose" || arg == "-v") verbosityDelta++;
|
||||
else if (arg == "--quiet") verbosityDelta--;
|
||||
else if (arg == "--log-type") {
|
||||
++i;
|
||||
if (i == args.end()) throw UsageError("`--log-type' requires an argument");
|
||||
setLogType(*i);
|
||||
string s = getArg(arg, i, args.end());
|
||||
setLogType(s);
|
||||
}
|
||||
else if (arg == "--no-build-output" || arg == "-Q")
|
||||
buildVerbosity = lvlVomit;
|
||||
settings.buildVerbosity = lvlVomit;
|
||||
else if (arg == "--print-build-trace")
|
||||
printBuildTrace = true;
|
||||
settings.printBuildTrace = true;
|
||||
else if (arg == "--help") {
|
||||
printHelp();
|
||||
return;
|
||||
|
@ -169,23 +177,23 @@ static void initAndRun(int argc, char * * argv)
|
|||
return;
|
||||
}
|
||||
else if (arg == "--keep-failed" || arg == "-K")
|
||||
keepFailed = true;
|
||||
settings.keepFailed = true;
|
||||
else if (arg == "--keep-going" || arg == "-k")
|
||||
keepGoing = true;
|
||||
settings.keepGoing = true;
|
||||
else if (arg == "--fallback")
|
||||
tryFallback = true;
|
||||
settings.set("build-fallback", "true");
|
||||
else if (arg == "--max-jobs" || arg == "-j")
|
||||
maxBuildJobs = getIntArg<unsigned int>(arg, i, args.end());
|
||||
settings.set("build-max-jobs", getArg(arg, i, args.end()));
|
||||
else if (arg == "--cores")
|
||||
buildCores = getIntArg<unsigned int>(arg, i, args.end());
|
||||
settings.set("build-cores", getArg(arg, i, args.end()));
|
||||
else if (arg == "--readonly-mode")
|
||||
readOnlyMode = true;
|
||||
settings.readOnlyMode = true;
|
||||
else if (arg == "--max-silent-time")
|
||||
maxSilentTime = getIntArg<unsigned int>(arg, i, args.end());
|
||||
settings.set("build-max-silent-time", getArg(arg, i, args.end()));
|
||||
else if (arg == "--timeout")
|
||||
buildTimeout = getIntArg<unsigned int>(arg, i, args.end());
|
||||
settings.set("build-timeout", getArg(arg, i, args.end()));
|
||||
else if (arg == "--no-build-hook")
|
||||
useBuildHook = false;
|
||||
settings.useBuildHook = false;
|
||||
else if (arg == "--show-trace")
|
||||
showTrace = true;
|
||||
else if (arg == "--option") {
|
||||
|
@ -193,14 +201,15 @@ static void initAndRun(int argc, char * * argv)
|
|||
string name = *i;
|
||||
++i; if (i == args.end()) throw UsageError("`--option' requires two arguments");
|
||||
string value = *i;
|
||||
overrideSetting(name, tokenizeString(value));
|
||||
settings.set(name, value);
|
||||
}
|
||||
else remaining.push_back(arg);
|
||||
}
|
||||
|
||||
verbosityDelta += queryIntSetting("verbosity", lvlInfo);
|
||||
verbosity = (Verbosity) (verbosityDelta < 0 ? 0 : verbosityDelta);
|
||||
|
||||
|
||||
settings.update();
|
||||
|
||||
run(remaining);
|
||||
|
||||
/* Close the Nix database. */
|
||||
|
@ -218,7 +227,7 @@ static void setuidInit()
|
|||
|
||||
uid_t nixUid = geteuid();
|
||||
gid_t nixGid = getegid();
|
||||
|
||||
|
||||
setuidCleanup();
|
||||
|
||||
/* Don't trust the current directory. */
|
||||
|
@ -284,7 +293,7 @@ int main(int argc, char * * argv)
|
|||
right away. */
|
||||
if (argc == 0) abort();
|
||||
setuidInit();
|
||||
|
||||
|
||||
/* Turn on buffering for cerr. */
|
||||
#if HAVE_PUBSETBUF
|
||||
std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
|
||||
|
@ -313,7 +322,7 @@ int main(int argc, char * * argv)
|
|||
throw;
|
||||
}
|
||||
} catch (UsageError & e) {
|
||||
printMsg(lvlError,
|
||||
printMsg(lvlError,
|
||||
format(
|
||||
"error: %1%\n"
|
||||
"Try `%2% --help' for more information.")
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -12,7 +12,7 @@ void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash
|
|||
{
|
||||
recursive = false;
|
||||
string algo = hashAlgo;
|
||||
|
||||
|
||||
if (string(algo, 0, 2) == "r:") {
|
||||
recursive = true;
|
||||
algo = string(algo, 2);
|
||||
|
@ -21,7 +21,7 @@ void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash
|
|||
hashType = parseHashType(algo);
|
||||
if (hashType == htUnknown)
|
||||
throw Error(format("unknown hash algorithm `%1%'") % algo);
|
||||
|
||||
|
||||
hash = parseHash(hashType, this->hash);
|
||||
}
|
||||
|
||||
|
@ -38,7 +38,7 @@ Path writeDerivation(StoreAPI & store,
|
|||
held during a garbage collection). */
|
||||
string suffix = name + drvExtension;
|
||||
string contents = unparseDerivation(drv);
|
||||
return readOnlyMode
|
||||
return settings.readOnlyMode
|
||||
? computeStorePathForText(suffix, contents, references)
|
||||
: store.addTextToStore(suffix, contents, references);
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ static Path parsePath(std::istream & str)
|
|||
throw Error(format("bad path `%1%' in derivation") % s);
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static StringSet parseStrings(std::istream & str, bool arePaths)
|
||||
{
|
||||
|
@ -60,7 +60,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
|
|||
res.insert(arePaths ? parsePath(str) : parseString(str));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
||||
Derivation parseDerivation(const string & s)
|
||||
{
|
||||
|
@ -106,7 +106,7 @@ Derivation parseDerivation(const string & s)
|
|||
expect(str, ")");
|
||||
drv.env[name] = value;
|
||||
}
|
||||
|
||||
|
||||
expect(str, ")");
|
||||
return drv;
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ string unparseDerivation(const Derivation & drv)
|
|||
|
||||
s += "],";
|
||||
printStrings(s, drv.inputSrcs.begin(), drv.inputSrcs.end());
|
||||
|
||||
|
||||
s += ','; printString(s, drv.platform);
|
||||
s += ','; printString(s, drv.builder);
|
||||
s += ','; printStrings(s, drv.args.begin(), drv.args.end());
|
||||
|
@ -178,9 +178,9 @@ string unparseDerivation(const Derivation & drv)
|
|||
s += ','; printString(s, i->second);
|
||||
s += ')';
|
||||
}
|
||||
|
||||
|
||||
s += "])";
|
||||
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
|
@ -190,7 +190,7 @@ bool isDerivation(const string & fileName)
|
|||
return hasSuffix(fileName, drvExtension);
|
||||
}
|
||||
|
||||
|
||||
|
||||
bool isFixedOutputDrv(const Derivation & drv)
|
||||
{
|
||||
return drv.outputs.size() == 1 &&
|
||||
|
@ -247,7 +247,7 @@ Hash hashDerivationModulo(StoreAPI & store, Derivation drv)
|
|||
inputs2[printHash(h)] = i->second;
|
||||
}
|
||||
drv.inputDrvs = inputs2;
|
||||
|
||||
|
||||
return hashString(htSHA256, unparseDerivation(drv));
|
||||
}
|
||||
|
||||
|
|
|
@ -34,10 +34,10 @@ static const int defaultGcLevel = 1000;
|
|||
int LocalStore::openGCLock(LockType lockType)
|
||||
{
|
||||
Path fnGCLock = (format("%1%/%2%")
|
||||
% nixStateDir % gcLockName).str();
|
||||
|
||||
% settings.nixStateDir % gcLockName).str();
|
||||
|
||||
debug(format("acquiring global GC lock `%1%'") % fnGCLock);
|
||||
|
||||
|
||||
AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT, 0600);
|
||||
if (fdGCLock == -1)
|
||||
throw SysError(format("opening global GC lock `%1%'") % fnGCLock);
|
||||
|
@ -51,7 +51,7 @@ int LocalStore::openGCLock(LockType lockType)
|
|||
/* !!! Restrict read permission on the GC root. Otherwise any
|
||||
process that can open the file for reading can DoS the
|
||||
collector. */
|
||||
|
||||
|
||||
return fdGCLock.borrow();
|
||||
}
|
||||
|
||||
|
@ -85,7 +85,7 @@ void LocalStore::addIndirectRoot(const Path & path)
|
|||
{
|
||||
string hash = printHash32(hashString(htSHA1, path));
|
||||
Path realRoot = canonPath((format("%1%/%2%/auto/%3%")
|
||||
% nixStateDir % gcRootsDir % hash).str());
|
||||
% settings.nixStateDir % gcRootsDir % hash).str());
|
||||
createSymlink(realRoot, path);
|
||||
}
|
||||
|
||||
|
@ -113,15 +113,15 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath,
|
|||
|
||||
else {
|
||||
if (!allowOutsideRootsDir) {
|
||||
Path rootsDir = canonPath((format("%1%/%2%") % nixStateDir % gcRootsDir).str());
|
||||
|
||||
Path rootsDir = canonPath((format("%1%/%2%") % settings.nixStateDir % gcRootsDir).str());
|
||||
|
||||
if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
|
||||
throw Error(format(
|
||||
"path `%1%' is not a valid garbage collector root; "
|
||||
"it's not in the directory `%2%'")
|
||||
% gcRoot % rootsDir);
|
||||
}
|
||||
|
||||
|
||||
createSymlink(gcRoot, storePath);
|
||||
}
|
||||
|
||||
|
@ -130,10 +130,10 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath,
|
|||
Instead of reading all the roots, it would be more efficient to
|
||||
check if the root is in a directory in or linked from the
|
||||
gcroots directory. */
|
||||
if (queryBoolSetting("gc-check-reachability", false)) {
|
||||
if (settings.checkRootReachability) {
|
||||
Roots roots = store.findRoots();
|
||||
if (roots.find(gcRoot) == roots.end())
|
||||
printMsg(lvlError,
|
||||
printMsg(lvlError,
|
||||
format(
|
||||
"warning: `%1%' is not in a directory where the garbage collector looks for roots; "
|
||||
"therefore, `%2%' might be removed by the garbage collector")
|
||||
|
@ -144,7 +144,7 @@ Path addPermRoot(StoreAPI & store, const Path & _storePath,
|
|||
progress. This prevents the set of permanent roots from
|
||||
increasing while a GC is in progress. */
|
||||
store.syncWithGC();
|
||||
|
||||
|
||||
return gcRoot;
|
||||
}
|
||||
|
||||
|
@ -160,23 +160,23 @@ void LocalStore::addTempRoot(const Path & path)
|
|||
if (fdTempRoots == -1) {
|
||||
|
||||
while (1) {
|
||||
Path dir = (format("%1%/%2%") % nixStateDir % tempRootsDir).str();
|
||||
Path dir = (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str();
|
||||
createDirs(dir);
|
||||
|
||||
|
||||
fnTempRoots = (format("%1%/%2%")
|
||||
% dir % getpid()).str();
|
||||
|
||||
AutoCloseFD fdGCLock = openGCLock(ltRead);
|
||||
|
||||
|
||||
if (pathExists(fnTempRoots))
|
||||
/* It *must* be stale, since there can be no two
|
||||
processes with the same pid. */
|
||||
unlink(fnTempRoots.c_str());
|
||||
|
||||
fdTempRoots = openLockFile(fnTempRoots, true);
|
||||
fdTempRoots = openLockFile(fnTempRoots, true);
|
||||
|
||||
fdGCLock.close();
|
||||
|
||||
|
||||
debug(format("acquiring read lock on `%1%'") % fnTempRoots);
|
||||
lockFile(fdTempRoots, ltRead, true);
|
||||
|
||||
|
@ -186,7 +186,7 @@ void LocalStore::addTempRoot(const Path & path)
|
|||
if (fstat(fdTempRoots, &st) == -1)
|
||||
throw SysError(format("statting `%1%'") % fnTempRoots);
|
||||
if (st.st_size == 0) break;
|
||||
|
||||
|
||||
/* The garbage collector deleted this file before we could
|
||||
get a lock. (It won't delete the file after we get a
|
||||
lock.) Try again. */
|
||||
|
@ -218,7 +218,7 @@ void removeTempRoots()
|
|||
|
||||
|
||||
/* Automatically clean up the temporary roots file when we exit. */
|
||||
struct RemoveTempRoots
|
||||
struct RemoveTempRoots
|
||||
{
|
||||
~RemoveTempRoots()
|
||||
{
|
||||
|
@ -238,10 +238,10 @@ static void readTempRoots(PathSet & tempRoots, FDs & fds)
|
|||
/* Read the `temproots' directory for per-process temporary root
|
||||
files. */
|
||||
Strings tempRootFiles = readDirectory(
|
||||
(format("%1%/%2%") % nixStateDir % tempRootsDir).str());
|
||||
(format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str());
|
||||
|
||||
foreach (Strings::iterator, i, tempRootFiles) {
|
||||
Path path = (format("%1%/%2%/%3%") % nixStateDir % tempRootsDir % *i).str();
|
||||
Path path = (format("%1%/%2%/%3%") % settings.nixStateDir % tempRootsDir % *i).str();
|
||||
|
||||
debug(format("reading temporary root file `%1%'") % path);
|
||||
FDPtr fd(new AutoCloseFD(open(path.c_str(), O_RDWR, 0666)));
|
||||
|
@ -295,7 +295,7 @@ static void findRoots(StoreAPI & store, const Path & path,
|
|||
bool recurseSymlinks, bool deleteStale, Roots & roots)
|
||||
{
|
||||
try {
|
||||
|
||||
|
||||
struct stat st;
|
||||
if (lstat(path.c_str(), &st) == -1)
|
||||
throw SysError(format("statting `%1%'") % path);
|
||||
|
@ -315,7 +315,7 @@ static void findRoots(StoreAPI & store, const Path & path,
|
|||
debug(format("found root `%1%' in `%2%'")
|
||||
% target % path);
|
||||
Path storePath = toStorePath(target);
|
||||
if (store.isValidPath(storePath))
|
||||
if (store.isValidPath(storePath))
|
||||
roots[path] = storePath;
|
||||
else
|
||||
printMsg(lvlInfo, format("skipping invalid root from `%1%' to `%2%'")
|
||||
|
@ -350,7 +350,7 @@ static void findRoots(StoreAPI & store, const Path & path,
|
|||
static Roots findRoots(StoreAPI & store, bool deleteStale)
|
||||
{
|
||||
Roots roots;
|
||||
Path rootsDir = canonPath((format("%1%/%2%") % nixStateDir % gcRootsDir).str());
|
||||
Path rootsDir = canonPath((format("%1%/%2%") % settings.nixStateDir % gcRootsDir).str());
|
||||
findRoots(store, rootsDir, true, deleteStale, roots);
|
||||
return roots;
|
||||
}
|
||||
|
@ -365,16 +365,16 @@ Roots LocalStore::findRoots()
|
|||
static void addAdditionalRoots(StoreAPI & store, PathSet & roots)
|
||||
{
|
||||
Path rootFinder = getEnv("NIX_ROOT_FINDER",
|
||||
nixLibexecDir + "/nix/find-runtime-roots.pl");
|
||||
settings.nixLibexecDir + "/nix/find-runtime-roots.pl");
|
||||
|
||||
if (rootFinder.empty()) return;
|
||||
|
||||
|
||||
debug(format("executing `%1%' to find additional roots") % rootFinder);
|
||||
|
||||
string result = runProgram(rootFinder);
|
||||
|
||||
Strings paths = tokenizeString(result, "\n");
|
||||
|
||||
|
||||
foreach (Strings::iterator, i, paths) {
|
||||
if (isInStore(*i)) {
|
||||
Path path = toStorePath(*i);
|
||||
|
@ -556,7 +556,7 @@ bool LocalStore::tryToDelete(GCState & state, const Path & path)
|
|||
|
||||
} else
|
||||
printMsg(lvlTalkative, format("would delete `%1%'") % path);
|
||||
|
||||
|
||||
state.deleted.insert(path);
|
||||
if (state.options.action != GCOptions::gcReturnLive)
|
||||
state.results.paths.insert(path);
|
||||
|
@ -621,10 +621,10 @@ void LocalStore::removeUnusedLinks(const GCState & state)
|
|||
void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||
{
|
||||
GCState state(results);
|
||||
state.options = options;
|
||||
|
||||
state.gcKeepOutputs = queryBoolSetting("gc-keep-outputs", false);
|
||||
state.gcKeepDerivations = queryBoolSetting("gc-keep-derivations", true);
|
||||
state.options = options;
|
||||
|
||||
state.gcKeepOutputs = settings.gcKeepOutputs;
|
||||
state.gcKeepDerivations = settings.gcKeepDerivations;
|
||||
|
||||
/* Using `--ignore-liveness' with `--delete' can have unintended
|
||||
consequences if `gc-keep-outputs' or `gc-keep-derivations' are
|
||||
|
@ -634,7 +634,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
state.gcKeepOutputs = false;
|
||||
state.gcKeepDerivations = false;
|
||||
}
|
||||
|
||||
|
||||
/* Acquire the global GC root. This prevents
|
||||
a) New roots from being added.
|
||||
b) Processes from creating new temporary root files. */
|
||||
|
@ -675,18 +675,18 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
if (!tryToDelete(state, *i))
|
||||
throw Error(format("cannot delete path `%1%' since it is still alive") % *i);
|
||||
}
|
||||
|
||||
|
||||
} else if (options.maxFreed > 0) {
|
||||
|
||||
|
||||
if (shouldDelete(state.options.action))
|
||||
printMsg(lvlError, format("deleting garbage..."));
|
||||
else
|
||||
printMsg(lvlError, format("determining live/dead paths..."));
|
||||
|
||||
|
||||
try {
|
||||
|
||||
AutoCloseDir dir = opendir(nixStore.c_str());
|
||||
if (!dir) throw SysError(format("opening directory `%1%'") % nixStore);
|
||||
AutoCloseDir dir = opendir(settings.nixStore.c_str());
|
||||
if (!dir) throw SysError(format("opening directory `%1%'") % settings.nixStore);
|
||||
|
||||
/* Read the store and immediately delete all paths that
|
||||
aren't valid. When using --max-freed etc., deleting
|
||||
|
@ -700,14 +700,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
checkInterrupt();
|
||||
string name = dirent->d_name;
|
||||
if (name == "." || name == "..") continue;
|
||||
Path path = nixStore + "/" + name;
|
||||
Path path = settings.nixStore + "/" + name;
|
||||
if (isValidPath(path))
|
||||
entries.push_back(path);
|
||||
else
|
||||
tryToDelete(state, path);
|
||||
}
|
||||
|
||||
dir.close();
|
||||
dir.close();
|
||||
|
||||
/* Now delete the unreachable valid paths. Randomise the
|
||||
order in which we delete entries to make the collector
|
||||
|
|
|
@ -10,36 +10,63 @@
|
|||
namespace nix {
|
||||
|
||||
|
||||
string nixStore = "/UNINIT";
|
||||
string nixDataDir = "/UNINIT";
|
||||
string nixLogDir = "/UNINIT";
|
||||
string nixStateDir = "/UNINIT";
|
||||
string nixDBPath = "/UNINIT";
|
||||
string nixConfDir = "/UNINIT";
|
||||
string nixLibexecDir = "/UNINIT";
|
||||
string nixBinDir = "/UNINIT";
|
||||
|
||||
bool keepFailed = false;
|
||||
bool keepGoing = false;
|
||||
bool tryFallback = false;
|
||||
Verbosity buildVerbosity = lvlError;
|
||||
unsigned int maxBuildJobs = 1;
|
||||
unsigned int buildCores = 1;
|
||||
bool readOnlyMode = false;
|
||||
string thisSystem = "unset";
|
||||
time_t maxSilentTime = 0;
|
||||
time_t buildTimeout = 0;
|
||||
Paths substituters;
|
||||
bool useBuildHook = true;
|
||||
bool printBuildTrace = false;
|
||||
Settings settings;
|
||||
|
||||
|
||||
static bool settingsRead = false;
|
||||
Settings::Settings()
|
||||
{
|
||||
keepFailed = false;
|
||||
keepGoing = false;
|
||||
tryFallback = false;
|
||||
buildVerbosity = lvlError;
|
||||
maxBuildJobs = 1;
|
||||
buildCores = 1;
|
||||
readOnlyMode = false;
|
||||
thisSystem = SYSTEM;
|
||||
maxSilentTime = 0;
|
||||
buildTimeout = 0;
|
||||
useBuildHook = true;
|
||||
printBuildTrace = false;
|
||||
reservedSize = 1024 * 1024;
|
||||
fsyncMetadata = true;
|
||||
useSQLiteWAL = true;
|
||||
syncBeforeRegistering = false;
|
||||
useSubstitutes = true;
|
||||
useChroot = false;
|
||||
dirsInChroot.insert("/dev");
|
||||
dirsInChroot.insert("/dev/pts");
|
||||
impersonateLinux26 = false;
|
||||
keepLog = true;
|
||||
compressLog = true;
|
||||
cacheFailure = false;
|
||||
pollInterval = 5;
|
||||
checkRootReachability = false;
|
||||
gcKeepOutputs = false;
|
||||
gcKeepDerivations = true;
|
||||
autoOptimiseStore = true;
|
||||
envKeepDerivations = false;
|
||||
}
|
||||
|
||||
static std::map<string, Strings> settings;
|
||||
|
||||
/* Overriden settings. */
|
||||
std::map<string, Strings> settingsCmdline;
|
||||
void Settings::processEnvironment()
|
||||
{
|
||||
nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
|
||||
nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
|
||||
nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
|
||||
nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
|
||||
nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
|
||||
nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
|
||||
nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
|
||||
nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
|
||||
|
||||
string subs = getEnv("NIX_SUBSTITUTERS", "default");
|
||||
if (subs == "default") {
|
||||
substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
|
||||
substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
|
||||
substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl");
|
||||
} else
|
||||
substituters = tokenizeString(subs, ":");
|
||||
}
|
||||
|
||||
|
||||
string & at(Strings & ss, unsigned int n)
|
||||
|
@ -50,7 +77,7 @@ string & at(Strings & ss, unsigned int n)
|
|||
}
|
||||
|
||||
|
||||
static void readSettings()
|
||||
void Settings::loadConfFile()
|
||||
{
|
||||
Path settingsFile = (format("%1%/%2%") % nixConfDir % "nix.conf").str();
|
||||
if (!pathExists(settingsFile)) return;
|
||||
|
@ -78,95 +105,103 @@ static void readSettings()
|
|||
|
||||
Strings::iterator i = tokens.begin();
|
||||
advance(i, 2);
|
||||
settings[name] = Strings(i, tokens.end());
|
||||
settings[name] = concatStringsSep(" ", Strings(i, tokens.end())); // FIXME: slow
|
||||
};
|
||||
|
||||
settings.insert(settingsCmdline.begin(), settingsCmdline.end());
|
||||
|
||||
settingsRead = true;
|
||||
}
|
||||
|
||||
|
||||
Strings querySetting(const string & name, const Strings & def)
|
||||
void Settings::set(const string & name, const string & value)
|
||||
{
|
||||
if (!settingsRead) readSettings();
|
||||
std::map<string, Strings>::iterator i = settings.find(name);
|
||||
return i == settings.end() ? def : i->second;
|
||||
settings[name] = value;
|
||||
overrides[name] = value;
|
||||
}
|
||||
|
||||
|
||||
string querySetting(const string & name, const string & def)
|
||||
void Settings::update()
|
||||
{
|
||||
Strings defs;
|
||||
defs.push_back(def);
|
||||
|
||||
Strings value = querySetting(name, defs);
|
||||
if (value.size() != 1)
|
||||
throw Error(format("configuration option `%1%' should not be a list") % name);
|
||||
|
||||
return value.front();
|
||||
get(tryFallback, "build-fallback");
|
||||
get(maxBuildJobs, "build-max-jobs");
|
||||
get(buildCores, "build-cores");
|
||||
get(thisSystem, "system");
|
||||
get(maxSilentTime, "build-max-silent-time");
|
||||
get(buildTimeout, "build-timeout");
|
||||
get(reservedSize, "gc-reserved-space");
|
||||
get(fsyncMetadata, "fsync-metadata");
|
||||
get(useSQLiteWAL, "use-sqlite-wal");
|
||||
get(syncBeforeRegistering, "sync-before-registering");
|
||||
get(useSubstitutes, "build-use-substitutes");
|
||||
get(buildUsersGroup, "build-users-group");
|
||||
get(useChroot, "build-use-chroot");
|
||||
get(dirsInChroot, "build-chroot-dirs");
|
||||
get(impersonateLinux26, "build-impersonate-linux-26");
|
||||
get(keepLog, "build-keep-log");
|
||||
get(compressLog, "build-compress-log");
|
||||
get(cacheFailure, "build-cache-failure");
|
||||
get(pollInterval, "build-poll-interval");
|
||||
get(checkRootReachability, "gc-check-reachability");
|
||||
get(gcKeepOutputs, "gc-keep-outputs");
|
||||
get(gcKeepDerivations, "gc-keep-derivations");
|
||||
get(autoOptimiseStore, "auto-optimise-store");
|
||||
get(envKeepDerivations, "env-keep-derivations");
|
||||
}
|
||||
|
||||
|
||||
bool queryBoolSetting(const string & name, bool def)
|
||||
void Settings::get(string & res, const string & name)
|
||||
{
|
||||
string v = querySetting(name, def ? "true" : "false");
|
||||
if (v == "true") return true;
|
||||
else if (v == "false") return false;
|
||||
SettingsMap::iterator i = settings.find(name);
|
||||
if (i == settings.end()) return;
|
||||
res = i->second;
|
||||
}
|
||||
|
||||
|
||||
void Settings::get(bool & res, const string & name)
|
||||
{
|
||||
SettingsMap::iterator i = settings.find(name);
|
||||
if (i == settings.end()) return;
|
||||
if (i->second == "true") res = true;
|
||||
else if (i->second == "false") res = false;
|
||||
else throw Error(format("configuration option `%1%' should be either `true' or `false', not `%2%'")
|
||||
% name % v);
|
||||
% name % i->second);
|
||||
}
|
||||
|
||||
|
||||
unsigned int queryIntSetting(const string & name, unsigned int def)
|
||||
void Settings::get(PathSet & res, const string & name)
|
||||
{
|
||||
int n;
|
||||
if (!string2Int(querySetting(name, int2String(def)), n) || n < 0)
|
||||
SettingsMap::iterator i = settings.find(name);
|
||||
if (i == settings.end()) return;
|
||||
res.clear();
|
||||
Strings ss = tokenizeString(i->second);
|
||||
res.insert(ss.begin(), ss.end());
|
||||
}
|
||||
|
||||
|
||||
template<class N> void Settings::get(N & res, const string & name)
|
||||
{
|
||||
SettingsMap::iterator i = settings.find(name);
|
||||
if (i == settings.end()) return;
|
||||
if (!string2Int(i->second, res))
|
||||
throw Error(format("configuration setting `%1%' should have an integer value") % name);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
||||
void overrideSetting(const string & name, const Strings & value)
|
||||
string Settings::pack()
|
||||
{
|
||||
if (settingsRead) settings[name] = value;
|
||||
settingsCmdline[name] = value;
|
||||
string s;
|
||||
foreach (SettingsMap::iterator, i, settings) {
|
||||
if (i->first.find('\n') != string::npos ||
|
||||
i->first.find('=') != string::npos ||
|
||||
i->second.find('\n') != string::npos)
|
||||
throw Error("illegal option name/value");
|
||||
s += i->first; s += '='; s += i->second; s += '\n';
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
void reloadSettings()
|
||||
Settings::SettingsMap Settings::getOverrides()
|
||||
{
|
||||
settingsRead = false;
|
||||
settings.clear();
|
||||
return overrides;
|
||||
}
|
||||
|
||||
|
||||
void setDefaultsFromEnvironment()
|
||||
{
|
||||
/* Setup Nix paths. */
|
||||
nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
|
||||
nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
|
||||
nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
|
||||
nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
|
||||
nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
|
||||
nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
|
||||
nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
|
||||
nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
|
||||
|
||||
string subs = getEnv("NIX_SUBSTITUTERS", "default");
|
||||
if (subs == "default") {
|
||||
substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
|
||||
substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
|
||||
} else
|
||||
substituters = tokenizeString(subs, ":");
|
||||
|
||||
/* Get some settings from the configuration file. */
|
||||
thisSystem = querySetting("system", SYSTEM);
|
||||
maxBuildJobs = queryIntSetting("build-max-jobs", 1);
|
||||
buildCores = queryIntSetting("build-cores", 1);
|
||||
maxSilentTime = queryIntSetting("build-max-silent-time", 0);
|
||||
buildTimeout = queryIntSetting("build-timeout", 0);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -2,118 +2,191 @@
|
|||
|
||||
#include "types.hh"
|
||||
|
||||
#include <map>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/* Path names. */
|
||||
struct Settings {
|
||||
|
||||
/* nixStore is the directory where we generally store atomic and
|
||||
derived files. */
|
||||
extern string nixStore;
|
||||
typedef std::map<string, string> SettingsMap;
|
||||
|
||||
extern string nixDataDir; /* !!! fix */
|
||||
Settings();
|
||||
|
||||
/* nixLogDir is the directory where we log various operations. */
|
||||
extern string nixLogDir;
|
||||
void processEnvironment();
|
||||
|
||||
/* nixStateDir is the directory where state is stored. */
|
||||
extern string nixStateDir;
|
||||
void loadConfFile();
|
||||
|
||||
/* nixDBPath is the path name of our Berkeley DB environment. */
|
||||
extern string nixDBPath;
|
||||
void set(const string & name, const string & value);
|
||||
|
||||
/* nixConfDir is the directory where configuration files are
|
||||
stored. */
|
||||
extern string nixConfDir;
|
||||
void update();
|
||||
|
||||
/* nixLibexecDir is the directory where internal helper programs are
|
||||
stored. */
|
||||
extern string nixLibexecDir;
|
||||
string pack();
|
||||
|
||||
/* nixBinDir is the directory where the main programs are stored. */
|
||||
extern string nixBinDir;
|
||||
SettingsMap getOverrides();
|
||||
|
||||
/* The directory where we store sources and derived files. */
|
||||
Path nixStore;
|
||||
|
||||
Path nixDataDir; /* !!! fix */
|
||||
|
||||
/* The directory where we log various operations. */
|
||||
Path nixLogDir;
|
||||
|
||||
/* The directory where state is stored. */
|
||||
Path nixStateDir;
|
||||
|
||||
/* The directory where we keep the SQLite database. */
|
||||
Path nixDBPath;
|
||||
|
||||
/* The directory where configuration files are stored. */
|
||||
Path nixConfDir;
|
||||
|
||||
/* The directory where internal helper programs are stored. */
|
||||
Path nixLibexecDir;
|
||||
|
||||
/* The directory where the main programs are stored. */
|
||||
Path nixBinDir;
|
||||
|
||||
/* Whether to keep temporary directories of failed builds. */
|
||||
bool keepFailed;
|
||||
|
||||
/* Whether to keep building subgoals when a sibling (another
|
||||
subgoal of the same goal) fails. */
|
||||
bool keepGoing;
|
||||
|
||||
/* Whether, if we cannot realise the known closure corresponding
|
||||
to a derivation, we should try to normalise the derivation
|
||||
instead. */
|
||||
bool tryFallback;
|
||||
|
||||
/* Verbosity level for build output. */
|
||||
Verbosity buildVerbosity;
|
||||
|
||||
/* Maximum number of parallel build jobs. 0 means unlimited. */
|
||||
unsigned int maxBuildJobs;
|
||||
|
||||
/* Number of CPU cores to utilize in parallel within a build,
|
||||
i.e. by passing this number to Make via '-j'. 0 means that the
|
||||
number of actual CPU cores on the local host ought to be
|
||||
auto-detected. */
|
||||
unsigned int buildCores;
|
||||
|
||||
/* Read-only mode. Don't copy stuff to the store, don't change
|
||||
the database. */
|
||||
bool readOnlyMode;
|
||||
|
||||
/* The canonical system name, as returned by config.guess. */
|
||||
string thisSystem;
|
||||
|
||||
/* The maximum time in seconds that a builer can go without
|
||||
producing any output on stdout/stderr before it is killed. 0
|
||||
means infinity. */
|
||||
time_t maxSilentTime;
|
||||
|
||||
/* The maximum duration in seconds that a builder can run. 0
|
||||
means infinity. */
|
||||
time_t buildTimeout;
|
||||
|
||||
/* The substituters. There are programs that can somehow realise
|
||||
a store path without building, e.g., by downloading it or
|
||||
copying it from a CD. */
|
||||
Paths substituters;
|
||||
|
||||
/* Whether to use build hooks (for distributed builds). Sometimes
|
||||
users want to disable this from the command-line. */
|
||||
bool useBuildHook;
|
||||
|
||||
/* Whether buildDerivations() should print out lines on stderr in
|
||||
a fixed format to allow its progress to be monitored. Each
|
||||
line starts with a "@". The following are defined:
|
||||
|
||||
@ build-started <drvpath> <outpath> <system> <logfile>
|
||||
@ build-failed <drvpath> <outpath> <exitcode> <error text>
|
||||
@ build-succeeded <drvpath> <outpath>
|
||||
@ substituter-started <outpath> <substituter>
|
||||
@ substituter-failed <outpath> <exitcode> <error text>
|
||||
@ substituter-succeeded <outpath>
|
||||
|
||||
Best combined with --no-build-output, otherwise stderr might
|
||||
conceivably contain lines in this format printed by the
|
||||
builders. */
|
||||
bool printBuildTrace;
|
||||
|
||||
/* Amount of reserved space for the garbage collector
|
||||
(/nix/var/nix/db/reserved). */
|
||||
off_t reservedSize;
|
||||
|
||||
/* Whether SQLite should use fsync. */
|
||||
bool fsyncMetadata;
|
||||
|
||||
/* Whether SQLite should use WAL mode. */
|
||||
bool useSQLiteWAL;
|
||||
|
||||
/* Whether to call sync() before registering a path as valid. */
|
||||
bool syncBeforeRegistering;
|
||||
|
||||
/* Whether to use substitutes. */
|
||||
bool useSubstitutes;
|
||||
|
||||
/* The Unix group that contains the build users. */
|
||||
string buildUsersGroup;
|
||||
|
||||
/* Whether to build in chroot. */
|
||||
bool useChroot;
|
||||
|
||||
/* The directories from the host filesystem to be included in the
|
||||
chroot. */
|
||||
PathSet dirsInChroot;
|
||||
|
||||
/* Whether to impersonate a Linux 2.6 machine on newer kernels. */
|
||||
bool impersonateLinux26;
|
||||
|
||||
/* Whether to store build logs. */
|
||||
bool keepLog;
|
||||
|
||||
/* Whether to compress logs. */
|
||||
bool compressLog;
|
||||
|
||||
/* Whether to cache build failures. */
|
||||
bool cacheFailure;
|
||||
|
||||
/* How often (in seconds) to poll for locks. */
|
||||
unsigned int pollInterval;
|
||||
|
||||
/* Whether to check if new GC roots can in fact be found by the
|
||||
garbage collector. */
|
||||
bool checkRootReachability;
|
||||
|
||||
/* Whether the garbage collector should keep outputs of live
|
||||
derivations. */
|
||||
bool gcKeepOutputs;
|
||||
|
||||
/* Whether the garbage collector should keep derivers of live
|
||||
paths. */
|
||||
bool gcKeepDerivations;
|
||||
|
||||
/* Whether to automatically replace files with identical contents
|
||||
with hard links. */
|
||||
bool autoOptimiseStore;
|
||||
|
||||
/* Whether to add derivations as a dependency of user environments
|
||||
(to prevent them from being GCed). */
|
||||
bool envKeepDerivations;
|
||||
|
||||
private:
|
||||
SettingsMap settings, overrides;
|
||||
|
||||
void get(string & res, const string & name);
|
||||
void get(bool & res, const string & name);
|
||||
void get(PathSet & res, const string & name);
|
||||
template<class N> void get(N & res, const string & name);
|
||||
};
|
||||
|
||||
|
||||
/* Misc. global flags. */
|
||||
|
||||
/* Whether to keep temporary directories of failed builds. */
|
||||
extern bool keepFailed;
|
||||
|
||||
/* Whether to keep building subgoals when a sibling (another subgoal
|
||||
of the same goal) fails. */
|
||||
extern bool keepGoing;
|
||||
|
||||
/* Whether, if we cannot realise the known closure corresponding to a
|
||||
derivation, we should try to normalise the derivation instead. */
|
||||
extern bool tryFallback;
|
||||
|
||||
/* Verbosity level for build output. */
|
||||
extern Verbosity buildVerbosity;
|
||||
|
||||
/* Maximum number of parallel build jobs. 0 means unlimited. */
|
||||
extern unsigned int maxBuildJobs;
|
||||
|
||||
/* Number of CPU cores to utilize in parallel within a build, i.e. by passing
|
||||
this number to Make via '-j'. 0 means that the number of actual CPU cores on
|
||||
the local host ought to be auto-detected. */
|
||||
extern unsigned int buildCores;
|
||||
|
||||
/* Read-only mode. Don't copy stuff to the store, don't change the
|
||||
database. */
|
||||
extern bool readOnlyMode;
|
||||
|
||||
/* The canonical system name, as returned by config.guess. */
|
||||
extern string thisSystem;
|
||||
|
||||
/* The maximum time in seconds that a builer can go without producing
|
||||
any output on stdout/stderr before it is killed. 0 means
|
||||
infinity. */
|
||||
extern time_t maxSilentTime;
|
||||
|
||||
/* The maximum duration in seconds that a builder can run. 0 means
|
||||
infinity. */
|
||||
extern time_t buildTimeout;
|
||||
|
||||
/* The substituters. There are programs that can somehow realise a
|
||||
store path without building, e.g., by downloading it or copying it
|
||||
from a CD. */
|
||||
extern Paths substituters;
|
||||
|
||||
/* Whether to use build hooks (for distributed builds). Sometimes
|
||||
users want to disable this from the command-line. */
|
||||
extern bool useBuildHook;
|
||||
|
||||
/* Whether buildDerivations() should print out lines on stderr in a
|
||||
fixed format to allow its progress to be monitored. Each line
|
||||
starts with a "@". The following are defined:
|
||||
|
||||
@ build-started <drvpath> <outpath> <system> <logfile>
|
||||
@ build-failed <drvpath> <outpath> <exitcode> <error text>
|
||||
@ build-succeeded <drvpath> <outpath>
|
||||
@ substituter-started <outpath> <substituter>
|
||||
@ substituter-failed <outpath> <exitcode> <error text>
|
||||
@ substituter-succeeded <outpath>
|
||||
|
||||
Best combined with --no-build-output, otherwise stderr might
|
||||
conceivably contain lines in this format printed by the builders.
|
||||
*/
|
||||
extern bool printBuildTrace;
|
||||
|
||||
|
||||
Strings querySetting(const string & name, const Strings & def);
|
||||
|
||||
string querySetting(const string & name, const string & def);
|
||||
|
||||
bool queryBoolSetting(const string & name, bool def);
|
||||
|
||||
unsigned int queryIntSetting(const string & name, unsigned int def);
|
||||
|
||||
void overrideSetting(const string & name, const Strings & value);
|
||||
|
||||
void reloadSettings();
|
||||
|
||||
void setDefaultsFromEnvironment();
|
||||
// FIXME: don't use a global variable.
|
||||
extern Settings settings;
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#include "worker-protocol.hh"
|
||||
#include "derivations.hh"
|
||||
#include "immutable.hh"
|
||||
|
||||
|
||||
#include <iostream>
|
||||
#include <algorithm>
|
||||
|
||||
|
@ -147,11 +147,11 @@ struct SQLiteStmtUse
|
|||
};
|
||||
|
||||
|
||||
struct SQLiteTxn
|
||||
struct SQLiteTxn
|
||||
{
|
||||
bool active;
|
||||
sqlite3 * db;
|
||||
|
||||
|
||||
SQLiteTxn(sqlite3 * db) : active(false) {
|
||||
this->db = db;
|
||||
if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
|
||||
|
@ -159,14 +159,14 @@ struct SQLiteTxn
|
|||
active = true;
|
||||
}
|
||||
|
||||
void commit()
|
||||
void commit()
|
||||
{
|
||||
if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "committing transaction");
|
||||
active = false;
|
||||
}
|
||||
|
||||
~SQLiteTxn()
|
||||
|
||||
~SQLiteTxn()
|
||||
{
|
||||
try {
|
||||
if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
|
||||
|
@ -181,7 +181,7 @@ struct SQLiteTxn
|
|||
void checkStoreNotSymlink()
|
||||
{
|
||||
if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return;
|
||||
Path path = nixStore;
|
||||
Path path = settings.nixStore;
|
||||
struct stat st;
|
||||
while (path != "/") {
|
||||
if (lstat(path.c_str(), &st))
|
||||
|
@ -198,29 +198,27 @@ void checkStoreNotSymlink()
|
|||
|
||||
LocalStore::LocalStore(bool reserveSpace)
|
||||
{
|
||||
substitutablePathsLoaded = false;
|
||||
|
||||
schemaPath = nixDBPath + "/schema";
|
||||
|
||||
if (readOnlyMode) {
|
||||
schemaPath = settings.nixDBPath + "/schema";
|
||||
|
||||
if (settings.readOnlyMode) {
|
||||
openDB(false);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Create missing state directories if they don't already exist. */
|
||||
createDirs(nixStore);
|
||||
createDirs(linksDir = nixStore + "/.links");
|
||||
Path profilesDir = nixStateDir + "/profiles";
|
||||
createDirs(nixStateDir + "/profiles");
|
||||
createDirs(nixStateDir + "/temproots");
|
||||
createDirs(nixDBPath);
|
||||
Path gcRootsDir = nixStateDir + "/gcroots";
|
||||
createDirs(settings.nixStore);
|
||||
createDirs(linksDir = settings.nixStore + "/.links");
|
||||
Path profilesDir = settings.nixStateDir + "/profiles";
|
||||
createDirs(settings.nixStateDir + "/profiles");
|
||||
createDirs(settings.nixStateDir + "/temproots");
|
||||
createDirs(settings.nixDBPath);
|
||||
Path gcRootsDir = settings.nixStateDir + "/gcroots";
|
||||
if (!pathExists(gcRootsDir)) {
|
||||
createDirs(gcRootsDir);
|
||||
if (symlink(profilesDir.c_str(), (gcRootsDir + "/profiles").c_str()) == -1)
|
||||
throw SysError(format("creating symlink to `%1%'") % profilesDir);
|
||||
}
|
||||
|
||||
|
||||
checkStoreNotSymlink();
|
||||
|
||||
/* We can't open a SQLite database if the disk is full. Since
|
||||
|
@ -228,13 +226,12 @@ LocalStore::LocalStore(bool reserveSpace)
|
|||
needed, we reserve some dummy space that we can free just
|
||||
before doing a garbage collection. */
|
||||
try {
|
||||
Path reservedPath = nixDBPath + "/reserved";
|
||||
Path reservedPath = settings.nixDBPath + "/reserved";
|
||||
if (reserveSpace) {
|
||||
int reservedSize = queryIntSetting("gc-reserved-space", 1024 * 1024);
|
||||
struct stat st;
|
||||
if (stat(reservedPath.c_str(), &st) == -1 ||
|
||||
st.st_size != reservedSize)
|
||||
writeFile(reservedPath, string(reservedSize, 'X'));
|
||||
st.st_size != settings.reservedSize)
|
||||
writeFile(reservedPath, string(settings.reservedSize, 'X'));
|
||||
}
|
||||
else
|
||||
deletePath(reservedPath);
|
||||
|
@ -244,15 +241,15 @@ LocalStore::LocalStore(bool reserveSpace)
|
|||
/* Acquire the big fat lock in shared mode to make sure that no
|
||||
schema upgrade is in progress. */
|
||||
try {
|
||||
Path globalLockPath = nixDBPath + "/big-lock";
|
||||
Path globalLockPath = settings.nixDBPath + "/big-lock";
|
||||
globalLock = openLockFile(globalLockPath.c_str(), true);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo != EACCES) throw;
|
||||
readOnlyMode = true;
|
||||
settings.readOnlyMode = true;
|
||||
openDB(false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if (!lockFile(globalLock, ltRead, false)) {
|
||||
printMsg(lvlError, "waiting for the big Nix store lock...");
|
||||
lockFile(globalLock, ltRead, true);
|
||||
|
@ -264,20 +261,20 @@ LocalStore::LocalStore(bool reserveSpace)
|
|||
if (curSchema > nixSchemaVersion)
|
||||
throw Error(format("current Nix store schema is version %1%, but I only support %2%")
|
||||
% curSchema % nixSchemaVersion);
|
||||
|
||||
|
||||
else if (curSchema == 0) { /* new store */
|
||||
curSchema = nixSchemaVersion;
|
||||
openDB(true);
|
||||
writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
|
||||
}
|
||||
|
||||
|
||||
else if (curSchema < nixSchemaVersion) {
|
||||
if (curSchema < 5)
|
||||
throw Error(
|
||||
"Your Nix store has a database in Berkeley DB format,\n"
|
||||
"which is no longer supported. To convert to the new format,\n"
|
||||
"please upgrade Nix to version 0.12 first.");
|
||||
|
||||
|
||||
if (!lockFile(globalLock, ltWrite, false)) {
|
||||
printMsg(lvlError, "waiting for exclusive access to the Nix store...");
|
||||
lockFile(globalLock, ltWrite, true);
|
||||
|
@ -293,7 +290,7 @@ LocalStore::LocalStore(bool reserveSpace)
|
|||
|
||||
lockFile(globalLock, ltRead, true);
|
||||
}
|
||||
|
||||
|
||||
else openDB(false);
|
||||
}
|
||||
|
||||
|
@ -327,7 +324,7 @@ int LocalStore::getSchema()
|
|||
void LocalStore::openDB(bool create)
|
||||
{
|
||||
/* Open the Nix database. */
|
||||
if (sqlite3_open_v2((nixDBPath + "/db.sqlite").c_str(), &db.db,
|
||||
if (sqlite3_open_v2((settings.nixDBPath + "/db.sqlite").c_str(), &db.db,
|
||||
SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
|
||||
throw Error("cannot open SQLite database");
|
||||
|
||||
|
@ -339,18 +336,18 @@ void LocalStore::openDB(bool create)
|
|||
|
||||
/* !!! check whether sqlite has been built with foreign key
|
||||
support */
|
||||
|
||||
|
||||
/* Whether SQLite should fsync(). "Normal" synchronous mode
|
||||
should be safe enough. If the user asks for it, don't sync at
|
||||
all. This can cause database corruption if the system
|
||||
crashes. */
|
||||
string syncMode = queryBoolSetting("fsync-metadata", true) ? "normal" : "off";
|
||||
string syncMode = settings.fsyncMetadata ? "normal" : "off";
|
||||
if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "setting synchronous mode");
|
||||
|
||||
/* Set the SQLite journal mode. WAL mode is fastest, so it's the
|
||||
default. */
|
||||
string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate";
|
||||
string mode = settings.useSQLiteWAL ? "wal" : "truncate";
|
||||
string prevMode;
|
||||
{
|
||||
SQLiteStmt stmt;
|
||||
|
@ -368,7 +365,7 @@ void LocalStore::openDB(bool create)
|
|||
derivation is done in a single fsync(). */
|
||||
if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 8192;", 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "setting autocheckpoint interval");
|
||||
|
||||
|
||||
/* Initialise the database schema, if necessary. */
|
||||
if (create) {
|
||||
#include "schema.sql.hh"
|
||||
|
@ -423,7 +420,7 @@ void canonicalisePathMetaData(const Path & path, bool recurse)
|
|||
|
||||
struct stat st;
|
||||
if (lstat(path.c_str(), &st))
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
|
||||
/* Really make sure that the path is of a supported type. This
|
||||
has already been checked in dumpPath(). */
|
||||
|
@ -451,7 +448,7 @@ void canonicalisePathMetaData(const Path & path, bool recurse)
|
|||
|
||||
/* Mask out all type related bits. */
|
||||
mode_t mode = st.st_mode & ~S_IFMT;
|
||||
|
||||
|
||||
if (mode != 0444 && mode != 0555) {
|
||||
mode = (st.st_mode & S_IFMT)
|
||||
| 0444
|
||||
|
@ -461,7 +458,7 @@ void canonicalisePathMetaData(const Path & path, bool recurse)
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
if (st.st_mtime != mtimeStore) {
|
||||
struct timeval times[2];
|
||||
times[0].tv_sec = st.st_atime;
|
||||
|
@ -472,14 +469,14 @@ void canonicalisePathMetaData(const Path & path, bool recurse)
|
|||
if (lutimes(path.c_str(), times) == -1)
|
||||
#else
|
||||
if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
|
||||
#endif
|
||||
#endif
|
||||
throw SysError(format("changing modification time of `%1%'") % path);
|
||||
}
|
||||
|
||||
if (recurse && S_ISDIR(st.st_mode)) {
|
||||
Strings names = readDirectory(path);
|
||||
foreach (Strings::iterator, i, names)
|
||||
canonicalisePathMetaData(path + "/" + *i, true);
|
||||
foreach (Strings::iterator, i, names)
|
||||
canonicalisePathMetaData(path + "/" + *i, true);
|
||||
}
|
||||
|
||||
makeImmutable(path);
|
||||
|
@ -494,7 +491,7 @@ void canonicalisePathMetaData(const Path & path)
|
|||
be a symlink, since we can't change its ownership. */
|
||||
struct stat st;
|
||||
if (lstat(path.c_str(), &st))
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
|
||||
if (st.st_uid != geteuid()) {
|
||||
assert(S_ISLNK(st.st_mode));
|
||||
|
@ -508,7 +505,7 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation &
|
|||
string drvName = storePathToName(drvPath);
|
||||
assert(isDerivation(drvName));
|
||||
drvName = string(drvName, 0, drvName.size() - drvExtension.size());
|
||||
|
||||
|
||||
if (isFixedOutputDrv(drv)) {
|
||||
DerivationOutputs::const_iterator out = drv.outputs.find("out");
|
||||
if (out == drv.outputs.end())
|
||||
|
@ -532,7 +529,7 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation &
|
|||
}
|
||||
|
||||
Hash h = hashDerivationModulo(*this, drvCopy);
|
||||
|
||||
|
||||
foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
|
||||
Path outPath = makeOutputPath(i->first, h, drvName);
|
||||
StringPairs::const_iterator j = drv.env.find(i->first);
|
||||
|
@ -568,14 +565,14 @@ unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool che
|
|||
derivation. */
|
||||
if (isDerivation(info.path)) {
|
||||
Derivation drv = parseDerivation(readFile(info.path));
|
||||
|
||||
|
||||
/* Verify that the output paths in the derivation are correct
|
||||
(i.e., follow the scheme for computing output paths from
|
||||
derivations). Note that if this throws an error, then the
|
||||
DB transaction is rolled back, so the path validity
|
||||
registration above is undone. */
|
||||
if (checkOutputs) checkDerivationOutputs(info.path, drv);
|
||||
|
||||
|
||||
foreach (DerivationOutputs::iterator, i, drv.outputs) {
|
||||
SQLiteStmtUse use(stmtAddDerivationOutput);
|
||||
stmtAddDerivationOutput.bind(id);
|
||||
|
@ -681,7 +678,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path)
|
|||
SQLiteStmtUse use1(stmtQueryPathInfo);
|
||||
|
||||
stmtQueryPathInfo.bind(path);
|
||||
|
||||
|
||||
int r = sqlite3_step(stmtQueryPathInfo);
|
||||
if (r == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
|
||||
if (r != SQLITE_ROW) throwSQLiteError(db, "querying path in database");
|
||||
|
@ -691,7 +688,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path)
|
|||
const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1);
|
||||
assert(s);
|
||||
info.hash = parseHashField(path, s);
|
||||
|
||||
|
||||
info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2);
|
||||
|
||||
s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3);
|
||||
|
@ -756,13 +753,22 @@ bool LocalStore::isValidPath(const Path & path)
|
|||
}
|
||||
|
||||
|
||||
PathSet LocalStore::queryValidPaths()
|
||||
PathSet LocalStore::queryValidPaths(const PathSet & paths)
|
||||
{
|
||||
PathSet res;
|
||||
foreach (PathSet::const_iterator, i, paths)
|
||||
if (isValidPath(*i)) res.insert(*i);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
PathSet LocalStore::queryAllValidPaths()
|
||||
{
|
||||
SQLiteStmt stmt;
|
||||
stmt.create(db, "select path from ValidPaths");
|
||||
|
||||
|
||||
PathSet res;
|
||||
|
||||
|
||||
int r;
|
||||
while ((r = sqlite3_step(stmt)) == SQLITE_ROW) {
|
||||
const char * s = (const char *) sqlite3_column_text(stmt, 0);
|
||||
|
@ -825,10 +831,10 @@ PathSet LocalStore::queryValidDerivers(const Path & path)
|
|||
assert(s);
|
||||
derivers.insert(s);
|
||||
}
|
||||
|
||||
|
||||
if (r != SQLITE_DONE)
|
||||
throwSQLiteError(db, format("error getting valid derivers of `%1%'") % path);
|
||||
|
||||
|
||||
return derivers;
|
||||
}
|
||||
|
||||
|
@ -836,10 +842,10 @@ PathSet LocalStore::queryValidDerivers(const Path & path)
|
|||
PathSet LocalStore::queryDerivationOutputs(const Path & path)
|
||||
{
|
||||
SQLiteTxn txn(db);
|
||||
|
||||
|
||||
SQLiteStmtUse use(stmtQueryDerivationOutputs);
|
||||
stmtQueryDerivationOutputs.bind(queryValidPathId(path));
|
||||
|
||||
|
||||
PathSet outputs;
|
||||
int r;
|
||||
while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
|
||||
|
@ -847,7 +853,7 @@ PathSet LocalStore::queryDerivationOutputs(const Path & path)
|
|||
assert(s);
|
||||
outputs.insert(s);
|
||||
}
|
||||
|
||||
|
||||
if (r != SQLITE_DONE)
|
||||
throwSQLiteError(db, format("error getting outputs of `%1%'") % path);
|
||||
|
||||
|
@ -858,10 +864,10 @@ PathSet LocalStore::queryDerivationOutputs(const Path & path)
|
|||
StringSet LocalStore::queryDerivationOutputNames(const Path & path)
|
||||
{
|
||||
SQLiteTxn txn(db);
|
||||
|
||||
|
||||
SQLiteStmtUse use(stmtQueryDerivationOutputs);
|
||||
stmtQueryDerivationOutputs.bind(queryValidPathId(path));
|
||||
|
||||
|
||||
StringSet outputNames;
|
||||
int r;
|
||||
while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
|
||||
|
@ -869,7 +875,7 @@ StringSet LocalStore::queryDerivationOutputNames(const Path & path)
|
|||
assert(s);
|
||||
outputNames.insert(s);
|
||||
}
|
||||
|
||||
|
||||
if (r != SQLITE_DONE)
|
||||
throwSQLiteError(db, format("error getting output names of `%1%'") % path);
|
||||
|
||||
|
@ -880,11 +886,11 @@ StringSet LocalStore::queryDerivationOutputNames(const Path & path)
|
|||
Path LocalStore::queryPathFromHashPart(const string & hashPart)
|
||||
{
|
||||
if (hashPart.size() != 32) throw Error("invalid hash part");
|
||||
|
||||
|
||||
SQLiteTxn txn(db);
|
||||
|
||||
Path prefix = nixStore + "/" + hashPart;
|
||||
|
||||
Path prefix = settings.nixStore + "/" + hashPart;
|
||||
|
||||
SQLiteStmtUse use(stmtQueryPathFromHashPart);
|
||||
stmtQueryPathFromHashPart.bind(prefix);
|
||||
|
||||
|
@ -900,16 +906,17 @@ Path LocalStore::queryPathFromHashPart(const string & hashPart)
|
|||
void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run)
|
||||
{
|
||||
if (run.pid != -1) return;
|
||||
|
||||
|
||||
debug(format("starting substituter program `%1%'") % substituter);
|
||||
|
||||
Pipe toPipe, fromPipe;
|
||||
|
||||
Pipe toPipe, fromPipe, errorPipe;
|
||||
|
||||
toPipe.create();
|
||||
fromPipe.create();
|
||||
errorPipe.create();
|
||||
|
||||
run.pid = fork();
|
||||
|
||||
|
||||
switch (run.pid) {
|
||||
|
||||
case -1:
|
||||
|
@ -923,13 +930,19 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter &
|
|||
library named libutil. As a result, substituters
|
||||
written in Perl (i.e. all of them) fail. */
|
||||
unsetenv("DYLD_LIBRARY_PATH");
|
||||
|
||||
|
||||
/* Pass configuration options (including those overriden
|
||||
with --option) to the substituter. */
|
||||
setenv("_NIX_OPTIONS", settings.pack().c_str(), 1);
|
||||
|
||||
fromPipe.readSide.close();
|
||||
toPipe.writeSide.close();
|
||||
if (dup2(toPipe.readSide, STDIN_FILENO) == -1)
|
||||
throw SysError("dupping stdin");
|
||||
if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1)
|
||||
throw SysError("dupping stdout");
|
||||
if (dup2(errorPipe.writeSide, STDERR_FILENO) == -1)
|
||||
throw SysError("dupping stderr");
|
||||
closeMostFDs(set<int>());
|
||||
execl(substituter.c_str(), substituter.c_str(), "--query", NULL);
|
||||
throw SysError(format("executing `%1%'") % substituter);
|
||||
|
@ -940,9 +953,10 @@ void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter &
|
|||
}
|
||||
|
||||
/* Parent. */
|
||||
|
||||
|
||||
run.to = toPipe.writeSide.borrow();
|
||||
run.from = fromPipe.readSide.borrow();
|
||||
run.error = errorPipe.readSide.borrow();
|
||||
}
|
||||
|
||||
|
||||
|
@ -955,50 +969,79 @@ template<class T> T getIntLine(int fd)
|
|||
}
|
||||
|
||||
|
||||
bool LocalStore::hasSubstitutes(const Path & path)
|
||||
PathSet LocalStore::querySubstitutablePaths(const PathSet & paths)
|
||||
{
|
||||
foreach (Paths::iterator, i, substituters) {
|
||||
PathSet res;
|
||||
foreach (Paths::iterator, i, settings.substituters) {
|
||||
if (res.size() == paths.size()) break;
|
||||
RunningSubstituter & run(runningSubstituters[*i]);
|
||||
startSubstituter(*i, run);
|
||||
writeLine(run.to, "have\n" + path);
|
||||
if (getIntLine<int>(run.from)) return true;
|
||||
string s = "have ";
|
||||
foreach (PathSet::const_iterator, j, paths)
|
||||
if (res.find(*j) == res.end()) { s += *j; s += " "; }
|
||||
writeLine(run.to, s);
|
||||
while (true) {
|
||||
/* FIXME: we only read stderr when an error occurs, so
|
||||
substituters should only write (short) messages to
|
||||
stderr when they fail. I.e. they shouldn't write debug
|
||||
output. */
|
||||
try {
|
||||
Path path = readLine(run.from);
|
||||
if (path == "") break;
|
||||
res.insert(path);
|
||||
} catch (EndOfFile e) {
|
||||
throw Error(format("substituter `%1%' failed: %2%") % *i % chomp(drainFD(run.error)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
bool LocalStore::querySubstitutablePathInfo(const Path & substituter,
|
||||
const Path & path, SubstitutablePathInfo & info)
|
||||
void LocalStore::querySubstitutablePathInfos(const Path & substituter,
|
||||
PathSet & paths, SubstitutablePathInfos & infos)
|
||||
{
|
||||
RunningSubstituter & run(runningSubstituters[substituter]);
|
||||
startSubstituter(substituter, run);
|
||||
|
||||
writeLine(run.to, "info\n" + path);
|
||||
string s = "info ";
|
||||
foreach (PathSet::const_iterator, i, paths)
|
||||
if (infos.find(*i) == infos.end()) { s += *i; s += " "; }
|
||||
writeLine(run.to, s);
|
||||
|
||||
if (!getIntLine<int>(run.from)) return false;
|
||||
|
||||
info.deriver = readLine(run.from);
|
||||
if (info.deriver != "") assertStorePath(info.deriver);
|
||||
int nrRefs = getIntLine<int>(run.from);
|
||||
while (nrRefs--) {
|
||||
Path p = readLine(run.from);
|
||||
assertStorePath(p);
|
||||
info.references.insert(p);
|
||||
while (true) {
|
||||
try {
|
||||
Path path = readLine(run.from);
|
||||
if (path == "") break;
|
||||
if (paths.find(path) == paths.end())
|
||||
throw Error(format("got unexpected path `%1%' from substituter") % path);
|
||||
paths.erase(path);
|
||||
SubstitutablePathInfo & info(infos[path]);
|
||||
info.deriver = readLine(run.from);
|
||||
if (info.deriver != "") assertStorePath(info.deriver);
|
||||
int nrRefs = getIntLine<int>(run.from);
|
||||
while (nrRefs--) {
|
||||
Path p = readLine(run.from);
|
||||
assertStorePath(p);
|
||||
info.references.insert(p);
|
||||
}
|
||||
info.downloadSize = getIntLine<long long>(run.from);
|
||||
info.narSize = getIntLine<long long>(run.from);
|
||||
} catch (EndOfFile e) {
|
||||
throw Error(format("substituter `%1%' failed: %2%") % substituter % chomp(drainFD(run.error)));
|
||||
}
|
||||
}
|
||||
info.downloadSize = getIntLine<long long>(run.from);
|
||||
info.narSize = getIntLine<long long>(run.from);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool LocalStore::querySubstitutablePathInfo(const Path & path,
|
||||
SubstitutablePathInfo & info)
|
||||
void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
|
||||
SubstitutablePathInfos & infos)
|
||||
{
|
||||
foreach (Paths::iterator, i, substituters)
|
||||
if (querySubstitutablePathInfo(*i, path, info)) return true;
|
||||
return false;
|
||||
PathSet todo = paths;
|
||||
foreach (Paths::iterator, i, settings.substituters) {
|
||||
if (todo.empty()) break;
|
||||
querySubstitutablePathInfos(*i, todo, infos);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -1018,17 +1061,16 @@ void LocalStore::registerValidPath(const ValidPathInfo & info)
|
|||
|
||||
void LocalStore::registerValidPaths(const ValidPathInfos & infos)
|
||||
{
|
||||
/* sqlite will fsync by default, but the new valid paths may not be fsync-ed.
|
||||
/* SQLite will fsync by default, but the new valid paths may not be fsync-ed.
|
||||
* So some may want to fsync them before registering the validity, at the
|
||||
* expense of some speed of the path registering operation. */
|
||||
if (queryBoolSetting("sync-before-registering", false))
|
||||
sync();
|
||||
if (settings.syncBeforeRegistering) sync();
|
||||
|
||||
while (1) {
|
||||
try {
|
||||
SQLiteTxn txn(db);
|
||||
PathSet paths;
|
||||
|
||||
|
||||
foreach (ValidPathInfos::const_iterator, i, infos) {
|
||||
assert(i->hash.type == htSHA256);
|
||||
/* !!! Maybe the registration info should be updated if the
|
||||
|
@ -1119,7 +1161,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
|
|||
hash = hashPath(htSHA256, dstPath);
|
||||
|
||||
optimisePath(dstPath); // FIXME: combine with hashPath()
|
||||
|
||||
|
||||
ValidPathInfo info;
|
||||
info.path = dstPath;
|
||||
info.hash = hash.first;
|
||||
|
@ -1144,7 +1186,7 @@ Path LocalStore::addToStore(const Path & _srcPath,
|
|||
method for very large paths, but `copyPath' is mainly used for
|
||||
small files. */
|
||||
StringSink sink;
|
||||
if (recursive)
|
||||
if (recursive)
|
||||
dumpPath(srcPath, sink, filter);
|
||||
else
|
||||
sink.s = readFile(srcPath);
|
||||
|
@ -1157,7 +1199,7 @@ Path LocalStore::addTextToStore(const string & name, const string & s,
|
|||
const PathSet & references)
|
||||
{
|
||||
Path dstPath = computeStorePathForText(name, s, references);
|
||||
|
||||
|
||||
addTempRoot(dstPath);
|
||||
|
||||
if (!isValidPath(dstPath)) {
|
||||
|
@ -1175,7 +1217,7 @@ Path LocalStore::addTextToStore(const string & name, const string & s,
|
|||
HashResult hash = hashPath(htSHA256, dstPath);
|
||||
|
||||
optimisePath(dstPath);
|
||||
|
||||
|
||||
ValidPathInfo info;
|
||||
info.path = dstPath;
|
||||
info.hash = hash.first;
|
||||
|
@ -1233,7 +1275,7 @@ void LocalStore::exportPath(const Path & path, bool sign,
|
|||
throw Error(format("path `%1%' is not valid") % path);
|
||||
|
||||
HashAndWriteSink hashAndWriteSink(sink);
|
||||
|
||||
|
||||
dumpPath(path, hashAndWriteSink);
|
||||
|
||||
/* Refuse to export paths that have changed. This prevents
|
||||
|
@ -1248,7 +1290,7 @@ void LocalStore::exportPath(const Path & path, bool sign,
|
|||
writeInt(EXPORT_MAGIC, hashAndWriteSink);
|
||||
|
||||
writeString(path, hashAndWriteSink);
|
||||
|
||||
|
||||
PathSet references;
|
||||
queryReferences(path, references);
|
||||
writeStrings(references, hashAndWriteSink);
|
||||
|
@ -1258,15 +1300,15 @@ void LocalStore::exportPath(const Path & path, bool sign,
|
|||
|
||||
if (sign) {
|
||||
Hash hash = hashAndWriteSink.currentHash();
|
||||
|
||||
|
||||
writeInt(1, hashAndWriteSink);
|
||||
|
||||
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete delTmp(tmpDir);
|
||||
Path hashFile = tmpDir + "/hash";
|
||||
writeFile(hashFile, printHash(hash));
|
||||
|
||||
Path secretKey = nixConfDir + "/signing-key.sec";
|
||||
Path secretKey = settings.nixConfDir + "/signing-key.sec";
|
||||
checkSecrecy(secretKey);
|
||||
|
||||
Strings args;
|
||||
|
@ -1279,7 +1321,7 @@ void LocalStore::exportPath(const Path & path, bool sign,
|
|||
string signature = runProgram(OPENSSL_PATH, true, args);
|
||||
|
||||
writeString(signature, hashAndWriteSink);
|
||||
|
||||
|
||||
} else
|
||||
writeInt(0, hashAndWriteSink);
|
||||
}
|
||||
|
@ -1312,7 +1354,7 @@ Path LocalStore::createTempDirInStore()
|
|||
/* There is a slight possibility that `tmpDir' gets deleted by
|
||||
the GC between createTempDir() and addTempRoot(), so repeat
|
||||
until `tmpDir' exists. */
|
||||
tmpDir = createTempDir(nixStore);
|
||||
tmpDir = createTempDir(settings.nixStore);
|
||||
addTempRoot(tmpDir);
|
||||
} while (!pathExists(tmpDir));
|
||||
return tmpDir;
|
||||
|
@ -1322,7 +1364,7 @@ Path LocalStore::createTempDirInStore()
|
|||
Path LocalStore::importPath(bool requireSignature, Source & source)
|
||||
{
|
||||
HashAndReadSource hashAndReadSource(source);
|
||||
|
||||
|
||||
/* We don't yet know what store path this archive contains (the
|
||||
store path follows the archive data proper), and besides, we
|
||||
don't know yet whether the signature is valid. */
|
||||
|
@ -1352,7 +1394,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
|
|||
|
||||
if (requireSignature && !haveSignature)
|
||||
throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);
|
||||
|
||||
|
||||
if (haveSignature) {
|
||||
string signature = readString(hashAndReadSource);
|
||||
|
||||
|
@ -1364,7 +1406,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
|
|||
args.push_back("rsautl");
|
||||
args.push_back("-verify");
|
||||
args.push_back("-inkey");
|
||||
args.push_back(nixConfDir + "/signing-key.pub");
|
||||
args.push_back(settings.nixConfDir + "/signing-key.pub");
|
||||
args.push_back("-pubin");
|
||||
args.push_back("-in");
|
||||
args.push_back(sigFile);
|
||||
|
@ -1406,13 +1448,13 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
|
|||
% unpacked % dstPath);
|
||||
|
||||
canonicalisePathMetaData(dstPath);
|
||||
|
||||
|
||||
/* !!! if we were clever, we could prevent the hashPath()
|
||||
here. */
|
||||
HashResult hash = hashPath(htSHA256, dstPath);
|
||||
|
||||
optimisePath(dstPath); // FIXME: combine with hashPath()
|
||||
|
||||
|
||||
ValidPathInfo info;
|
||||
info.path = dstPath;
|
||||
info.hash = hash.first;
|
||||
|
@ -1421,10 +1463,10 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
|
|||
info.deriver = deriver != "" && isValidPath(deriver) ? deriver : "";
|
||||
registerValidPath(info);
|
||||
}
|
||||
|
||||
|
||||
outputLock.setDeletion(true);
|
||||
}
|
||||
|
||||
|
||||
return dstPath;
|
||||
}
|
||||
|
||||
|
@ -1472,14 +1514,14 @@ void LocalStore::verifyStore(bool checkContents)
|
|||
|
||||
/* Acquire the global GC lock to prevent a garbage collection. */
|
||||
AutoCloseFD fdGCLock = openGCLock(ltWrite);
|
||||
|
||||
Paths entries = readDirectory(nixStore);
|
||||
|
||||
Paths entries = readDirectory(settings.nixStore);
|
||||
PathSet store(entries.begin(), entries.end());
|
||||
|
||||
/* Check whether all valid paths actually exist. */
|
||||
printMsg(lvlInfo, "checking path existence...");
|
||||
|
||||
PathSet validPaths2 = queryValidPaths(), validPaths, done;
|
||||
PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
|
||||
|
||||
foreach (PathSet::iterator, i, validPaths2)
|
||||
verifyPath(*i, store, done, validPaths);
|
||||
|
@ -1501,7 +1543,7 @@ void LocalStore::verifyStore(bool checkContents)
|
|||
/* Check the content hash (optionally - slow). */
|
||||
printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
|
||||
HashResult current = hashPath(info.hash.type, *i);
|
||||
|
||||
|
||||
if (info.hash != nullHash && info.hash != current.first) {
|
||||
printMsg(lvlError, format("path `%1%' was modified! "
|
||||
"expected hash `%2%', got `%3%'")
|
||||
|
@ -1516,18 +1558,18 @@ void LocalStore::verifyStore(bool checkContents)
|
|||
info.hash = current.first;
|
||||
update = true;
|
||||
}
|
||||
|
||||
|
||||
/* Fill in missing narSize fields (from old stores). */
|
||||
if (info.narSize == 0) {
|
||||
printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
|
||||
info.narSize = current.second;
|
||||
update = true;
|
||||
update = true;
|
||||
}
|
||||
|
||||
if (update) updatePathInfo(info);
|
||||
|
||||
}
|
||||
|
||||
|
||||
} catch (Error & e) {
|
||||
/* It's possible that the path got GC'ed, so ignore
|
||||
errors on invalid paths. */
|
||||
|
@ -1543,7 +1585,7 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
|
|||
PathSet & done, PathSet & validPaths)
|
||||
{
|
||||
checkInterrupt();
|
||||
|
||||
|
||||
if (done.find(path) != done.end()) return;
|
||||
done.insert(path);
|
||||
|
||||
|
@ -1570,10 +1612,10 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
|
|||
invalidatePath(path);
|
||||
} else
|
||||
printMsg(lvlError, format("path `%1%' disappeared, but it still has valid referrers!") % path);
|
||||
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
validPaths.insert(path);
|
||||
}
|
||||
|
||||
|
@ -1583,9 +1625,9 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
|
|||
PathSet LocalStore::queryValidPathsOld()
|
||||
{
|
||||
PathSet paths;
|
||||
Strings entries = readDirectory(nixDBPath + "/info");
|
||||
Strings entries = readDirectory(settings.nixDBPath + "/info");
|
||||
foreach (Strings::iterator, i, entries)
|
||||
if (i->at(0) != '.') paths.insert(nixStore + "/" + *i);
|
||||
if (i->at(0) != '.') paths.insert(settings.nixStore + "/" + *i);
|
||||
return paths;
|
||||
}
|
||||
|
||||
|
@ -1597,7 +1639,7 @@ ValidPathInfo LocalStore::queryPathInfoOld(const Path & path)
|
|||
|
||||
/* Read the info file. */
|
||||
string baseName = baseNameOf(path);
|
||||
Path infoFile = (format("%1%/info/%2%") % nixDBPath % baseName).str();
|
||||
Path infoFile = (format("%1%/info/%2%") % settings.nixDBPath % baseName).str();
|
||||
if (!pathExists(infoFile))
|
||||
throw Error(format("path `%1%' is not valid") % path);
|
||||
string info = readFile(infoFile);
|
||||
|
@ -1639,14 +1681,14 @@ void LocalStore::upgradeStore6()
|
|||
PathSet validPaths = queryValidPathsOld();
|
||||
|
||||
SQLiteTxn txn(db);
|
||||
|
||||
|
||||
foreach (PathSet::iterator, i, validPaths) {
|
||||
addValidPath(queryPathInfoOld(*i), false);
|
||||
std::cerr << ".";
|
||||
}
|
||||
|
||||
std::cerr << "|";
|
||||
|
||||
|
||||
foreach (PathSet::iterator, i, validPaths) {
|
||||
ValidPathInfo info = queryPathInfoOld(*i);
|
||||
unsigned long long referrer = queryValidPathId(*i);
|
||||
|
|
|
@ -45,7 +45,7 @@ struct OptimiseStats
|
|||
struct RunningSubstituter
|
||||
{
|
||||
Pid pid;
|
||||
AutoCloseFD to, from;
|
||||
AutoCloseFD to, from, error;
|
||||
};
|
||||
|
||||
|
||||
|
@ -75,19 +75,16 @@ struct SQLiteStmt
|
|||
void bind64(long long value);
|
||||
void bind();
|
||||
};
|
||||
|
||||
|
||||
|
||||
class LocalStore : public StoreAPI
|
||||
{
|
||||
private:
|
||||
bool substitutablePathsLoaded;
|
||||
PathSet substitutablePaths;
|
||||
|
||||
typedef std::map<Path, RunningSubstituter> RunningSubstituters;
|
||||
RunningSubstituters runningSubstituters;
|
||||
|
||||
Path linksDir;
|
||||
|
||||
|
||||
public:
|
||||
|
||||
/* Initialise the local store, upgrading the schema if
|
||||
|
@ -95,13 +92,15 @@ public:
|
|||
LocalStore(bool reserveSpace = true);
|
||||
|
||||
~LocalStore();
|
||||
|
||||
|
||||
/* Implementations of abstract store API methods. */
|
||||
|
||||
|
||||
bool isValidPath(const Path & path);
|
||||
|
||||
PathSet queryValidPaths();
|
||||
|
||||
PathSet queryValidPaths(const PathSet & paths);
|
||||
|
||||
PathSet queryAllValidPaths();
|
||||
|
||||
ValidPathInfo queryPathInfo(const Path & path);
|
||||
|
||||
Hash queryPathHash(const Path & path);
|
||||
|
@ -121,19 +120,17 @@ public:
|
|||
PathSet queryDerivationOutputs(const Path & path);
|
||||
|
||||
StringSet queryDerivationOutputNames(const Path & path);
|
||||
|
||||
Path queryPathFromHashPart(const string & hashPart);
|
||||
|
||||
PathSet querySubstitutablePaths();
|
||||
|
||||
bool hasSubstitutes(const Path & path);
|
||||
|
||||
bool querySubstitutablePathInfo(const Path & path,
|
||||
SubstitutablePathInfo & info);
|
||||
|
||||
bool querySubstitutablePathInfo(const Path & substituter,
|
||||
const Path & path, SubstitutablePathInfo & info);
|
||||
|
||||
Path queryPathFromHashPart(const string & hashPart);
|
||||
|
||||
PathSet querySubstitutablePaths(const PathSet & paths);
|
||||
|
||||
void querySubstitutablePathInfos(const Path & substituter,
|
||||
PathSet & paths, SubstitutablePathInfos & infos);
|
||||
|
||||
void querySubstitutablePathInfos(const PathSet & paths,
|
||||
SubstitutablePathInfos & infos);
|
||||
|
||||
Path addToStore(const Path & srcPath,
|
||||
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||
PathFilter & filter = defaultPathFilter);
|
||||
|
@ -152,7 +149,7 @@ public:
|
|||
Sink & sink);
|
||||
|
||||
Paths importPaths(bool requireSignature, Source & source);
|
||||
|
||||
|
||||
void buildPaths(const PathSet & paths);
|
||||
|
||||
void ensurePath(const Path & path);
|
||||
|
@ -160,7 +157,7 @@ public:
|
|||
void addTempRoot(const Path & path);
|
||||
|
||||
void addIndirectRoot(const Path & path);
|
||||
|
||||
|
||||
void syncWithGC();
|
||||
|
||||
Roots findRoots();
|
||||
|
@ -173,7 +170,7 @@ public:
|
|||
|
||||
/* Optimise a single store path. */
|
||||
void optimisePath(const Path & path);
|
||||
|
||||
|
||||
/* Check the integrity of the Nix store. */
|
||||
void verifyStore(bool checkContents);
|
||||
|
||||
|
@ -232,18 +229,18 @@ private:
|
|||
unsigned long long queryValidPathId(const Path & path);
|
||||
|
||||
unsigned long long addValidPath(const ValidPathInfo & info, bool checkOutputs = true);
|
||||
|
||||
|
||||
void addReference(unsigned long long referrer, unsigned long long reference);
|
||||
|
||||
|
||||
void appendReferrer(const Path & from, const Path & to, bool lock);
|
||||
|
||||
|
||||
void rewriteReferrers(const Path & path, bool purge, PathSet referrers);
|
||||
|
||||
void invalidatePath(const Path & path);
|
||||
|
||||
/* Delete a path from the Nix store. */
|
||||
void invalidatePathChecked(const Path & path);
|
||||
|
||||
|
||||
void verifyPath(const Path & path, const PathSet & store,
|
||||
PathSet & done, PathSet & validPaths);
|
||||
|
||||
|
@ -256,14 +253,14 @@ private:
|
|||
struct GCState;
|
||||
|
||||
void deleteGarbage(GCState & state, const Path & path);
|
||||
|
||||
|
||||
bool tryToDelete(GCState & state, const Path & path);
|
||||
|
||||
|
||||
bool isActiveTempFile(const GCState & state,
|
||||
const Path & path, const string & suffix);
|
||||
|
||||
|
||||
int openGCLock(LockType lockType);
|
||||
|
||||
|
||||
void removeUnusedLinks(const GCState & state);
|
||||
|
||||
void startSubstituter(const Path & substituter,
|
||||
|
@ -272,7 +269,7 @@ private:
|
|||
Path createTempDirInStore();
|
||||
|
||||
Path importPath(bool requireSignature, Source & source);
|
||||
|
||||
|
||||
void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
|
||||
|
||||
void optimisePath_(OptimiseStats & stats, const Path & path);
|
||||
|
@ -293,9 +290,6 @@ void canonicalisePathMetaData(const Path & path, bool recurse);
|
|||
|
||||
MakeError(PathInUse, Error);
|
||||
|
||||
/* Whether we are in build users mode. */
|
||||
bool haveBuildUsers();
|
||||
|
||||
/* Whether we are root. */
|
||||
bool amPrivileged();
|
||||
|
||||
|
@ -307,5 +301,5 @@ void getOwnership(const Path & path);
|
|||
void deletePathWrapped(const Path & path, unsigned long long & bytesFreed);
|
||||
|
||||
void deletePathWrapped(const Path & path);
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -52,68 +52,118 @@ void queryMissing(StoreAPI & store, const PathSet & targets,
|
|||
unsigned long long & downloadSize, unsigned long long & narSize)
|
||||
{
|
||||
downloadSize = narSize = 0;
|
||||
|
||||
|
||||
PathSet todo(targets.begin(), targets.end()), done;
|
||||
|
||||
while (!todo.empty()) {
|
||||
Path p = *(todo.begin());
|
||||
todo.erase(p);
|
||||
if (done.find(p) != done.end()) continue;
|
||||
done.insert(p);
|
||||
/* Getting substitute info has high latency when using the binary
|
||||
cache substituter. Thus it's essential to do substitute
|
||||
queries in parallel as much as possible. To accomplish this
|
||||
we do the following:
|
||||
|
||||
if (isDerivation(p)) {
|
||||
if (!store.isValidPath(p)) {
|
||||
unknown.insert(p);
|
||||
continue;
|
||||
- For all paths still to be processed (‘todo’), we add all
|
||||
paths for which we need info to the set ‘query’. For an
|
||||
unbuilt derivation this is the output paths; otherwise, it's
|
||||
the path itself.
|
||||
|
||||
- We get info about all paths in ‘query’ in parallel.
|
||||
|
||||
- We process the results and add new items to ‘todo’ if
|
||||
necessary. E.g. if a path is substitutable, then we need to
|
||||
get info on its references.
|
||||
|
||||
- Repeat until ‘todo’ is empty.
|
||||
*/
|
||||
|
||||
while (!todo.empty()) {
|
||||
|
||||
PathSet query, todoDrv, todoNonDrv;
|
||||
|
||||
foreach (PathSet::iterator, i, todo) {
|
||||
if (done.find(*i) != done.end()) continue;
|
||||
done.insert(*i);
|
||||
|
||||
if (isDerivation(*i)) {
|
||||
if (!store.isValidPath(*i)) {
|
||||
// FIXME: we could try to substitute p.
|
||||
unknown.insert(*i);
|
||||
continue;
|
||||
}
|
||||
Derivation drv = derivationFromPath(store, *i);
|
||||
|
||||
PathSet invalid;
|
||||
foreach (DerivationOutputs::iterator, j, drv.outputs)
|
||||
if (!store.isValidPath(j->second.path)) invalid.insert(j->second.path);
|
||||
if (invalid.empty()) continue;
|
||||
|
||||
todoDrv.insert(*i);
|
||||
if (settings.useSubstitutes) query.insert(invalid.begin(), invalid.end());
|
||||
}
|
||||
Derivation drv = derivationFromPath(store, p);
|
||||
|
||||
else {
|
||||
if (store.isValidPath(*i)) continue;
|
||||
query.insert(*i);
|
||||
todoNonDrv.insert(*i);
|
||||
}
|
||||
}
|
||||
|
||||
todo.clear();
|
||||
|
||||
SubstitutablePathInfos infos;
|
||||
store.querySubstitutablePathInfos(query, infos);
|
||||
|
||||
foreach (PathSet::iterator, i, todoDrv) {
|
||||
// FIXME: cache this
|
||||
Derivation drv = derivationFromPath(store, *i);
|
||||
|
||||
bool mustBuild = false;
|
||||
foreach (DerivationOutputs::iterator, i, drv.outputs)
|
||||
if (!store.isValidPath(i->second.path) &&
|
||||
!(queryBoolSetting("build-use-substitutes", true) && store.hasSubstitutes(i->second.path)))
|
||||
mustBuild = true;
|
||||
if (settings.useSubstitutes) {
|
||||
foreach (DerivationOutputs::iterator, j, drv.outputs)
|
||||
if (!store.isValidPath(j->second.path) &&
|
||||
infos.find(j->second.path) == infos.end())
|
||||
mustBuild = true;
|
||||
} else
|
||||
mustBuild = true;
|
||||
|
||||
if (mustBuild) {
|
||||
willBuild.insert(p);
|
||||
willBuild.insert(*i);
|
||||
todo.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
|
||||
foreach (DerivationInputs::iterator, i, drv.inputDrvs)
|
||||
todo.insert(i->first);
|
||||
} else
|
||||
} else
|
||||
foreach (DerivationOutputs::iterator, i, drv.outputs)
|
||||
todo.insert(i->second.path);
|
||||
todoNonDrv.insert(i->second.path);
|
||||
}
|
||||
|
||||
else {
|
||||
if (store.isValidPath(p)) continue;
|
||||
SubstitutablePathInfo info;
|
||||
if (store.querySubstitutablePathInfo(p, info)) {
|
||||
willSubstitute.insert(p);
|
||||
downloadSize += info.downloadSize;
|
||||
narSize += info.narSize;
|
||||
todo.insert(info.references.begin(), info.references.end());
|
||||
foreach (PathSet::iterator, i, todoNonDrv) {
|
||||
done.insert(*i);
|
||||
SubstitutablePathInfos::iterator info = infos.find(*i);
|
||||
if (info != infos.end()) {
|
||||
willSubstitute.insert(*i);
|
||||
downloadSize += info->second.downloadSize;
|
||||
narSize += info->second.narSize;
|
||||
todo.insert(info->second.references.begin(), info->second.references.end());
|
||||
} else
|
||||
unknown.insert(p);
|
||||
unknown.insert(*i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
static void dfsVisit(StoreAPI & store, const PathSet & paths,
|
||||
const Path & path, PathSet & visited, Paths & sorted,
|
||||
PathSet & parents)
|
||||
{
|
||||
if (parents.find(path) != parents.end())
|
||||
throw BuildError(format("cycle detected in the references of `%1%'") % path);
|
||||
|
||||
|
||||
if (visited.find(path) != visited.end()) return;
|
||||
visited.insert(path);
|
||||
parents.insert(path);
|
||||
|
||||
|
||||
PathSet references;
|
||||
if (store.isValidPath(path))
|
||||
store.queryReferences(path, references);
|
||||
|
||||
|
||||
foreach (PathSet::iterator, i, references)
|
||||
/* Don't traverse into paths that don't exist. That can
|
||||
happen due to substitutes for non-existent paths. */
|
||||
|
|
|
@ -19,7 +19,7 @@ static void makeWritable(const Path & path)
|
|||
{
|
||||
struct stat st;
|
||||
if (lstat(path.c_str(), &st))
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
if (S_ISDIR(st.st_mode) || S_ISREG(st.st_mode)) makeMutable(path);
|
||||
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
|
||||
throw SysError(format("changing writability of `%1%'") % path);
|
||||
|
@ -57,22 +57,22 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path)
|
|||
|
||||
struct stat st;
|
||||
if (lstat(path.c_str(), &st))
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
Strings names = readDirectory(path);
|
||||
foreach (Strings::iterator, i, names)
|
||||
optimisePath_(stats, path + "/" + *i);
|
||||
foreach (Strings::iterator, i, names)
|
||||
optimisePath_(stats, path + "/" + *i);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/* We can hard link regular files and maybe symlinks. */
|
||||
if (!S_ISREG(st.st_mode)
|
||||
#if CAN_LINK_SYMLINK
|
||||
&& !S_ISLNK(st.st_mode)
|
||||
#endif
|
||||
) return;
|
||||
|
||||
|
||||
/* Sometimes SNAFUs can cause files in the Nix store to be
|
||||
modified, in particular when running programs as root under
|
||||
NixOS (example: $fontconfig/var/cache being modified). Skip
|
||||
|
@ -113,14 +113,14 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path)
|
|||
current file with a hard link to that file. */
|
||||
struct stat stLink;
|
||||
if (lstat(linkPath.c_str(), &stLink))
|
||||
throw SysError(format("getting attributes of path `%1%'") % linkPath);
|
||||
|
||||
throw SysError(format("getting attributes of path `%1%'") % linkPath);
|
||||
|
||||
stats.sameContents++;
|
||||
if (st.st_ino == stLink.st_ino) {
|
||||
printMsg(lvlDebug, format("`%1%' is already linked to `%2%'") % path % linkPath);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
printMsg(lvlTalkative, format("linking `%1%' to `%2%'") % path % linkPath);
|
||||
|
||||
/* Make the containing directory writable, but only if it's not
|
||||
|
@ -128,7 +128,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path)
|
|||
permissions). */
|
||||
bool mustToggle = !isStorePath(path);
|
||||
if (mustToggle) makeWritable(dirOf(path));
|
||||
|
||||
|
||||
/* When we're done, make the directory read-only again and reset
|
||||
its timestamp back to 0. */
|
||||
MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
|
||||
|
@ -149,7 +149,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path)
|
|||
MakeImmutable mk1(linkPath);
|
||||
|
||||
Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
|
||||
% nixStore % getpid() % rand()).str();
|
||||
% settings.nixStore % getpid() % rand()).str();
|
||||
|
||||
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
|
||||
if (errno == EMLINK) {
|
||||
|
@ -194,7 +194,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path)
|
|||
|
||||
void LocalStore::optimiseStore(OptimiseStats & stats)
|
||||
{
|
||||
PathSet paths = queryValidPaths();
|
||||
PathSet paths = queryAllValidPaths();
|
||||
|
||||
foreach (PathSet::iterator, i, paths) {
|
||||
addTempRoot(*i);
|
||||
|
@ -207,10 +207,8 @@ void LocalStore::optimiseStore(OptimiseStats & stats)
|
|||
|
||||
void LocalStore::optimisePath(const Path & path)
|
||||
{
|
||||
if (queryBoolSetting("auto-optimise-store", true)) {
|
||||
OptimiseStats stats;
|
||||
optimisePath_(stats, path);
|
||||
}
|
||||
OptimiseStats stats;
|
||||
if (settings.autoOptimiseStore) optimisePath_(stats, path);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -60,7 +60,7 @@ void RemoteStore::openConnection(bool reserveSpace)
|
|||
else
|
||||
throw Error(format("invalid setting for NIX_REMOTE, `%1%'")
|
||||
% remoteMode);
|
||||
|
||||
|
||||
from.fd = fdSocket;
|
||||
to.fd = fdSocket;
|
||||
|
||||
|
@ -100,18 +100,18 @@ void RemoteStore::forkSlave()
|
|||
/* Start the worker. */
|
||||
Path worker = getEnv("NIX_WORKER");
|
||||
if (worker == "")
|
||||
worker = nixBinDir + "/nix-worker";
|
||||
worker = settings.nixBinDir + "/nix-worker";
|
||||
|
||||
child = fork();
|
||||
|
||||
|
||||
switch (child) {
|
||||
|
||||
|
||||
case -1:
|
||||
throw SysError("unable to fork");
|
||||
|
||||
case 0:
|
||||
try { /* child */
|
||||
|
||||
|
||||
if (dup2(fdChild, STDOUT_FILENO) == -1)
|
||||
throw SysError("dupping write side");
|
||||
|
||||
|
@ -124,7 +124,7 @@ void RemoteStore::forkSlave()
|
|||
execlp(worker.c_str(), worker.c_str(), "--slave", NULL);
|
||||
|
||||
throw SysError(format("executing `%1%'") % worker);
|
||||
|
||||
|
||||
} catch (std::exception & e) {
|
||||
std::cerr << format("child error: %1%\n") % e.what();
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ void RemoteStore::connectToDaemon()
|
|||
if (fdSocket == -1)
|
||||
throw SysError("cannot create Unix domain socket");
|
||||
|
||||
string socketPath = nixStateDir + DEFAULT_SOCKET_PATH;
|
||||
string socketPath = settings.nixStateDir + DEFAULT_SOCKET_PATH;
|
||||
|
||||
/* Urgh, sockaddr_un allows path names of only 108 characters. So
|
||||
chdir to the socket directory so that we can pass a relative
|
||||
|
@ -150,16 +150,16 @@ void RemoteStore::connectToDaemon()
|
|||
applications... */
|
||||
AutoCloseFD fdPrevDir = open(".", O_RDONLY);
|
||||
if (fdPrevDir == -1) throw SysError("couldn't open current directory");
|
||||
chdir(dirOf(socketPath).c_str());
|
||||
chdir(dirOf(socketPath).c_str());
|
||||
Path socketPathRel = "./" + baseNameOf(socketPath);
|
||||
|
||||
|
||||
struct sockaddr_un addr;
|
||||
addr.sun_family = AF_UNIX;
|
||||
if (socketPathRel.size() >= sizeof(addr.sun_path))
|
||||
throw Error(format("socket path `%1%' is too long") % socketPathRel);
|
||||
using namespace std;
|
||||
strcpy(addr.sun_path, socketPathRel.c_str());
|
||||
|
||||
|
||||
if (connect(fdSocket, (struct sockaddr *) &addr, sizeof(addr)) == -1)
|
||||
throw SysError(format("cannot connect to daemon at `%1%'") % socketPath);
|
||||
|
||||
|
@ -184,24 +184,34 @@ RemoteStore::~RemoteStore()
|
|||
void RemoteStore::setOptions()
|
||||
{
|
||||
writeInt(wopSetOptions, to);
|
||||
writeInt(keepFailed, to);
|
||||
writeInt(keepGoing, to);
|
||||
writeInt(tryFallback, to);
|
||||
|
||||
writeInt(settings.keepFailed, to);
|
||||
writeInt(settings.keepGoing, to);
|
||||
writeInt(settings.tryFallback, to);
|
||||
writeInt(verbosity, to);
|
||||
writeInt(maxBuildJobs, to);
|
||||
writeInt(maxSilentTime, to);
|
||||
writeInt(settings.maxBuildJobs, to);
|
||||
writeInt(settings.maxSilentTime, to);
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) >= 2)
|
||||
writeInt(useBuildHook, to);
|
||||
writeInt(settings.useBuildHook, to);
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) >= 4) {
|
||||
writeInt(buildVerbosity, to);
|
||||
writeInt(settings.buildVerbosity, to);
|
||||
writeInt(logType, to);
|
||||
writeInt(printBuildTrace, to);
|
||||
writeInt(settings.printBuildTrace, to);
|
||||
}
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) >= 6)
|
||||
writeInt(buildCores, to);
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) >= 10)
|
||||
writeInt(queryBoolSetting("build-use-substitutes", true), to);
|
||||
|
||||
writeInt(settings.buildCores, to);
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) >= 10)
|
||||
writeInt(settings.useSubstitutes, to);
|
||||
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) >= 12) {
|
||||
Settings::SettingsMap overrides = settings.getOverrides();
|
||||
writeInt(overrides.size(), to);
|
||||
foreach (Settings::SettingsMap::iterator, i, overrides) {
|
||||
writeString(i->first, to);
|
||||
writeString(i->second, to);
|
||||
}
|
||||
}
|
||||
|
||||
processStderr();
|
||||
}
|
||||
|
||||
|
@ -217,42 +227,96 @@ bool RemoteStore::isValidPath(const Path & path)
|
|||
}
|
||||
|
||||
|
||||
PathSet RemoteStore::queryValidPaths()
|
||||
PathSet RemoteStore::queryValidPaths(const PathSet & paths)
|
||||
{
|
||||
openConnection();
|
||||
writeInt(wopQueryValidPaths, to);
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
|
||||
PathSet res;
|
||||
foreach (PathSet::const_iterator, i, paths)
|
||||
if (isValidPath(*i)) res.insert(*i);
|
||||
return res;
|
||||
} else {
|
||||
writeInt(wopQueryValidPaths, to);
|
||||
writeStrings(paths, to);
|
||||
processStderr();
|
||||
return readStorePaths<PathSet>(from);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
PathSet RemoteStore::queryAllValidPaths()
|
||||
{
|
||||
openConnection();
|
||||
writeInt(wopQueryAllValidPaths, to);
|
||||
processStderr();
|
||||
return readStorePaths<PathSet>(from);
|
||||
}
|
||||
|
||||
|
||||
bool RemoteStore::hasSubstitutes(const Path & path)
|
||||
PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
|
||||
{
|
||||
openConnection();
|
||||
writeInt(wopHasSubstitutes, to);
|
||||
writeString(path, to);
|
||||
processStderr();
|
||||
unsigned int reply = readInt(from);
|
||||
return reply != 0;
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
|
||||
PathSet res;
|
||||
foreach (PathSet::const_iterator, i, paths) {
|
||||
writeInt(wopHasSubstitutes, to);
|
||||
writeString(*i, to);
|
||||
processStderr();
|
||||
if (readInt(from)) res.insert(*i);
|
||||
}
|
||||
return res;
|
||||
} else {
|
||||
writeInt(wopQuerySubstitutablePaths, to);
|
||||
writeStrings(paths, to);
|
||||
processStderr();
|
||||
return readStorePaths<PathSet>(from);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool RemoteStore::querySubstitutablePathInfo(const Path & path,
|
||||
SubstitutablePathInfo & info)
|
||||
void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
|
||||
SubstitutablePathInfos & infos)
|
||||
{
|
||||
if (paths.empty()) return;
|
||||
|
||||
openConnection();
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return false;
|
||||
writeInt(wopQuerySubstitutablePathInfo, to);
|
||||
writeString(path, to);
|
||||
processStderr();
|
||||
unsigned int reply = readInt(from);
|
||||
if (reply == 0) return false;
|
||||
info.deriver = readString(from);
|
||||
if (info.deriver != "") assertStorePath(info.deriver);
|
||||
info.references = readStorePaths<PathSet>(from);
|
||||
info.downloadSize = readLongLong(from);
|
||||
info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
|
||||
return true;
|
||||
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return;
|
||||
|
||||
if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
|
||||
|
||||
foreach (PathSet::const_iterator, i, paths) {
|
||||
SubstitutablePathInfo info;
|
||||
writeInt(wopQuerySubstitutablePathInfo, to);
|
||||
writeString(*i, to);
|
||||
processStderr();
|
||||
unsigned int reply = readInt(from);
|
||||
if (reply == 0) continue;
|
||||
info.deriver = readString(from);
|
||||
if (info.deriver != "") assertStorePath(info.deriver);
|
||||
info.references = readStorePaths<PathSet>(from);
|
||||
info.downloadSize = readLongLong(from);
|
||||
info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
|
||||
infos[*i] = info;
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
writeInt(wopQuerySubstitutablePathInfos, to);
|
||||
writeStrings(paths, to);
|
||||
processStderr();
|
||||
unsigned int count = readInt(from);
|
||||
for (unsigned int n = 0; n < count; n++) {
|
||||
Path path = readStorePath(from);
|
||||
SubstitutablePathInfo & info(infos[path]);
|
||||
info.deriver = readString(from);
|
||||
if (info.deriver != "") assertStorePath(info.deriver);
|
||||
info.references = readStorePaths<PathSet>(from);
|
||||
info.downloadSize = readLongLong(from);
|
||||
info.narSize = readLongLong(from);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -357,7 +421,7 @@ Path RemoteStore::addToStore(const Path & _srcPath,
|
|||
bool recursive, HashType hashAlgo, PathFilter & filter)
|
||||
{
|
||||
openConnection();
|
||||
|
||||
|
||||
Path srcPath(absPath(_srcPath));
|
||||
|
||||
writeInt(wopAddToStore, to);
|
||||
|
@ -380,7 +444,7 @@ Path RemoteStore::addTextToStore(const string & name, const string & s,
|
|||
writeString(name, to);
|
||||
writeString(s, to);
|
||||
writeStrings(references, to);
|
||||
|
||||
|
||||
processStderr();
|
||||
return readStorePath(from);
|
||||
}
|
||||
|
@ -477,7 +541,7 @@ Roots RemoteStore::findRoots()
|
|||
void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||
{
|
||||
openConnection(false);
|
||||
|
||||
|
||||
writeInt(wopCollectGarbage, to);
|
||||
writeInt(options.action, to);
|
||||
writeStrings(options.pathsToDelete, to);
|
||||
|
@ -489,9 +553,9 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
|
|||
writeInt(0, to);
|
||||
writeInt(0, to);
|
||||
}
|
||||
|
||||
|
||||
processStderr();
|
||||
|
||||
|
||||
results.paths = readStrings<PathSet>(from);
|
||||
results.bytesFreed = readLongLong(from);
|
||||
readLongLong(from); // obsolete
|
||||
|
|
|
@ -26,7 +26,9 @@ public:
|
|||
|
||||
bool isValidPath(const Path & path);
|
||||
|
||||
PathSet queryValidPaths();
|
||||
PathSet queryValidPaths(const PathSet & paths);
|
||||
|
||||
PathSet queryAllValidPaths();
|
||||
|
||||
ValidPathInfo queryPathInfo(const Path & path);
|
||||
|
||||
|
@ -44,10 +46,10 @@ public:
|
|||
|
||||
Path queryPathFromHashPart(const string & hashPart);
|
||||
|
||||
bool hasSubstitutes(const Path & path);
|
||||
PathSet querySubstitutablePaths(const PathSet & paths);
|
||||
|
||||
bool querySubstitutablePathInfo(const Path & path,
|
||||
SubstitutablePathInfo & info);
|
||||
void querySubstitutablePathInfos(const PathSet & paths,
|
||||
SubstitutablePathInfos & infos);
|
||||
|
||||
Path addToStore(const Path & srcPath,
|
||||
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||
|
|
|
@ -19,16 +19,16 @@ GCOptions::GCOptions()
|
|||
bool isInStore(const Path & path)
|
||||
{
|
||||
return path[0] == '/'
|
||||
&& string(path, 0, nixStore.size()) == nixStore
|
||||
&& path.size() >= nixStore.size() + 2
|
||||
&& path[nixStore.size()] == '/';
|
||||
&& string(path, 0, settings.nixStore.size()) == settings.nixStore
|
||||
&& path.size() >= settings.nixStore.size() + 2
|
||||
&& path[settings.nixStore.size()] == '/';
|
||||
}
|
||||
|
||||
|
||||
bool isStorePath(const Path & path)
|
||||
{
|
||||
return isInStore(path)
|
||||
&& path.find('/', nixStore.size() + 1) == Path::npos;
|
||||
&& path.find('/', settings.nixStore.size() + 1) == Path::npos;
|
||||
}
|
||||
|
||||
|
||||
|
@ -43,7 +43,7 @@ Path toStorePath(const Path & path)
|
|||
{
|
||||
if (!isInStore(path))
|
||||
throw Error(format("path `%1%' is not in the Nix store") % path);
|
||||
Path::size_type slash = path.find('/', nixStore.size() + 1);
|
||||
Path::size_type slash = path.find('/', settings.nixStore.size() + 1);
|
||||
if (slash == Path::npos)
|
||||
return path;
|
||||
else
|
||||
|
@ -74,7 +74,7 @@ Path followLinksToStorePath(const Path & path)
|
|||
string storePathToName(const Path & path)
|
||||
{
|
||||
assertStorePath(path);
|
||||
return string(path, nixStore.size() + 34);
|
||||
return string(path, settings.nixStore.size() + 34);
|
||||
}
|
||||
|
||||
|
||||
|
@ -173,11 +173,11 @@ Path makeStorePath(const string & type,
|
|||
{
|
||||
/* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */
|
||||
string s = type + ":sha256:" + printHash(hash) + ":"
|
||||
+ nixStore + ":" + name;
|
||||
+ settings.nixStore + ":" + name;
|
||||
|
||||
checkStoreName(name);
|
||||
|
||||
return nixStore + "/"
|
||||
return settings.nixStore + "/"
|
||||
+ printHash32(compressHash(hashString(htSHA256, s), 20))
|
||||
+ "-" + name;
|
||||
}
|
||||
|
|
|
@ -80,6 +80,8 @@ struct SubstitutablePathInfo
|
|||
unsigned long long narSize; /* 0 = unknown */
|
||||
};
|
||||
|
||||
typedef std::map<Path, SubstitutablePathInfo> SubstitutablePathInfos;
|
||||
|
||||
|
||||
struct ValidPathInfo
|
||||
{
|
||||
|
@ -102,20 +104,23 @@ public:
|
|||
|
||||
virtual ~StoreAPI() { }
|
||||
|
||||
/* Checks whether a path is valid. */
|
||||
/* Check whether a path is valid. */
|
||||
virtual bool isValidPath(const Path & path) = 0;
|
||||
|
||||
/* Query the set of valid paths. */
|
||||
virtual PathSet queryValidPaths() = 0;
|
||||
/* Query which of the given paths is valid. */
|
||||
virtual PathSet queryValidPaths(const PathSet & paths) = 0;
|
||||
|
||||
/* Query the set of all valid paths. */
|
||||
virtual PathSet queryAllValidPaths() = 0;
|
||||
|
||||
/* Query information about a valid path. */
|
||||
virtual ValidPathInfo queryPathInfo(const Path & path) = 0;
|
||||
|
||||
/* Queries the hash of a valid path. */
|
||||
/* Query the hash of a valid path. */
|
||||
virtual Hash queryPathHash(const Path & path) = 0;
|
||||
|
||||
/* Queries the set of outgoing FS references for a store path.
|
||||
The result is not cleared. */
|
||||
/* Query the set of outgoing FS references for a store path. The
|
||||
result is not cleared. */
|
||||
virtual void queryReferences(const Path & path,
|
||||
PathSet & references) = 0;
|
||||
|
||||
|
@ -138,13 +143,14 @@ public:
|
|||
path, or "" if the path doesn't exist. */
|
||||
virtual Path queryPathFromHashPart(const string & hashPart) = 0;
|
||||
|
||||
/* Query whether a path has substitutes. */
|
||||
virtual bool hasSubstitutes(const Path & path) = 0;
|
||||
/* Query which of the given paths have substitutes. */
|
||||
virtual PathSet querySubstitutablePaths(const PathSet & paths) = 0;
|
||||
|
||||
/* Query the references, deriver and download size of a
|
||||
substitutable path. */
|
||||
virtual bool querySubstitutablePathInfo(const Path & path,
|
||||
SubstitutablePathInfo & info) = 0;
|
||||
/* Query substitute info (i.e. references, derivers and download
|
||||
sizes) of a set of paths. If a path does not have substitute
|
||||
info, it's omitted from the resulting ‘infos’ map. */
|
||||
virtual void querySubstitutablePathInfos(const PathSet & paths,
|
||||
SubstitutablePathInfos & infos) = 0;
|
||||
|
||||
/* Copy the contents of a path to the store and register the
|
||||
validity the resulting path. The resulting path is returned.
|
||||
|
|
|
@ -6,7 +6,7 @@ namespace nix {
|
|||
#define WORKER_MAGIC_1 0x6e697863
|
||||
#define WORKER_MAGIC_2 0x6478696f
|
||||
|
||||
#define PROTOCOL_VERSION 0x10b
|
||||
#define PROTOCOL_VERSION 0x10c
|
||||
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
|
||||
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
|
||||
|
||||
|
@ -32,13 +32,16 @@ typedef enum {
|
|||
wopCollectGarbage = 20,
|
||||
wopQuerySubstitutablePathInfo = 21,
|
||||
wopQueryDerivationOutputs = 22,
|
||||
wopQueryValidPaths = 23,
|
||||
wopQueryAllValidPaths = 23,
|
||||
wopQueryFailedPaths = 24,
|
||||
wopClearFailedPaths = 25,
|
||||
wopQueryPathInfo = 26,
|
||||
wopImportPaths = 27,
|
||||
wopQueryDerivationOutputNames = 28,
|
||||
wopQueryPathFromHashPart = 29,
|
||||
wopQuerySubstitutablePathInfos = 30,
|
||||
wopQueryValidPaths = 31,
|
||||
wopQuerySubstitutablePaths = 32,
|
||||
} WorkerOp;
|
||||
|
||||
|
||||
|
@ -60,5 +63,5 @@ typedef enum {
|
|||
Path readStorePath(Source & from);
|
||||
template<class T> T readStorePaths(Source & from);
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -253,7 +253,7 @@ string readLine(int fd)
|
|||
if (errno != EINTR)
|
||||
throw SysError("reading a line");
|
||||
} else if (rd == 0)
|
||||
throw Error("unexpected EOF reading a line");
|
||||
throw EndOfFile("unexpected EOF reading a line");
|
||||
else {
|
||||
if (ch == '\n') return s;
|
||||
s += ch;
|
||||
|
@ -1010,6 +1010,13 @@ string concatStringsSep(const string & sep, const Strings & ss)
|
|||
}
|
||||
|
||||
|
||||
string chomp(const string & s)
|
||||
{
|
||||
size_t i = s.find_last_not_of(" \n\r\t");
|
||||
return i == string::npos ? "" : string(s, 0, i + 1);
|
||||
}
|
||||
|
||||
|
||||
string statusToString(int status)
|
||||
{
|
||||
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
|
||||
|
|
|
@ -294,6 +294,10 @@ Strings tokenizeString(const string & s, const string & separators = " \t\n\r");
|
|||
string concatStringsSep(const string & sep, const Strings & ss);
|
||||
|
||||
|
||||
/* Remove trailing whitespace from a string. */
|
||||
string chomp(const string & s);
|
||||
|
||||
|
||||
/* Convert the exit status of a child as returned by wait() into an
|
||||
error string. */
|
||||
string statusToString(int status);
|
||||
|
|
|
@ -55,7 +55,6 @@ struct Globals
|
|||
EvalState state;
|
||||
bool dryRun;
|
||||
bool preserveInstalled;
|
||||
bool keepDerivations;
|
||||
string forceName;
|
||||
bool prebuiltOnly;
|
||||
};
|
||||
|
@ -113,6 +112,11 @@ static void getAllExprs(EvalState & state,
|
|||
StringSet namesSorted(names.begin(), names.end());
|
||||
|
||||
foreach (StringSet::iterator, i, namesSorted) {
|
||||
/* Ignore the manifest.nix used by profiles. This is
|
||||
necessary to prevent it from showing up in channels (which
|
||||
are implemented using profiles). */
|
||||
if (*i == "manifest.nix") continue;
|
||||
|
||||
Path path2 = path + "/" + *i;
|
||||
|
||||
struct stat st;
|
||||
|
@ -211,9 +215,12 @@ static int comparePriorities(EvalState & state,
|
|||
|
||||
static bool isPrebuilt(EvalState & state, const DrvInfo & elem)
|
||||
{
|
||||
assert(false);
|
||||
#if 0
|
||||
return
|
||||
store->isValidPath(elem.queryOutPath(state)) ||
|
||||
store->hasSubstitutes(elem.queryOutPath(state));
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@ -263,8 +270,8 @@ static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems,
|
|||
|
||||
if (k != newest.end()) {
|
||||
d = j->first.system == k->second.first.system ? 0 :
|
||||
j->first.system == thisSystem ? 1 :
|
||||
k->second.first.system == thisSystem ? -1 : 0;
|
||||
j->first.system == settings.thisSystem ? 1 :
|
||||
k->second.first.system == settings.thisSystem ? -1 : 0;
|
||||
if (d == 0)
|
||||
d = comparePriorities(state, j->first, k->second.first);
|
||||
if (d == 0)
|
||||
|
@ -495,7 +502,7 @@ static void installDerivations(Globals & globals,
|
|||
if (globals.dryRun) return;
|
||||
|
||||
if (createUserEnv(globals.state, allElems,
|
||||
profile, globals.keepDerivations, lockToken)) break;
|
||||
profile, settings.envKeepDerivations, lockToken)) break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -602,7 +609,7 @@ static void upgradeDerivations(Globals & globals,
|
|||
if (globals.dryRun) return;
|
||||
|
||||
if (createUserEnv(globals.state, newElems,
|
||||
globals.profile, globals.keepDerivations, lockToken)) break;
|
||||
globals.profile, settings.envKeepDerivations, lockToken)) break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -669,7 +676,7 @@ static void opSetFlag(Globals & globals,
|
|||
|
||||
/* Write the new user environment. */
|
||||
if (createUserEnv(globals.state, installedElems,
|
||||
globals.profile, globals.keepDerivations, lockToken)) break;
|
||||
globals.profile, settings.envKeepDerivations, lockToken)) break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -737,7 +744,7 @@ static void uninstallDerivations(Globals & globals, Strings & selectors,
|
|||
if (globals.dryRun) return;
|
||||
|
||||
if (createUserEnv(globals.state, newElems,
|
||||
profile, globals.keepDerivations, lockToken)) break;
|
||||
profile, settings.envKeepDerivations, lockToken)) break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -866,7 +873,7 @@ static void opQuery(Globals & globals,
|
|||
|
||||
enum { sInstalled, sAvailable } source = sInstalled;
|
||||
|
||||
readOnlyMode = true; /* makes evaluation a bit faster */
|
||||
settings.readOnlyMode = true; /* makes evaluation a bit faster */
|
||||
|
||||
for (Strings::iterator i = args.begin(); i != args.end(); ) {
|
||||
string arg = *i++;
|
||||
|
@ -929,6 +936,22 @@ static void opQuery(Globals & globals,
|
|||
installed.insert(i->queryOutPath(globals.state));
|
||||
}
|
||||
|
||||
|
||||
/* Query which paths have substitutes. */
|
||||
PathSet validPaths, substitutablePaths;
|
||||
if (printStatus) {
|
||||
PathSet paths;
|
||||
foreach (vector<DrvInfo>::iterator, i, elems2)
|
||||
try {
|
||||
paths.insert(i->queryOutPath(globals.state));
|
||||
} catch (AssertionError & e) {
|
||||
printMsg(lvlTalkative, format("skipping derivation named `%1%' which gives an assertion failure") % i->name);
|
||||
i->setFailed();
|
||||
}
|
||||
validPaths = store->queryValidPaths(paths);
|
||||
substitutablePaths = store->querySubstitutablePaths(paths);
|
||||
}
|
||||
|
||||
|
||||
/* Print the desired columns, or XML output. */
|
||||
Table table;
|
||||
|
@ -938,6 +961,8 @@ static void opQuery(Globals & globals,
|
|||
|
||||
foreach (vector<DrvInfo>::iterator, i, elems2) {
|
||||
try {
|
||||
if (i->hasFailed()) continue;
|
||||
|
||||
startNest(nest, lvlDebug, format("outputting query result `%1%'") % i->attrPath);
|
||||
|
||||
if (globals.prebuiltOnly && !isPrebuilt(globals.state, *i)) continue;
|
||||
|
@ -949,9 +974,10 @@ static void opQuery(Globals & globals,
|
|||
XMLAttrs attrs;
|
||||
|
||||
if (printStatus) {
|
||||
bool hasSubs = store->hasSubstitutes(i->queryOutPath(globals.state));
|
||||
bool isInstalled = installed.find(i->queryOutPath(globals.state)) != installed.end();
|
||||
bool isValid = store->isValidPath(i->queryOutPath(globals.state));
|
||||
Path outPath = i->queryOutPath(globals.state);
|
||||
bool hasSubs = substitutablePaths.find(outPath) != substitutablePaths.end();
|
||||
bool isInstalled = installed.find(outPath) != installed.end();
|
||||
bool isValid = validPaths.find(outPath) != validPaths.end();
|
||||
if (xmlOutput) {
|
||||
attrs["installed"] = isInstalled ? "1" : "0";
|
||||
attrs["valid"] = isValid ? "1" : "0";
|
||||
|
@ -1240,9 +1266,6 @@ void run(Strings args)
|
|||
globals.preserveInstalled = false;
|
||||
globals.prebuiltOnly = false;
|
||||
|
||||
globals.keepDerivations =
|
||||
queryBoolSetting("env-keep-derivations", false);
|
||||
|
||||
for (Strings::iterator i = args.begin(); i != args.end(); ) {
|
||||
string arg = *i++;
|
||||
|
||||
|
@ -1309,7 +1332,7 @@ void run(Strings args)
|
|||
Path profileLink = getHomeDir() + "/.nix-profile";
|
||||
globals.profile = pathExists(profileLink)
|
||||
? absPath(readLink(profileLink), dirOf(profileLink))
|
||||
: canonPath(nixStateDir + "/profiles/default");
|
||||
: canonPath(settings.nixStateDir + "/profiles/default");
|
||||
}
|
||||
|
||||
store = openStore();
|
||||
|
|
|
@ -96,11 +96,11 @@ void run(Strings args)
|
|||
if (arg == "-")
|
||||
readStdin = true;
|
||||
else if (arg == "--eval-only") {
|
||||
readOnlyMode = true;
|
||||
settings.readOnlyMode = true;
|
||||
evalOnly = true;
|
||||
}
|
||||
else if (arg == "--parse-only") {
|
||||
readOnlyMode = true;
|
||||
settings.readOnlyMode = true;
|
||||
parseOnly = evalOnly = true;
|
||||
}
|
||||
else if (arg == "--find-file")
|
||||
|
|
|
@ -47,7 +47,7 @@ LocalStore & ensureLocalStore()
|
|||
|
||||
|
||||
static Path useDeriver(Path path)
|
||||
{
|
||||
{
|
||||
if (!isDerivation(path)) {
|
||||
path = store->queryDeriver(path);
|
||||
if (path == "")
|
||||
|
@ -93,18 +93,18 @@ static PathSet realisePath(const Path & path)
|
|||
static void opRealise(Strings opFlags, Strings opArgs)
|
||||
{
|
||||
bool dryRun = false;
|
||||
|
||||
|
||||
foreach (Strings::iterator, i, opFlags)
|
||||
if (*i == "--dry-run") dryRun = true;
|
||||
else throw UsageError(format("unknown flag `%1%'") % *i);
|
||||
|
||||
foreach (Strings::iterator, i, opArgs)
|
||||
*i = followLinksToStorePath(*i);
|
||||
|
||||
|
||||
printMissing(*store, PathSet(opArgs.begin(), opArgs.end()));
|
||||
|
||||
|
||||
if (dryRun) return;
|
||||
|
||||
|
||||
/* Build all paths at the same time to exploit parallelism. */
|
||||
PathSet paths(opArgs.begin(), opArgs.end());
|
||||
store->buildPaths(paths);
|
||||
|
@ -132,7 +132,7 @@ static void opAdd(Strings opFlags, Strings opArgs)
|
|||
static void opAddFixed(Strings opFlags, Strings opArgs)
|
||||
{
|
||||
bool recursive = false;
|
||||
|
||||
|
||||
for (Strings::iterator i = opFlags.begin();
|
||||
i != opFlags.end(); ++i)
|
||||
if (*i == "--recursive") recursive = true;
|
||||
|
@ -140,7 +140,7 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
|
|||
|
||||
if (opArgs.empty())
|
||||
throw UsageError("first argument must be hash algorithm");
|
||||
|
||||
|
||||
HashType hashAlgo = parseHashType(opArgs.front());
|
||||
opArgs.pop_front();
|
||||
|
||||
|
@ -153,7 +153,7 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
|
|||
static void opPrintFixedPath(Strings opFlags, Strings opArgs)
|
||||
{
|
||||
bool recursive = false;
|
||||
|
||||
|
||||
for (Strings::iterator i = opFlags.begin();
|
||||
i != opFlags.end(); ++i)
|
||||
if (*i == "--recursive") recursive = true;
|
||||
|
@ -161,7 +161,7 @@ static void opPrintFixedPath(Strings opFlags, Strings opArgs)
|
|||
|
||||
if (opArgs.size() != 3)
|
||||
throw UsageError(format("`--print-fixed-path' requires three arguments"));
|
||||
|
||||
|
||||
Strings::iterator i = opArgs.begin();
|
||||
HashType hashAlgo = parseHashType(*i++);
|
||||
string hash = *i++;
|
||||
|
@ -209,12 +209,12 @@ static void printTree(const Path & path,
|
|||
|
||||
PathSet references;
|
||||
store->queryReferences(path, references);
|
||||
|
||||
#if 0
|
||||
|
||||
#if 0
|
||||
for (PathSet::iterator i = drv.inputSrcs.begin();
|
||||
i != drv.inputSrcs.end(); ++i)
|
||||
cout << format("%1%%2%\n") % (tailPad + treeConn) % *i;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Topologically sort under the relation A < B iff A \in
|
||||
closure(B). That is, if derivation A is an (possibly indirect)
|
||||
|
@ -270,7 +270,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
|
|||
else throw UsageError(format("unknown flag `%1%'") % *i);
|
||||
|
||||
switch (query) {
|
||||
|
||||
|
||||
case qOutputs: {
|
||||
foreach (Strings::iterator, i, opArgs) {
|
||||
*i = followLinksToStorePath(*i);
|
||||
|
@ -297,7 +297,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
|
|||
}
|
||||
}
|
||||
Paths sorted = topoSortPaths(*store, paths);
|
||||
for (Paths::reverse_iterator i = sorted.rbegin();
|
||||
for (Paths::reverse_iterator i = sorted.rbegin();
|
||||
i != sorted.rend(); ++i)
|
||||
cout << format("%s\n") % *i;
|
||||
break;
|
||||
|
@ -332,7 +332,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
|
|||
if (query == qHash) {
|
||||
assert(info.hash.type == htSHA256);
|
||||
cout << format("sha256:%1%\n") % printHash32(info.hash);
|
||||
} else if (query == qSize)
|
||||
} else if (query == qSize)
|
||||
cout << format("%1%\n") % info.narSize;
|
||||
}
|
||||
}
|
||||
|
@ -344,7 +344,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
|
|||
printTree(followLinksToStorePath(*i), "", "", done);
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
case qGraph: {
|
||||
PathSet roots;
|
||||
foreach (Strings::iterator, i, opArgs) {
|
||||
|
@ -370,7 +370,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
|
|||
cout << format("%1%\n") % followLinksToStorePath(*i);
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
case qRoots: {
|
||||
PathSet referrers;
|
||||
foreach (Strings::iterator, i, opArgs) {
|
||||
|
@ -384,7 +384,7 @@ static void opQuery(Strings opFlags, Strings opArgs)
|
|||
cout << format("%1%\n") % i->first;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
default:
|
||||
abort();
|
||||
}
|
||||
|
@ -430,9 +430,9 @@ static void opReadLog(Strings opFlags, Strings opArgs)
|
|||
|
||||
foreach (Strings::iterator, i, opArgs) {
|
||||
Path path = useDeriver(followLinksToStorePath(*i));
|
||||
|
||||
|
||||
Path logPath = (format("%1%/%2%/%3%") %
|
||||
nixLogDir % drvsLogDir % baseNameOf(path)).str();
|
||||
settings.nixLogDir % drvsLogDir % baseNameOf(path)).str();
|
||||
Path logBz2Path = logPath + ".bz2";
|
||||
|
||||
if (pathExists(logPath)) {
|
||||
|
@ -458,7 +458,7 @@ static void opReadLog(Strings opFlags, Strings opArgs)
|
|||
} while (err != BZ_STREAM_END);
|
||||
BZ2_bzReadClose(&err, bz);
|
||||
}
|
||||
|
||||
|
||||
else throw Error(format("build log of derivation `%1%' is not available") % path);
|
||||
}
|
||||
}
|
||||
|
@ -469,7 +469,7 @@ static void opDumpDB(Strings opFlags, Strings opArgs)
|
|||
if (!opFlags.empty()) throw UsageError("unknown flag");
|
||||
if (!opArgs.empty())
|
||||
throw UsageError("no arguments expected");
|
||||
PathSet validPaths = store->queryValidPaths();
|
||||
PathSet validPaths = store->queryAllValidPaths();
|
||||
foreach (PathSet::iterator, i, validPaths)
|
||||
cout << store->makeValidityRegistration(singleton<PathSet>(*i), true, true);
|
||||
}
|
||||
|
@ -478,7 +478,7 @@ static void opDumpDB(Strings opFlags, Strings opArgs)
|
|||
static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
|
||||
{
|
||||
ValidPathInfos infos;
|
||||
|
||||
|
||||
while (1) {
|
||||
ValidPathInfo info = decodeValidPathInfo(cin, hashGiven);
|
||||
if (info.path == "") break;
|
||||
|
@ -512,7 +512,7 @@ static void opRegisterValidity(Strings opFlags, Strings opArgs)
|
|||
{
|
||||
bool reregister = false; // !!! maybe this should be the default
|
||||
bool hashGiven = false;
|
||||
|
||||
|
||||
for (Strings::iterator i = opFlags.begin();
|
||||
i != opFlags.end(); ++i)
|
||||
if (*i == "--reregister") reregister = true;
|
||||
|
@ -528,7 +528,7 @@ static void opRegisterValidity(Strings opFlags, Strings opArgs)
|
|||
static void opCheckValidity(Strings opFlags, Strings opArgs)
|
||||
{
|
||||
bool printInvalid = false;
|
||||
|
||||
|
||||
for (Strings::iterator i = opFlags.begin();
|
||||
i != opFlags.end(); ++i)
|
||||
if (*i == "--print-invalid") printInvalid = true;
|
||||
|
@ -554,13 +554,13 @@ static string showBytes(unsigned long long bytes)
|
|||
}
|
||||
|
||||
|
||||
struct PrintFreed
|
||||
struct PrintFreed
|
||||
{
|
||||
bool show;
|
||||
const GCResults & results;
|
||||
PrintFreed(bool show, const GCResults & results)
|
||||
: show(show), results(results) { }
|
||||
~PrintFreed()
|
||||
~PrintFreed()
|
||||
{
|
||||
if (show)
|
||||
cout << format("%1% store paths deleted, %2% freed\n")
|
||||
|
@ -575,9 +575,9 @@ static void opGC(Strings opFlags, Strings opArgs)
|
|||
bool printRoots = false;
|
||||
GCOptions options;
|
||||
options.action = GCOptions::gcDeleteDead;
|
||||
|
||||
|
||||
GCResults results;
|
||||
|
||||
|
||||
/* Do what? */
|
||||
foreach (Strings::iterator, i, opFlags)
|
||||
if (*i == "--print-roots") printRoots = true;
|
||||
|
@ -616,14 +616,14 @@ static void opDelete(Strings opFlags, Strings opArgs)
|
|||
{
|
||||
GCOptions options;
|
||||
options.action = GCOptions::gcDeleteSpecific;
|
||||
|
||||
|
||||
foreach (Strings::iterator, i, opFlags)
|
||||
if (*i == "--ignore-liveness") options.ignoreLiveness = true;
|
||||
else throw UsageError(format("unknown flag `%1%'") % *i);
|
||||
|
||||
foreach (Strings::iterator, i, opArgs)
|
||||
options.pathsToDelete.insert(followLinksToStorePath(*i));
|
||||
|
||||
|
||||
GCResults results;
|
||||
PrintFreed freed(true, results);
|
||||
store->collectGarbage(options, results);
|
||||
|
@ -674,9 +674,9 @@ static void opImport(Strings opFlags, Strings opArgs)
|
|||
foreach (Strings::iterator, i, opFlags)
|
||||
if (*i == "--require-signature") requireSignature = true;
|
||||
else throw UsageError(format("unknown flag `%1%'") % *i);
|
||||
|
||||
|
||||
if (!opArgs.empty()) throw UsageError("no arguments expected");
|
||||
|
||||
|
||||
FdSource source(STDIN_FILENO);
|
||||
Paths paths = store->importPaths(requireSignature, source);
|
||||
|
||||
|
@ -703,12 +703,12 @@ static void opVerify(Strings opFlags, Strings opArgs)
|
|||
throw UsageError("no arguments expected");
|
||||
|
||||
bool checkContents = false;
|
||||
|
||||
|
||||
for (Strings::iterator i = opFlags.begin();
|
||||
i != opFlags.end(); ++i)
|
||||
if (*i == "--check-contents") checkContents = true;
|
||||
else throw UsageError(format("unknown flag `%1%'") % *i);
|
||||
|
||||
|
||||
ensureLocalStore().verifyStore(checkContents);
|
||||
}
|
||||
|
||||
|
@ -847,7 +847,7 @@ void run(Strings args)
|
|||
}
|
||||
else if (arg == "--indirect")
|
||||
indirectRoot = true;
|
||||
else if (arg[0] == '-') {
|
||||
else if (arg[0] == '-') {
|
||||
opFlags.push_back(arg);
|
||||
if (arg == "--max-freed" || arg == "--max-links" || arg == "--max-atime") { /* !!! hack */
|
||||
if (i != args.end()) opFlags.push_back(*i++);
|
||||
|
|
|
@ -95,7 +95,7 @@ static bool isFarSideClosed(int socket)
|
|||
throw Error("EOF expected (protocol error?)");
|
||||
else if (rd == -1 && errno != ECONNRESET)
|
||||
throw SysError("expected connection reset or EOF");
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,7 @@ static void stopWork(bool success = true, const string & msg = "", unsigned int
|
|||
we're either sending or receiving from the client, so we'll be
|
||||
notified of client death anyway. */
|
||||
setSigPollAction(false);
|
||||
|
||||
|
||||
canSendStderr = false;
|
||||
|
||||
if (success)
|
||||
|
@ -220,7 +220,7 @@ struct TunnelSource : BufferedSource
|
|||
so we have to disable the SIGPOLL handler. */
|
||||
setSigPollAction(false);
|
||||
canSendStderr = false;
|
||||
|
||||
|
||||
writeInt(STDERR_READ, to);
|
||||
writeInt(len, to);
|
||||
to.flush();
|
||||
|
@ -279,7 +279,7 @@ static void performOp(unsigned int clientVersion,
|
|||
{
|
||||
switch (op) {
|
||||
|
||||
#if 0
|
||||
#if 0
|
||||
case wopQuit: {
|
||||
/* Close the database. */
|
||||
store.reset((StoreAPI *) 0);
|
||||
|
@ -297,12 +297,30 @@ static void performOp(unsigned int clientVersion,
|
|||
break;
|
||||
}
|
||||
|
||||
case wopQueryValidPaths: {
|
||||
PathSet paths = readStorePaths<PathSet>(from);
|
||||
startWork();
|
||||
PathSet res = store->queryValidPaths(paths);
|
||||
stopWork();
|
||||
writeStrings(res, to);
|
||||
break;
|
||||
}
|
||||
|
||||
case wopHasSubstitutes: {
|
||||
Path path = readStorePath(from);
|
||||
startWork();
|
||||
bool result = store->hasSubstitutes(path);
|
||||
PathSet res = store->querySubstitutablePaths(singleton<PathSet>(path));
|
||||
stopWork();
|
||||
writeInt(result, to);
|
||||
writeInt(res.find(path) != res.end(), to);
|
||||
break;
|
||||
}
|
||||
|
||||
case wopQuerySubstitutablePaths: {
|
||||
PathSet paths = readStorePaths<PathSet>(from);
|
||||
startWork();
|
||||
PathSet res = store->querySubstitutablePaths(paths);
|
||||
stopWork();
|
||||
writeStrings(res, to);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -373,7 +391,7 @@ static void performOp(unsigned int clientVersion,
|
|||
|
||||
SavingSourceAdapter savedNAR(from);
|
||||
RetrieveRegularNARSink savedRegular;
|
||||
|
||||
|
||||
if (recursive) {
|
||||
/* Get the entire NAR dump from the client and save it to
|
||||
a string so that we can pass it to
|
||||
|
@ -382,13 +400,13 @@ static void performOp(unsigned int clientVersion,
|
|||
parseDump(sink, savedNAR);
|
||||
} else
|
||||
parseDump(savedRegular, from);
|
||||
|
||||
|
||||
startWork();
|
||||
if (!savedRegular.regular) throw Error("regular file expected");
|
||||
Path path = dynamic_cast<LocalStore *>(store.get())
|
||||
->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
|
||||
stopWork();
|
||||
|
||||
|
||||
writeString(path, to);
|
||||
break;
|
||||
}
|
||||
|
@ -494,41 +512,45 @@ static void performOp(unsigned int clientVersion,
|
|||
}
|
||||
|
||||
GCResults results;
|
||||
|
||||
|
||||
startWork();
|
||||
if (options.ignoreLiveness)
|
||||
throw Error("you are not allowed to ignore liveness");
|
||||
store->collectGarbage(options, results);
|
||||
stopWork();
|
||||
|
||||
|
||||
writeStrings(results.paths, to);
|
||||
writeLongLong(results.bytesFreed, to);
|
||||
writeLongLong(0, to); // obsolete
|
||||
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case wopSetOptions: {
|
||||
keepFailed = readInt(from) != 0;
|
||||
keepGoing = readInt(from) != 0;
|
||||
tryFallback = readInt(from) != 0;
|
||||
settings.keepFailed = readInt(from) != 0;
|
||||
settings.keepGoing = readInt(from) != 0;
|
||||
settings.tryFallback = readInt(from) != 0;
|
||||
verbosity = (Verbosity) readInt(from);
|
||||
maxBuildJobs = readInt(from);
|
||||
maxSilentTime = readInt(from);
|
||||
settings.maxBuildJobs = readInt(from);
|
||||
settings.maxSilentTime = readInt(from);
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
|
||||
useBuildHook = readInt(from) != 0;
|
||||
settings.useBuildHook = readInt(from) != 0;
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 4) {
|
||||
buildVerbosity = (Verbosity) readInt(from);
|
||||
settings.buildVerbosity = (Verbosity) readInt(from);
|
||||
logType = (LogType) readInt(from);
|
||||
printBuildTrace = readInt(from) != 0;
|
||||
settings.printBuildTrace = readInt(from) != 0;
|
||||
}
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 6)
|
||||
buildCores = readInt(from);
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 10) {
|
||||
int x = readInt(from);
|
||||
Strings ss;
|
||||
ss.push_back(x == 0 ? "false" : "true");
|
||||
overrideSetting("build-use-substitutes", ss);
|
||||
settings.buildCores = readInt(from);
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 10)
|
||||
settings.useSubstitutes = readInt(from) != 0;
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
|
||||
unsigned int n = readInt(from);
|
||||
for (unsigned int i = 0; i < n; i++) {
|
||||
string name = readString(from);
|
||||
string value = readString(from);
|
||||
settings.set("untrusted-" + name, value);
|
||||
}
|
||||
}
|
||||
startWork();
|
||||
stopWork();
|
||||
|
@ -538,23 +560,43 @@ static void performOp(unsigned int clientVersion,
|
|||
case wopQuerySubstitutablePathInfo: {
|
||||
Path path = absPath(readString(from));
|
||||
startWork();
|
||||
SubstitutablePathInfo info;
|
||||
bool res = store->querySubstitutablePathInfo(path, info);
|
||||
SubstitutablePathInfos infos;
|
||||
store->querySubstitutablePathInfos(singleton<PathSet>(path), infos);
|
||||
stopWork();
|
||||
writeInt(res ? 1 : 0, to);
|
||||
if (res) {
|
||||
writeString(info.deriver, to);
|
||||
writeStrings(info.references, to);
|
||||
writeLongLong(info.downloadSize, to);
|
||||
SubstitutablePathInfos::iterator i = infos.find(path);
|
||||
if (i == infos.end())
|
||||
writeInt(0, to);
|
||||
else {
|
||||
writeInt(1, to);
|
||||
writeString(i->second.deriver, to);
|
||||
writeStrings(i->second.references, to);
|
||||
writeLongLong(i->second.downloadSize, to);
|
||||
if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
|
||||
writeLongLong(info.narSize, to);
|
||||
writeLongLong(i->second.narSize, to);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case wopQueryValidPaths: {
|
||||
|
||||
case wopQuerySubstitutablePathInfos: {
|
||||
PathSet paths = readStorePaths<PathSet>(from);
|
||||
startWork();
|
||||
PathSet paths = store->queryValidPaths();
|
||||
SubstitutablePathInfos infos;
|
||||
store->querySubstitutablePathInfos(paths, infos);
|
||||
stopWork();
|
||||
writeInt(infos.size(), to);
|
||||
foreach (SubstitutablePathInfos::iterator, i, infos) {
|
||||
writeString(i->first, to);
|
||||
writeString(i->second.deriver, to);
|
||||
writeStrings(i->second.references, to);
|
||||
writeLongLong(i->second.downloadSize, to);
|
||||
writeLongLong(i->second.narSize, to);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case wopQueryAllValidPaths: {
|
||||
startWork();
|
||||
PathSet paths = store->queryAllValidPaths();
|
||||
stopWork();
|
||||
writeStrings(paths, to);
|
||||
break;
|
||||
|
@ -599,7 +641,7 @@ static void performOp(unsigned int clientVersion,
|
|||
static void processConnection()
|
||||
{
|
||||
canSendStderr = false;
|
||||
myPid = getpid();
|
||||
myPid = getpid();
|
||||
writeToStderr = tunnelStderr;
|
||||
|
||||
#ifdef HAVE_HUP_NOTIFICATION
|
||||
|
@ -643,7 +685,7 @@ static void processConnection()
|
|||
|
||||
stopWork();
|
||||
to.flush();
|
||||
|
||||
|
||||
} catch (Error & e) {
|
||||
stopWork(false, e.msg());
|
||||
to.flush();
|
||||
|
@ -652,7 +694,7 @@ static void processConnection()
|
|||
|
||||
/* Process client requests. */
|
||||
unsigned int opCount = 0;
|
||||
|
||||
|
||||
while (true) {
|
||||
WorkerOp op;
|
||||
try {
|
||||
|
@ -724,13 +766,13 @@ static void daemonLoop()
|
|||
|
||||
/* Otherwise, create and bind to a Unix domain socket. */
|
||||
else {
|
||||
|
||||
|
||||
/* Create and bind to a Unix domain socket. */
|
||||
fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
|
||||
if (fdSocket == -1)
|
||||
throw SysError("cannot create Unix domain socket");
|
||||
|
||||
string socketPath = nixStateDir + DEFAULT_SOCKET_PATH;
|
||||
string socketPath = settings.nixStateDir + DEFAULT_SOCKET_PATH;
|
||||
|
||||
createDirs(dirOf(socketPath));
|
||||
|
||||
|
@ -739,7 +781,7 @@ static void daemonLoop()
|
|||
relative path name. */
|
||||
chdir(dirOf(socketPath).c_str());
|
||||
Path socketPathRel = "./" + baseNameOf(socketPath);
|
||||
|
||||
|
||||
struct sockaddr_un addr;
|
||||
addr.sun_family = AF_UNIX;
|
||||
if (socketPathRel.size() >= sizeof(addr.sun_path))
|
||||
|
@ -764,7 +806,7 @@ static void daemonLoop()
|
|||
}
|
||||
|
||||
closeOnExec(fdSocket);
|
||||
|
||||
|
||||
/* Loop accepting connections. */
|
||||
while (1) {
|
||||
|
||||
|
@ -772,7 +814,7 @@ static void daemonLoop()
|
|||
/* Important: the server process *cannot* open the SQLite
|
||||
database, because it doesn't like forks very much. */
|
||||
assert(!store);
|
||||
|
||||
|
||||
/* Accept a connection. */
|
||||
struct sockaddr_un remoteAddr;
|
||||
socklen_t remoteAddrLen = sizeof(remoteAddr);
|
||||
|
@ -781,14 +823,14 @@ static void daemonLoop()
|
|||
(struct sockaddr *) &remoteAddr, &remoteAddrLen);
|
||||
checkInterrupt();
|
||||
if (remote == -1) {
|
||||
if (errno == EINTR)
|
||||
continue;
|
||||
else
|
||||
throw SysError("accepting connection");
|
||||
if (errno == EINTR)
|
||||
continue;
|
||||
else
|
||||
throw SysError("accepting connection");
|
||||
}
|
||||
|
||||
closeOnExec(remote);
|
||||
|
||||
|
||||
/* Get the identity of the caller, if possible. */
|
||||
uid_t clientUid = -1;
|
||||
pid_t clientPid = -1;
|
||||
|
@ -803,13 +845,13 @@ static void daemonLoop()
|
|||
#endif
|
||||
|
||||
printMsg(lvlInfo, format("accepted connection from pid %1%, uid %2%") % clientPid % clientUid);
|
||||
|
||||
|
||||
/* Fork a child to handle the connection. */
|
||||
pid_t child;
|
||||
child = fork();
|
||||
|
||||
|
||||
switch (child) {
|
||||
|
||||
|
||||
case -1:
|
||||
throw SysError("unable to fork");
|
||||
|
||||
|
@ -828,16 +870,12 @@ static void daemonLoop()
|
|||
string processName = int2String(clientPid);
|
||||
strncpy(argvSaved[1], processName.c_str(), strlen(argvSaved[1]));
|
||||
}
|
||||
|
||||
/* Since the daemon can be long-running, the
|
||||
settings may have changed. So force a reload. */
|
||||
reloadSettings();
|
||||
|
||||
|
||||
/* Handle the connection. */
|
||||
from.fd = remote;
|
||||
to.fd = remote;
|
||||
processConnection();
|
||||
|
||||
|
||||
} catch (std::exception & e) {
|
||||
std::cerr << format("child error: %1%\n") % e.what();
|
||||
}
|
||||
|
@ -857,7 +895,7 @@ void run(Strings args)
|
|||
{
|
||||
bool slave = false;
|
||||
bool daemon = false;
|
||||
|
||||
|
||||
for (Strings::iterator i = args.begin(); i != args.end(); ) {
|
||||
string arg = *i++;
|
||||
if (arg == "--slave") slave = true;
|
||||
|
|
|
@ -16,11 +16,13 @@
|
|||
-e "s^@shell\@^$(bash)^g" \
|
||||
-e "s^@curl\@^$(curl)^g" \
|
||||
-e "s^@bzip2\@^$(bzip2)^g" \
|
||||
-e "s^@xz\@^$(xz)^g" \
|
||||
-e "s^@perl\@^$(perl)^g" \
|
||||
-e "s^@perlFlags\@^$(perlFlags)^g" \
|
||||
-e "s^@coreutils\@^$(coreutils)^g" \
|
||||
-e "s^@sed\@^$(sed)^g" \
|
||||
-e "s^@tar\@^$(tar)^g" \
|
||||
-e "s^@tarFlags\@^$(tarFlags)^g" \
|
||||
-e "s^@gzip\@^$(gzip)^g" \
|
||||
-e "s^@pv\@^$(pv)^g" \
|
||||
-e "s^@tr\@^$(tr)^g" \
|
||||
|
|
|
@ -9,7 +9,8 @@ TESTS = init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
|
|||
gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \
|
||||
remote-store.sh export.sh export-graph.sh negative-caching.sh \
|
||||
binary-patching.sh timeout.sh secure-drv-outputs.sh nix-channel.sh \
|
||||
multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh
|
||||
multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
|
||||
binary-cache.sh
|
||||
|
||||
XFAIL_TESTS =
|
||||
|
||||
|
|
35
tests/binary-cache.sh
Normal file
35
tests/binary-cache.sh
Normal file
|
@ -0,0 +1,35 @@
|
|||
source common.sh
|
||||
|
||||
clearStore
|
||||
|
||||
# Create the binary cache.
|
||||
cacheDir=$TEST_ROOT/binary-cache
|
||||
rm -rf $cacheDir
|
||||
|
||||
outPath=$(nix-build dependencies.nix --no-out-link)
|
||||
|
||||
nix-push --dest $cacheDir $outPath
|
||||
|
||||
|
||||
# By default, a binary cache doesn't support "nix-env -qas", but does
|
||||
# support installation.
|
||||
clearStore
|
||||
rm -f $NIX_STATE_DIR/binary-cache*
|
||||
|
||||
nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---"
|
||||
|
||||
nix-store --option binary-caches "file://$cacheDir" -r $outPath
|
||||
|
||||
|
||||
# But with the right configuration, "nix-env -qas" should also work.
|
||||
clearStore
|
||||
rm -f $NIX_STATE_DIR/binary-cache*
|
||||
echo "WantMassQuery: 1" >> $cacheDir/nix-cache-info
|
||||
|
||||
nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "--S"
|
||||
|
||||
nix-store --option binary-caches "file://$cacheDir" -r $outPath
|
||||
|
||||
nix-store --check-validity $outPath
|
||||
nix-store -qR $outPath | grep input-2
|
||||
|
|
@ -7,14 +7,17 @@ mkdir -p $TEST_ROOT/cache2 $TEST_ROOT/patches
|
|||
RESULT=$TEST_ROOT/result
|
||||
|
||||
# Build version 1 and 2 of the "foo" package.
|
||||
nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest1 \
|
||||
nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 \
|
||||
$(nix-build -o $RESULT binary-patching.nix --arg version 1)
|
||||
mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest1
|
||||
|
||||
out2=$(nix-build -o $RESULT binary-patching.nix --arg version 2)
|
||||
nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest2 $out2
|
||||
nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out2
|
||||
mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest2
|
||||
|
||||
out3=$(nix-build -o $RESULT binary-patching.nix --arg version 3)
|
||||
nix-push --copy $TEST_ROOT/cache2 $TEST_ROOT/manifest3 $out3
|
||||
nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out3
|
||||
mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest3
|
||||
|
||||
rm $RESULT
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ export NIX_DB_DIR=$TEST_ROOT/db
|
|||
export NIX_CONF_DIR=$TEST_ROOT/etc
|
||||
export NIX_MANIFESTS_DIR=$TEST_ROOT/var/nix/manifests
|
||||
export SHARED=$TEST_ROOT/shared
|
||||
export NIX_REMOTE=$NIX_REMOTE_
|
||||
|
||||
export PATH=@bindir@:$PATH
|
||||
|
||||
|
@ -79,3 +80,5 @@ fail() {
|
|||
echo "$1"
|
||||
exit 1
|
||||
}
|
||||
|
||||
set -x
|
||||
|
|
|
@ -9,7 +9,7 @@ clearStore
|
|||
clearProfiles
|
||||
|
||||
cat > $TEST_ROOT/foo.nixpkg <<EOF
|
||||
NIXPKG1 file://$TEST_ROOT/manifest simple $system $drvPath $outPath
|
||||
NIXPKG1 file://$TEST_ROOT/cache/MANIFEST simple $system $drvPath $outPath
|
||||
EOF
|
||||
|
||||
nix-install-package --non-interactive -p $profiles/test $TEST_ROOT/foo.nixpkg
|
||||
|
|
|
@ -19,7 +19,7 @@ nix-channel --remove xyzzy
|
|||
# Create a channel.
|
||||
rm -rf $TEST_ROOT/foo
|
||||
mkdir -p $TEST_ROOT/foo
|
||||
nix-push --copy $TEST_ROOT/foo $TEST_ROOT/foo/MANIFEST $(nix-store -r $(nix-instantiate dependencies.nix))
|
||||
nix-push --dest $TEST_ROOT/foo --manifest --bzip2 $(nix-store -r $(nix-instantiate dependencies.nix))
|
||||
rm -rf $TEST_ROOT/nixexprs
|
||||
mkdir -p $TEST_ROOT/nixexprs
|
||||
cp config.nix dependencies.nix dependencies.builder*.sh $TEST_ROOT/nixexprs/
|
||||
|
|
|
@ -2,7 +2,7 @@ source common.sh
|
|||
|
||||
pullCache () {
|
||||
echo "pulling cache..."
|
||||
nix-pull file://$TEST_ROOT/manifest
|
||||
nix-pull file://$TEST_ROOT/cache/MANIFEST
|
||||
}
|
||||
|
||||
clearStore
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
source common.sh
|
||||
|
||||
clearStore
|
||||
|
||||
drvPath=$(nix-instantiate dependencies.nix)
|
||||
outPath=$(nix-store -r $drvPath)
|
||||
|
||||
|
@ -7,4 +9,4 @@ echo "pushing $drvPath"
|
|||
|
||||
mkdir -p $TEST_ROOT/cache
|
||||
|
||||
nix-push --copy $TEST_ROOT/cache $TEST_ROOT/manifest $drvPath
|
||||
nix-push --dest $TEST_ROOT/cache --manifest $drvPath --bzip2
|
||||
|
|
|
@ -10,6 +10,7 @@ touch $reference
|
|||
|
||||
echo "making registration..."
|
||||
|
||||
set +x
|
||||
for ((n = 0; n < $max; n++)); do
|
||||
storePath=$NIX_STORE_DIR/$n
|
||||
echo -n > $storePath
|
||||
|
@ -19,6 +20,7 @@ for ((n = 0; n < $max; n++)); do
|
|||
fi
|
||||
echo $storePath; echo; echo 2; echo $reference; echo $ref2
|
||||
done > $TEST_ROOT/reg_info
|
||||
set -x
|
||||
|
||||
echo "registering..."
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ source common.sh
|
|||
echo '*** testing slave mode ***'
|
||||
clearStore
|
||||
clearManifests
|
||||
NIX_REMOTE=slave $SHELL ./user-envs.sh
|
||||
NIX_REMOTE_=slave $SHELL ./user-envs.sh
|
||||
|
||||
echo '*** testing daemon mode ***'
|
||||
clearStore
|
||||
|
|
|
@ -2,22 +2,25 @@
|
|||
echo substituter args: $* >&2
|
||||
|
||||
if test $1 = "--query"; then
|
||||
while read cmd; do
|
||||
echo FOO $cmd >&2
|
||||
while read cmd args; do
|
||||
echo "CMD = $cmd, ARGS = $args" >&2
|
||||
if test "$cmd" = "have"; then
|
||||
read path
|
||||
if grep -q "$path" $TEST_ROOT/sub-paths; then
|
||||
echo 1
|
||||
else
|
||||
echo 0
|
||||
fi
|
||||
for path in $args; do
|
||||
read path
|
||||
if grep -q "$path" $TEST_ROOT/sub-paths; then
|
||||
echo $path
|
||||
fi
|
||||
done
|
||||
echo
|
||||
elif test "$cmd" = "info"; then
|
||||
read path
|
||||
echo 1
|
||||
echo "" # deriver
|
||||
echo 0 # nr of refs
|
||||
echo $((1 * 1024 * 1024)) # download size
|
||||
echo $((2 * 1024 * 1024)) # nar size
|
||||
for path in $args; do
|
||||
echo $path
|
||||
echo "" # deriver
|
||||
echo 0 # nr of refs
|
||||
echo $((1 * 1024 * 1024)) # download size
|
||||
echo $((2 * 1024 * 1024)) # nar size
|
||||
done
|
||||
echo
|
||||
else
|
||||
echo "bad command $cmd"
|
||||
exit 1
|
||||
|
@ -26,6 +29,7 @@ if test $1 = "--query"; then
|
|||
elif test $1 = "--substitute"; then
|
||||
mkdir $2
|
||||
echo "Hallo Wereld" > $2/hello
|
||||
echo # no expected hash
|
||||
else
|
||||
echo "unknown substituter operation"
|
||||
exit 1
|
||||
|
|
|
@ -2,21 +2,23 @@
|
|||
echo substituter2 args: $* >&2
|
||||
|
||||
if test $1 = "--query"; then
|
||||
while read cmd; do
|
||||
if test "$cmd" = "have"; then
|
||||
read path
|
||||
if grep -q "$path" $TEST_ROOT/sub-paths; then
|
||||
echo 1
|
||||
else
|
||||
echo 0
|
||||
fi
|
||||
elif test "$cmd" = "info"; then
|
||||
read path
|
||||
echo 1
|
||||
echo "" # deriver
|
||||
echo 0 # nr of refs
|
||||
echo 0 # download size
|
||||
echo 0 # nar size
|
||||
while read cmd args; do
|
||||
if test "$cmd" = have; then
|
||||
for path in $args; do
|
||||
if grep -q "$path" $TEST_ROOT/sub-paths; then
|
||||
echo $path
|
||||
fi
|
||||
done
|
||||
echo
|
||||
elif test "$cmd" = info; then
|
||||
for path in $args; do
|
||||
echo $path
|
||||
echo "" # deriver
|
||||
echo 0 # nr of refs
|
||||
echo 0 # download size
|
||||
echo 0 # nar size
|
||||
done
|
||||
echo
|
||||
else
|
||||
echo "bad command $cmd"
|
||||
exit 1
|
||||
|
|
Loading…
Reference in a new issue