diff --git a/configure.ac b/configure.ac
index 876e0a862..0443fe492 100644
--- a/configure.ac
+++ b/configure.ac
@@ -5,42 +5,42 @@ AM_INIT_AUTOMAKE([dist-bzip2 foreign])
AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [Nix version.])
-AC_CANONICAL_HOST
-
+AC_PROG_SED
# Construct a Nix system name (like "i686-linux").
+AC_CANONICAL_HOST
AC_MSG_CHECKING([for the canonical Nix system name])
-cpu_name=$(uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
-machine_name=$(uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
-case $machine_name in
- i*86)
- machine_name=i686
- ;;
- x86_64)
- machine_name=x86_64
- ;;
- ppc)
- machine_name=powerpc
- ;;
- *)
- if test "$cpu_name" != "unknown"; then
- machine_name=$cpu_name
- fi
- ;;
-esac
+AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
+ [Platform identifier (e.g., `i686-linux').]),
+ [system=$withval],
+ [case "$host_cpu" in
+ i*86)
+ machine_name="i686";;
+ amd64)
+ machine_name="x86_64";;
+ *)
+ machine_name="$host_cpu";;
+ esac
-sys_name=$(uname -s | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
+ case "$host_os" in
+ linux-gnu*)
+ # For backward compatibility, strip the `-gnu' part.
+ system="$machine_name-linux";;
+ *)
+ # Strip the version number from names such as `gnu0.3',
+ # `darwin10.2.0', etc.
+ system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
+ esac])
+
+sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
case $sys_name in
cygwin*)
sys_name=cygwin
;;
esac
-
-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
- [Platform identifier (e.g., `i686-linux').]),
- system=$withval, system="${machine_name}-${sys_name}")
+
AC_MSG_RESULT($system)
AC_SUBST(system)
AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
@@ -62,7 +62,7 @@ fi
# Solaris-specific stuff.
-if test "$sys_name" = "sunos"; then
+if test "$sys_name" = sunos; then
# Solaris requires -lsocket -lnsl for network functions
LIBS="-lsocket -lnsl $LIBS"
fi
@@ -255,7 +255,7 @@ AC_ARG_WITH(sqlite, AC_HELP_STRING([--with-sqlite=PATH],
[prefix of SQLite]),
sqlite=$withval, sqlite=)
AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite")
-SQLITE_VERSION=3070701
+SQLITE_VERSION=3070900
AC_SUBST(SQLITE_VERSION)
if test -z "$sqlite"; then
sqlite_lib='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)/libsqlite3.la'
diff --git a/doc/manual/nix-copy-closure.xml b/doc/manual/nix-copy-closure.xml
index 584e713f1..45cfc0f34 100644
--- a/doc/manual/nix-copy-closure.xml
+++ b/doc/manual/nix-copy-closure.xml
@@ -24,8 +24,11 @@
+
+
+
- user@machine
+ user@machine
paths
@@ -84,25 +87,36 @@ those paths. If this bothers you, use
Let the sending machine cryptographically sign the
dump of each path with the key in
- /nix/etc/nix/signing-key.sec. If the user on
- the target machine does not have direct access to the Nix store
- (i.e., if the target machine has a multi-user Nix installation),
- then the target machine will check the dump against
- /nix/etc/nix/signing-key.pub before unpacking
- it in its Nix store. This allows secure sharing of store paths
- between untrusted users on two machines, provided that there is a
- trust relation between the Nix installations on both machines
- (namely, they have matching public/secret keys).
+ sysconfdir/nix/signing-key.sec.
+ If the user on the target machine does not have direct access to
+ the Nix store (i.e., if the target machine has a multi-user Nix
+ installation), then the target machine will check the dump against
+ sysconfdir/nix/signing-key.pub
+ before unpacking it in its Nix store. This allows secure sharing
+ of store paths between untrusted users on two machines, provided
+ that there is a trust relation between the Nix installations on
+ both machines (namely, they have matching public/secret
+ keys).
-
+ / /
- Compress the dump of each path with
- gzip before sending it.
+ Compress the dump of each path with respectively
+ gzip, bzip2 or
+ xz before sending it. The corresponding
+ decompression program must be installed on the target
+ machine.
+
+
+ Also copy the outputs of store derivations included
+ in the closure.
+
+
+
diff --git a/doc/manual/nix-env.xml b/doc/manual/nix-env.xml
index 403ab2678..a03afaf5c 100644
--- a/doc/manual/nix-env.xml
+++ b/doc/manual/nix-env.xml
@@ -2,7 +2,7 @@
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-nix-env">
-
+
nix-env
1
@@ -60,7 +60,7 @@ environments.
nix-env takes exactly one
operation flag which indicates the subcommand to
be performed. These are documented below.
-
+
@@ -78,19 +78,19 @@ linkend="sec-common-options" />.
-
+
Specifies the Nix expression (designated below as
the active Nix expression) used by the
, , and
operations to obtain
derivations. The default is
~/.nix-defexpr.
-
+
-
+
-
+
Specifies the profile to be used by those
operations that operate on a profile (designated below as the
active profile). A profile is sequence of
@@ -98,11 +98,11 @@ linkend="sec-common-options" />.
which is the current generation. The default
profile is the target of the symbolic link
~/.nix-profile (see below).
-
+
-
+
-
+
For the ,
, ,
and
@@ -117,23 +117,23 @@ linkend="sec-common-options" />.
substitute is available).
-
+
system
-
+
By default, operations such as only include derivations matching the current
platform. This option allows you to use derivations for the
specified platform system. The special
value * causes derivations for any platform to
be included.
-
+
-
+
@@ -170,25 +170,25 @@ linkend="sec-common-options" />.
this directory.
-
+
~/.nix-profile
-
+
A symbolic link to the user's current profile. By
default, this symlink points to
prefix/var/nix/profiles/default.
The PATH environment variable should include
~/.nix-profile/bin for the user environment
to be visible to the user.
-
+
-
+
-
+
@@ -214,7 +214,7 @@ linkend="sec-common-options" />.
Description
-
+
The install operation creates a new user environment, based on
the current generation of the active profile, to which a set of store
paths described by args is added. The
@@ -256,7 +256,7 @@ number of possible ways:
attribute paths that select attributes from the
top-level Nix expression. This is faster than using derivation
names and unambiguous. To find out the attribute paths of available
- packages, use nix-env -qaA '*'.
+ packages, use nix-env -qaP '*'.
If
path is given,
@@ -306,20 +306,20 @@ number of possible ways:
-
+
Do not remove derivations with a name matching one
of the derivations being installed. Usually, trying to have two
versions of the same package installed in the same generation of a
profile will lead to an error in building the generation, due to
file name clashes between the two versions. However, this is not
the case for all packages.
-
+
-
+
Examples
@@ -327,7 +327,7 @@ number of possible ways:
active Nix expression:
-$ nix-env --install gcc-3.3.2
+$ nix-env --install gcc-3.3.2
installing `gcc-3.3.2'
uninstalling `gcc-3.1'
@@ -408,15 +408,15 @@ the following paths will be substituted:
/nix/store/8zbipvm4gp9jfqh9nnk1n3bary1a37gs-perl-XML-Parser-2.34
/nix/store/b8a2bg7gnyvvvjjibp4axg9x1hzkw36c-mono-1.1.4
...
-
+
-
+
-
+
Operation
@@ -442,7 +442,7 @@ the following paths will be substituted:
Description
-
+
The upgrade operation creates a new user environment, based on
the current generation of the active profile, in which all store paths
are replaced for which there are newer versions in the set of paths
@@ -459,47 +459,47 @@ the same symbolic name, only the one with the highest version is
installed.
-
+
Flags
-
+
Only upgrade a derivation to newer versions. This
is the default.
-
+
-
+
In addition to upgrading to newer versions, also
“upgrade” to derivations that have the same version. Version are
not a unique identification of a derivation, so there may be many
derivations that have the same version. This flag may be useful
to force “synchronisation” between the installed and available
derivations.
-
+
-
+
Only “upgrade” to derivations
that have the same version. This may not seem very useful, but it
actually is, e.g., when there is a new release of Nixpkgs and you
want to replace installed applications with the same versions
built against newer dependencies (to reduce the number of
dependencies floating around on your system).
-
+
-
+
In addition to upgrading to newer versions, also
“upgrade” to derivations that have the same or a lower version.
I.e., derivations may actually be downgraded depending on what is
available in the active Nix expression.
-
+
@@ -523,10 +523,10 @@ $ nix-env --upgrade pan
$ nix-env -u '*' (try to upgrade everything)
upgrading `hello-2.1.2' to `hello-2.1.3'
-upgrading `mozilla-1.2' to `mozilla-1.4'
+upgrading `mozilla-1.2' to `mozilla-1.4'
-
+
Versions
The upgrade operation determines whether a derivation
@@ -570,14 +570,14 @@ lexicographically (i.e., using case-sensitive string comparison).
2.3a < 2.3c
2.3pre1 < 2.3c
2.3pre1 < 2.3q
-
+
-
+
-
+
@@ -596,14 +596,14 @@ lexicographically (i.e., using case-sensitive string comparison).
Description
-
+
The uninstall operation creates a new user environment, based on
the current generation of the active profile, from which the store
paths designated by the symbolic names
names are removed.
-
+
Examples
@@ -611,11 +611,11 @@ $ nix-env --uninstall gcc
$ nix-env -e '*' (remove everything)
-
+
-
+
Operation
@@ -632,7 +632,7 @@ $ nix-env -e '*' (remove everything)
Description
-
+
The operation allows meta attributes
of installed packages to be modified. There are several attributes
that can be usefully modified, because they affect the behaviour of
@@ -670,7 +670,7 @@ script:
-
+
Examples
To prevent the currently installed Firefox from being upgraded:
@@ -716,13 +716,13 @@ $ nix-env --set-flag priority 10 gcc
-
+
-
+
-
+
Operation
Synopsis
@@ -738,9 +738,9 @@ $ nix-env --set-flag priority 10 gcc
-
+
-
+
@@ -785,7 +785,7 @@ $ nix-env --set-flag priority 10 gcc
-
+
names
@@ -793,7 +793,7 @@ $ nix-env --set-flag priority 10 gcc
Description
-
+
The query operation displays information about either the store
paths that are installed in the current generation of the active
profile (), or the derivations that are
@@ -817,23 +817,23 @@ operates.
-
+
The query operates on the store paths that are
installed in the current generation of the active profile. This
is the default.
-
+
-
+
The query operates on the derivations that are
available in the active Nix expression.
-
+
-
+
@@ -874,7 +874,7 @@ user environment elements, etc. -->
-
+
Print the status of the
derivation. The status consists of three characters. The first
is I or -, indicating
@@ -888,7 +888,7 @@ user environment elements, etc. -->
third is S or -, indicating
whether a substitute is available for the
derivation.
-
+
@@ -901,17 +901,17 @@ user environment elements, etc. -->
nix-env --install.
-
+
-
+
Suppress printing of the name
attribute of each derivation.
-
+
/
-
+
Compare installed versions to available versions,
or vice versa (if is given). This is
useful for quickly seeing whether upgrades for installed
@@ -926,21 +926,21 @@ user environment elements, etc. -->
or installed.
-
+
= version
At most the same version of the package is
available or installed.
-
+
> version
Only older versions of the package are
available or installed.
-
+
- ?
No version of the package is available or
@@ -951,45 +951,45 @@ user environment elements, etc. -->
-
+
-
+
Print the system attribute of
the derivation.
-
+
-
+
Print the path of the store
derivation.
-
+
-
+
Print the output path of the
derivation.
-
+
-
+
Print a short (one-line) description of the
derivation, if available. The description is taken from the
meta.description attribute of the
derivation.
-
+
-
+
Print all of the meta-attributes of the
derivation. This option is only available with
.
-
+
@@ -1023,7 +1023,7 @@ IP- ORBit2-2.8.3 (installed and by definition present)(show available derivations in the Nix expression foo.nix)
-$ nix-env -f ./foo.nix -qa '*'
+$ nix-env -f ./foo.nix -qa '*'
foo-1.2.3
$ nix-env -qc '*' (compare installed versions to what’s available)
@@ -1034,7 +1034,7 @@ firefox-1.0.4 < 1.0.7 (a more recent version is availab
...
(show info about a specific package, in XML)
-$ nix-env -qa --xml --description firefox
+$ nix-env -qa --xml --description firefox
-
-
-
+
+
@@ -1067,25 +1067,25 @@ $ nix-env -qa --xml --description firefox
Description
-
+
This operation makes path the current
profile for the user. That is, the symlink
~/.nix-profile is made to point to
path.
-
+
Examples
$ nix-env -S ~/my-profile
-
+
-
+
Operation
@@ -1101,7 +1101,7 @@ $ nix-env -S ~/my-profile
Description
-
+
This operation print a list of all the currently existing
generations for the active profile. These may be switched to using
the operation. It also prints
@@ -1121,11 +1121,11 @@ $ nix-env --list-generations
98 2004-02-06 16:24:33 (current)
-
+
-
+
Operation
@@ -1142,7 +1142,7 @@ $ nix-env --list-generations
Description
-
+
This operation deletes the specified generations of the current
profile. The generations can be a list of generation numbers, or the
special value old to delete all non-current
@@ -1150,7 +1150,7 @@ generations. Periodically deleting old generations is important to
make garbage collection effective.
-
+
Examples
@@ -1159,11 +1159,11 @@ $ nix-env --delete-generations 3 4 8
$ nix-env -p other_profile --delete-generations old
-
+
-
+
Operation
@@ -1183,7 +1183,7 @@ $ nix-env -p other_profile --delete-generations old
Description
-
+
This operation makes generation number
generation the current generation of the
active profile. That is, if the
@@ -1207,11 +1207,11 @@ $ nix-env -G 42
switching from generation 50 to 42
-
+
-
+
Operation
@@ -1226,7 +1226,7 @@ switching from generation 50 to 42
Description
-
+
This operation switches to the “previous” generation of the
active profile, that is, the highest numbered generation lower than
the current generation, if it exists. It is just a convenience
@@ -1246,9 +1246,9 @@ $ nix-env --rolback
error: no generation older than the current (91) exists
-
+
-
+
diff --git a/perl/Makefile.am b/perl/Makefile.am
index eded469f9..ccb08b447 100644
--- a/perl/Makefile.am
+++ b/perl/Makefile.am
@@ -2,10 +2,12 @@ perlversion := $(shell perl -e 'use Config; print $$Config{version};')
perlarchname := $(shell perl -e 'use Config; print $$Config{archname};')
perllibdir = $(libdir)/perl5/site_perl/$(perlversion)/$(perlarchname)
-PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/Config.pm.in
+PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in
+# Hack required by "make check".
all: $(PERL_MODULES:.in=)
- ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/Store.so
+ mkdir -p lib/auto/Nix/Store
+ ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/auto/Nix/Store/Store.so
install-exec-local: $(PERL_MODULES:.in=)
$(INSTALL) -d $(DESTDIR)$(perllibdir)/Nix
diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in
index 658305fd9..b657683be 100644
--- a/perl/lib/Nix/Config.pm.in
+++ b/perl/lib/Nix/Config.pm.in
@@ -4,6 +4,7 @@ $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
$libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
$manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests";
$logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
+$confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
$bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@";
$curl = "@curl@";
diff --git a/perl/lib/Nix/CopyClosure.pm b/perl/lib/Nix/CopyClosure.pm
new file mode 100644
index 000000000..045f6bfaf
--- /dev/null
+++ b/perl/lib/Nix/CopyClosure.pm
@@ -0,0 +1,46 @@
+package Nix::CopyClosure;
+
+use strict;
+use Nix::Config;
+use Nix::Store;
+
+
+sub copyTo {
+ my ($sshHost, $sshOpts, $storePaths, $compressor, $decompressor, $includeOutputs, $dryRun, $sign) = @_;
+
+ $compressor = "$compressor |" if $compressor ne "";
+ $decompressor = "$decompressor |" if $decompressor ne "";
+
+ # Get the closure of this path.
+ my @closure = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs,
+ map { followLinksToStorePath $_ } @{$storePaths})));
+
+ # Ask the remote host which paths are invalid. Because of limits
+ # to the command line length, do this in chunks. Eventually,
+ # we'll want to use ‘--from-stdin’, but we can't rely on the
+ # target having this option yet.
+ my @missing = ();
+ while (scalar(@closure) > 0) {
+ my @ps = splice(@closure, 0, 1500);
+ open(READ, "set -f; ssh $sshHost @{$sshOpts} nix-store --check-validity --print-invalid @ps|");
+ while (<READ>) {
+ chomp;
+ push @missing, $_;
+ }
+ close READ or die;
+ }
+
+ # Export the store paths and import them on the remote machine.
+ if (scalar @missing > 0) {
+ print STDERR "copying ", scalar @missing, " missing paths to ‘$sshHost’...\n";
+ #print STDERR " $_\n" foreach @missing;
+ unless ($dryRun) {
+ open SSH, "| $compressor ssh $sshHost @{$sshOpts} '$decompressor nix-store --import'" or die;
+ exportPaths(fileno(SSH), $sign, @missing);
+ close SSH or die "copying store paths to remote machine `$sshHost' failed: $?";
+ }
+ }
+}
+
+
+1;
diff --git a/perl/lib/Nix/Manifest.pm b/perl/lib/Nix/Manifest.pm
index 7790cfe3b..d1717a0a8 100644
--- a/perl/lib/Nix/Manifest.pm
+++ b/perl/lib/Nix/Manifest.pm
@@ -53,8 +53,14 @@ sub addPatch {
sub readManifest_ {
my ($manifest, $addNAR, $addPatch) = @_;
- open MANIFEST, "<$manifest"
- or die "cannot open `$manifest': $!";
+ # Decompress the manifest if necessary.
+ if ($manifest =~ /\.bz2$/) {
+ open MANIFEST, "$Nix::Config::bzip2 -d < $manifest |"
+ or die "cannot decompress `$manifest': $!";
+ } else {
+ open MANIFEST, "<$manifest"
+ or die "cannot open `$manifest': $!";
+ }
my $inside = 0;
my $type;
@@ -120,7 +126,6 @@ sub readManifest_ {
elsif (/^\s*Hash:\s*(\S+)\s*$/) { $hash = $1; }
elsif (/^\s*URL:\s*(\S+)\s*$/) { $url = $1; }
elsif (/^\s*Size:\s*(\d+)\s*$/) { $size = $1; }
- elsif (/^\s*SuccOf:\s*(\/\S+)\s*$/) { } # obsolete
elsif (/^\s*BasePath:\s*(\/\S+)\s*$/) { $basePath = $1; }
elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }
@@ -286,14 +291,22 @@ EOF
open MAINLOCK, ">>$lockFile" or die "unable to acquire lock ‘$lockFile’: $!\n";
flock(MAINLOCK, LOCK_EX) or die;
+ our $insertNAR = $dbh->prepare(
+ "insert into NARs(manifest, storePath, url, hash, size, narHash, " .
+ "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
+
+ our $insertPatch = $dbh->prepare(
+ "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
+ "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+
$dbh->begin_work;
# Read each manifest in $manifestDir and add it to the database,
# unless we've already done so on a previous run.
my %seen;
- for my $manifest (glob "$manifestDir/*.nixmanifest") {
- $manifest = Cwd::abs_path($manifest);
+ for my $manifestLink (glob "$manifestDir/*.nixmanifest") {
+ my $manifest = Cwd::abs_path($manifestLink);
my $timestamp = lstat($manifest)->mtime;
$seen{$manifest} = 1;
@@ -312,20 +325,16 @@ EOF
sub addNARToDB {
my ($storePath, $narFile) = @_;
- $dbh->do(
- "insert into NARs(manifest, storePath, url, hash, size, narHash, " .
- "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
- {}, $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
+ $insertNAR->execute(
+ $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
$narFile->{narHash}, $narFile->{narSize}, $narFile->{references},
$narFile->{deriver}, $narFile->{system});
};
sub addPatchToDB {
my ($storePath, $patch) = @_;
- $dbh->do(
- "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
- "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
- {}, $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
+ $insertPatch->execute(
+ $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
$patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize},
$patch->{patchType});
};
@@ -333,10 +342,10 @@ EOF
my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB);
if ($version < 3) {
- die "you have an old-style manifest `$manifest'; please delete it";
+ die "you have an old-style or corrupt manifest `$manifestLink'; please delete it";
}
if ($version >= 10) {
- die "manifest `$manifest' is too new; please delete it or upgrade Nix";
+ die "manifest `$manifestLink' is too new; please delete it or upgrade Nix";
}
}
diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm
index bef6e7460..4283e77a4 100644
--- a/perl/lib/Nix/Store.pm
+++ b/perl/lib/Nix/Store.pm
@@ -1,6 +1,5 @@
package Nix::Store;
-use 5.010001;
use strict;
use warnings;
@@ -12,7 +11,12 @@ our %EXPORT_TAGS = ( 'all' => [ qw( ) ] );
our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
-our @EXPORT = qw(isValidPath topoSortPaths computeFSClosure followLinksToStorePath);
+our @EXPORT = qw(
+ isValidPath queryReferences queryPathInfo queryDeriver queryPathHash
+ topoSortPaths computeFSClosure followLinksToStorePath exportPaths
+ hashPath hashFile hashString
+ addToStore makeFixedOutputPath
+);
our $VERSION = '0.15';
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index af71ad955..f8a577fce 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -18,10 +18,8 @@ using namespace nix;
void doInit()
{
if (!store) {
- nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", "/nix/store")));
- nixStateDir = canonPath(getEnv("NIX_STATE_DIR", "/nix/var/nix"));
- nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
try {
+ setDefaultsFromEnvironment();
store = openStore();
} catch (Error & e) {
croak(e.what());
@@ -69,7 +67,7 @@ SV * queryPathHash(char * path)
try {
doInit();
Hash hash = store->queryPathHash(path);
- string s = "sha256:" + printHash(hash);
+ string s = "sha256:" + printHash32(hash);
XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
} catch (Error & e) {
croak(e.what());
@@ -148,3 +146,73 @@ SV * followLinksToStorePath(char * path)
}
OUTPUT:
RETVAL
+
+
+void exportPaths(int fd, int sign, ...)
+ PPCODE:
+ try {
+ doInit();
+ Paths paths;
+ for (int n = 2; n < items; ++n) paths.push_back(SvPV_nolen(ST(n)));
+ FdSink sink(fd);
+ exportPaths(*store, paths, sign, sink);
+ } catch (Error & e) {
+ croak(e.what());
+ }
+
+
+SV * hashPath(char * algo, int base32, char * path)
+ PPCODE:
+ try {
+ Hash h = hashPath(parseHashType(algo), path).first;
+ string s = base32 ? printHash32(h) : printHash(h);
+ XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+ } catch (Error & e) {
+ croak(e.what());
+ }
+
+
+SV * hashFile(char * algo, int base32, char * path)
+ PPCODE:
+ try {
+ Hash h = hashFile(parseHashType(algo), path);
+ string s = base32 ? printHash32(h) : printHash(h);
+ XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+ } catch (Error & e) {
+ croak(e.what());
+ }
+
+
+SV * hashString(char * algo, int base32, char * s)
+ PPCODE:
+ try {
+ Hash h = hashString(parseHashType(algo), s);
+ string s = base32 ? printHash32(h) : printHash(h);
+ XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+ } catch (Error & e) {
+ croak(e.what());
+ }
+
+
+SV * addToStore(char * srcPath, int recursive, char * algo)
+ PPCODE:
+ try {
+ doInit();
+ Path path = store->addToStore(srcPath, recursive, parseHashType(algo));
+ XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+ } catch (Error & e) {
+ croak(e.what());
+ }
+
+
+SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
+ PPCODE:
+ try {
+ doInit();
+ HashType ht = parseHashType(algo);
+ Path path = makeFixedOutputPath(recursive, ht,
+ parseHash16or32(ht, hash), name);
+ XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+ } catch (Error & e) {
+ croak(e.what());
+ }
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in
index e8c76086d..c805d6740 100755
--- a/scripts/build-remote.pl.in
+++ b/scripts/build-remote.pl.in
@@ -3,7 +3,9 @@
use Fcntl ':flock';
use English '-no_match_vars';
use IO::Handle;
+use Nix::Config;
use Nix::SSH qw/sshOpts openSSHConnection/;
+use Nix::CopyClosure;
no warnings('once');
@@ -208,7 +210,7 @@ print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace;
my $maybeSign = "";
-$maybeSign = "--sign" if -e "/nix/etc/nix/signing-key.sec";
+$maybeSign = "--sign" if -e "$Nix::Config::confDir/signing-key.sec";
# Register the derivation as a temporary GC root. Note that $PPID is
@@ -224,8 +226,7 @@ sub removeRoots {
# Copy the derivation and its dependencies to the build machine.
-system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath @inputs") == 0
- or die "cannot copy inputs to $hostName: $?";
+Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne "");
# Perform the build.
@@ -239,7 +240,7 @@ my $buildFlags = "--max-silent-time $maxSilentTime --fallback --add-root $rootsD
# in which case every child receives SIGHUP; however, `-tt' doesn't
# work on some platforms when connection sharing is used.)
pipe STDIN, DUMMY; # make sure we have a readable STDIN
-if (system("ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
+if (system("exec ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
# Note that if we get exit code 100 from `nix-store -r', it
# denotes a permanent build failure (as opposed to an SSH problem
# or a temporary Nix problem). We propagate this to the caller to
@@ -259,7 +260,7 @@ foreach my $output (@outputs) {
my $maybeSignRemote = "";
$maybeSignRemote = "--sign" if $UID != 0;
- system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
+ system("exec ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
"| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0
or die "cannot copy $output from $hostName: $?";
}
diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in
index a827a995f..ef663dabb 100755
--- a/scripts/download-using-manifests.pl.in
+++ b/scripts/download-using-manifests.pl.in
@@ -3,6 +3,7 @@
use strict;
use Nix::Config;
use Nix::Manifest;
+use Nix::Store;
use POSIX qw(strftime);
use File::Temp qw(tempdir);
@@ -19,14 +20,8 @@ my $fast = 1;
my $dbh = updateManifestDB();
-sub isValidPath {
- my $p = shift;
- if ($fast) {
- return -e $p;
- } else {
- return system("$Nix::Config::binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
- }
-}
+# $hashCache->{$algo}->{$path} yields the $algo-hash of $path.
+my $hashCache;
sub parseHash {
@@ -101,15 +96,17 @@ sub computeSmallestDownload {
foreach my $patch (@{$patchList}) {
if (isValidPath($patch->{basePath})) {
- # !!! this should be cached
my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
- my $format = "--base32";
- $format = "" if $baseHashAlgo eq "md5";
- my $hash = $fast && $baseHashAlgo eq "sha256"
- ? `$Nix::Config::binDir/nix-store -q --hash "$patch->{basePath}"`
- : `$Nix::Config::binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`;
- chomp $hash;
- $hash =~ s/.*://;
+
+ my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}};
+ if (!defined $hash) {
+ $hash = $fast && $baseHashAlgo eq "sha256"
+ ? queryPathHash($patch->{basePath})
+ : hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath});
+ $hash =~ s/.*://;
+ $hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash;
+ }
+
next if $hash ne $baseHash;
}
push @queue, $patch->{basePath};
@@ -257,7 +254,7 @@ open LOGFILE, ">>$logFile" or die "cannot open log file $logFile";
my $date = strftime ("%F %H:%M:%S UTC", gmtime (time));
print LOGFILE "$$ get $targetPath $date\n";
-print "\n*** Trying to download/patch `$targetPath'\n";
+print STDERR "\n*** Trying to download/patch `$targetPath'\n";
# Compute the shortest path.
@@ -281,7 +278,7 @@ sub downloadFile {
$ENV{"PRINT_PATH"} = 1;
$ENV{"QUIET"} = 1;
my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`;
- die "download of `$url' failed" . ($! ? ": $!" : "") unless $? == 0;
+ die "download of `$url' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0;
chomp $path;
return $path;
}
@@ -293,17 +290,17 @@ while (scalar @path > 0) {
my $u = $edge->{start};
my $v = $edge->{end};
- print "\n*** Step $curStep/$maxStep: ";
+ print STDERR "\n*** Step $curStep/$maxStep: ";
if ($edge->{type} eq "present") {
- print "using already present path `$v'\n";
+ print STDERR "using already present path `$v'\n";
print LOGFILE "$$ present $v\n";
if ($curStep < $maxStep) {
# Since this is not the last step, the path will be used
# as a base to one or more patches. So turn the base path
# into a NAR archive, to which we can apply the patch.
- print " packing base path...\n";
+ print STDERR " packing base path...\n";
system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0
or die "cannot dump `$v'";
}
@@ -311,17 +308,17 @@ while (scalar @path > 0) {
elsif ($edge->{type} eq "patch") {
my $patch = $edge->{info};
- print "applying patch `$patch->{url}' to `$u' to create `$v'\n";
+ print STDERR "applying patch `$patch->{url}' to `$u' to create `$v'\n";
print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n";
# Download the patch.
- print " downloading patch...\n";
+ print STDERR " downloading patch...\n";
my $patchPath = downloadFile "$patch->{url}";
# Apply the patch to the NAR archive produced in step 1 (for
# the already present path) or a later step (for patch sequences).
- print " applying patch...\n";
+ print STDERR " applying patch...\n";
system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0
or die "cannot apply patch `$patchPath' to $tmpNar";
@@ -331,7 +328,7 @@ while (scalar @path > 0) {
} else {
# This was the last patch. Unpack the final NAR archive
# into the target path.
- print " unpacking patched archive...\n";
+ print STDERR " unpacking patched archive...\n";
system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0
or die "cannot unpack $tmpNar2 into `$v'";
}
@@ -341,13 +338,13 @@ while (scalar @path > 0) {
elsif ($edge->{type} eq "narfile") {
my $narFile = $edge->{info};
- print "downloading `$narFile->{url}' into `$v'\n";
+ print STDERR "downloading `$narFile->{url}' into `$v'\n";
my $size = $narFile->{size} || -1;
print LOGFILE "$$ narfile $narFile->{url} $size $v\n";
# Download the archive.
- print " downloading archive...\n";
+ print STDERR " downloading archive...\n";
my $narFilePath = downloadFile "$narFile->{url}";
if ($curStep < $maxStep) {
@@ -356,7 +353,7 @@ while (scalar @path > 0) {
or die "cannot unpack `$narFilePath' into `$v'";
} else {
# Unpack the archive into the target path.
- print " unpacking archive...\n";
+ print STDERR " unpacking archive...\n";
system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0
or die "cannot unpack `$narFilePath' into `$v'";
}
@@ -376,20 +373,15 @@ if (defined $finalNarHash) {
# The hash in the manifest can be either in base-16 or base-32.
# Handle both.
- my $extraFlag =
- ($hashAlgo eq "sha256" && length($hash) != 64)
- ? "--base32" : "";
+ my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath);
- my $hash2 = `$Nix::Config::binDir/nix-hash --type $hashAlgo $extraFlag $targetPath`
- or die "cannot compute hash of path `$targetPath'";
- chomp $hash2;
-
- die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2"
+ die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n"
if $hash ne $hash2;
} else {
- die "cannot check integrity of the downloaded path since its hash is not known";
+ die "cannot check integrity of the downloaded path since its hash is not known\n";
}
+print STDERR "\n";
print LOGFILE "$$ success\n";
close LOGFILE;
diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in
index 172acd9e7..8bb60e920 100755
--- a/scripts/nix-copy-closure.in
+++ b/scripts/nix-copy-closure.in
@@ -3,11 +3,12 @@
use Nix::SSH;
use Nix::Config;
use Nix::Store;
+use Nix::CopyClosure;
if (scalar @ARGV < 1) {
print STDERR <) {
- chomp;
- push @missing, $_;
- }
- close READ or die;
-
- # Export the store paths and import them on the remote machine.
- if (scalar @missing > 0) {
- print STDERR "copying these missing paths:\n";
- print STDERR " $_\n" foreach @missing;
- unless ($dryRun) {
- my $extraOpts = $sign ? "--sign" : "";
- system("set -f; nix-store --export $extraOpts @missing $compressor | ssh $sshHost @sshOpts '$decompressor nix-store --import'") == 0
- or die "copying store paths to remote machine `$sshHost' failed: $?";
- }
- }
-
+ Nix::CopyClosure::copyTo($sshHost, [ @sshOpts ], [ @storePaths ], $compressor, $decompressor, $includeOutputs, $dryRun, $sign);
}
else { # Copy FROM the remote machine.
@@ -110,10 +96,12 @@ else { # Copy FROM the remote machine.
close READ or die "nix-store on remote machine `$sshHost' failed: $?";
- # Export the store paths on the remote machine and import them on locally.
+ # Export the store paths on the remote machine and import them locally.
if (scalar @missing > 0) {
- print STDERR "copying these missing paths:\n";
- print STDERR " $_\n" foreach @missing;
+ print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n";
+ #print STDERR " $_\n" foreach @missing;
+ $compressor = "| $compressor" if $compressor ne "";
+ $decompressor = "$decompressor |" if $decompressor ne "";
unless ($dryRun) {
my $extraOpts = $sign ? "--sign" : "";
system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor $Nix::Config::binDir/nix-store --import") == 0
diff --git a/scripts/nix-prefetch-url.in b/scripts/nix-prefetch-url.in
index 45bad75f3..eea2b814b 100755
--- a/scripts/nix-prefetch-url.in
+++ b/scripts/nix-prefetch-url.in
@@ -1,165 +1,128 @@
-#! @shell@ -e
+#! @perl@ -w @perlFlags@
-url=$1
-expHash=$2
+use strict;
+use File::Basename;
+use File::Temp qw(tempdir);
+use File::stat;
+use Nix::Store;
+use Nix::Config;
-binDir=@bindir@
-if [ -n "$NIX_BIN_DIR" ]; then binDir="$NIX_BIN_DIR"; fi
+my $url = shift;
+my $expHash = shift;
+my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256";
+my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'};
-# needed to make it work on NixOS
-export PATH=$PATH:@coreutils@
+if (!defined $url || $url eq "") {
+ print STDERR <<EOF
+syntax: nix-prefetch-url URL [EXPECTED-HASH]
+EOF
+ ;
+ exit 1;
+}
+
+
+sub writeFile {
+ my ($fn, $s) = @_;
+ open TMP, ">$fn" or die;
+ print TMP "$s" or die;
+ close TMP or die;
+}
-hashFormat=
-if test "$hashType" != "md5"; then
- hashFormat=--base32
-fi
+sub readFile {
+ local $/ = undef;
+ my ($fn) = @_;
+ open TMP, "<$fn" or die;
+ my $s = <TMP>;
+ close TMP or die;
+ return $s;
+}
-if test -z "$url"; then
- echo "syntax: nix-prefetch-url URL [EXPECTED-HASH]" >&2
- exit 1
-fi
+my $tmpDir = tempdir("nix-prefetch-url.XXXXXX", CLEANUP => 1, TMPDIR => 1)
+ or die "cannot create a temporary directory";
+
+# Hack to support the mirror:// scheme from Nixpkgs.
+if ($url =~ /^mirror:\/\//) {
+ system("$Nix::Config::binDir/nix-build '<nixpkgs>' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0
+ or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n";
+ my @expanded = split ' ', readFile("$tmpDir/urls");
+ die "$0: cannot resolve ‘$url’" unless scalar @expanded > 0;
+ print STDERR "$url expands to $expanded[0]\n";
+ $url = $expanded[0];
+}
# Handle escaped characters in the URI. `+', `=' and `?' are the only
# characters that are valid in Nix store path names but have a special
# meaning in URIs.
-name=$(basename "$url" | @sed@ -e 's/%2b/+/g' -e 's/%3d/=/g' -e 's/%3f/\?/g')
-if test -z "$name"; then echo "invalid url"; exit 1; fi
+my $name = basename $url;
+die "cannot figure out file name for ‘$url’\n" if $name eq "";
+$name =~ s/%2b/+/g;
+$name =~ s/%3d/=/g;
+$name =~ s/%3f/?/g;
+my $finalPath;
+my $hash;
# If the hash was given, a file with that hash may already be in the
# store.
-if test -n "$expHash"; then
- finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$expHash" "$name")
- if ! $bindir/nix-store --check-validity "$finalPath" 2> /dev/null; then
- finalPath=
- fi
- hash=$expHash
-fi
-
-
-mkTempDir() {
- if test -n "$tmpPath"; then return; fi
- local i=0
- while true; do
- if test -z "$TMPDIR"; then TMPDIR=/tmp; fi
- tmpPath=$TMPDIR/nix-prefetch-url-$$-$i
- if mkdir "$tmpPath"; then break; fi
- # !!! to bad we can't check for ENOENT in mkdir, so this check
- # is slightly racy (it bombs out if somebody just removed
- # $tmpPath...).
- if ! test -e "$tmpPath"; then exit 1; fi
- i=$((i + 1))
- done
- trap removeTempDir EXIT SIGINT SIGQUIT
+if (defined $expHash) {
+ $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name);
+ if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; }
}
-removeTempDir() {
- if test -n "$tmpPath"; then
- rm -rf "$tmpPath" || true
- fi
-}
-
-
-doDownload() {
- @curl@ $cacheFlags --fail --location --max-redirs 20 --disable-epsv \
- --cookie-jar $tmpPath/cookies "$url" -o $tmpFile
-}
-
-
-# Hack to support the mirror:// scheme from Nixpkgs.
-if test "${url:0:9}" = "mirror://"; then
- if test -z "$NIXPKGS_ALL"; then
- echo "Resolving mirror:// URLs requires Nixpkgs. Please point \$NIXPKGS_ALL at a Nixpkgs tree." >&2
- exit 1
- fi
-
- mkTempDir
- nix-build "$NIXPKGS_ALL" -A resolveMirrorURLs --argstr url "$url" -o $tmpPath/urls > /dev/null
-
- expanded=($(cat $tmpPath/urls))
- if test "${#expanded[*]}" = 0; then
- echo "$0: cannot resolve $url." >&2
- exit 1
- fi
-
- echo "$url expands to ${expanded[*]} (using ${expanded[0]})" >&2
- url="${expanded[0]}"
-fi
-
-
# If we don't know the hash or a file with that hash doesn't exist,
# download the file and add it to the store.
-if test -z "$finalPath"; then
-
- mkTempDir
- tmpFile=$tmpPath/$name
+if (!defined $finalPath) {
+ my $tmpFile = "$tmpDir/$name";
+
# Optionally do timestamp-based caching of the download.
# Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is
# the hash and the timestamp of the file at $url. The caching of
# the file *contents* is done in Nix store, where it can be
# garbage-collected independently.
- if test -n "$NIX_DOWNLOAD_CACHE"; then
- echo -n "$url" > $tmpPath/url
- urlHash=$($binDir/nix-hash --type sha256 --base32 --flat $tmpPath/url)
- echo "$url" > "$NIX_DOWNLOAD_CACHE/$urlHash.url"
- cachedHashFN="$NIX_DOWNLOAD_CACHE/$urlHash.$hashType"
- cachedTimestampFN="$NIX_DOWNLOAD_CACHE/$urlHash.stamp"
- cacheFlags="--remote-time"
- if test -e "$cachedTimestampFN" -a -e "$cachedHashFN"; then
- # Only download the file if it is newer than the cached version.
- cacheFlags="$cacheFlags --time-cond $cachedTimestampFN"
- fi
- fi
-
+ my ($cachedTimestampFN, $cachedHashFN, @cacheFlags);
+ if (defined $cacheDir) {
+ my $urlHash = hashString("sha256", 1, $url);
+ writeFile "$cacheDir/$urlHash.url", $url;
+ $cachedHashFN = "$cacheDir/$urlHash.$hashType";
+ $cachedTimestampFN = "$cacheDir/$urlHash.stamp";
+ @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN;
+ }
+
# Perform the download.
- doDownload
+ my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || "")));
+ (system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of ‘$url’ failed\n";
- if test -n "$NIX_DOWNLOAD_CACHE" -a ! -e $tmpFile; then
+ if (defined $cacheDir && ! -e $tmpFile) {
# Curl didn't create $tmpFile, so apparently there's no newer
# file on the server.
- hash=$(cat $cachedHashFN)
- finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$hash" "$name")
- if ! $binDir/nix-store --check-validity "$finalPath" 2> /dev/null; then
- echo "cached contents of \`$url' disappeared, redownloading..." >&2
- finalPath=
- cacheFlags="--remote-time"
- doDownload
- fi
- fi
+ $hash = readFile $cachedHashFN or die;
+ $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name);
+ unless (isValidPath $finalPath) {
+ print STDERR "cached contents of ‘$url’ disappeared, redownloading...\n";
+ $finalPath = undef;
+ (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of ‘$url’ failed\n";
+ }
+ }
- if test -z "$finalPath"; then
-
- # Compute the hash.
- hash=$($binDir/nix-hash --type "$hashType" $hashFormat --flat $tmpFile)
- if ! test -n "$QUIET"; then echo "hash is $hash" >&2; fi
-
- if test -n "$NIX_DOWNLOAD_CACHE"; then
- echo $hash > $cachedHashFN
- touch -r $tmpFile $cachedTimestampFN
- fi
-
- # Add the downloaded file to the Nix store.
- finalPath=$($binDir/nix-store --add-fixed "$hashType" $tmpFile)
-
- if test -n "$expHash" -a "$expHash" != "$hash"; then
- echo "hash mismatch for URL \`$url'" >&2
- exit 1
- fi
+ if (!defined $finalPath) {
- fi
-fi
+ # Compute the hash.
+ $hash = hashFile($hashType, $hashType ne "md5", $tmpFile);
+ if (defined $cacheDir) {
+ writeFile $cachedHashFN, $hash;
+ my $st = stat($tmpFile) or die;
+ open STAMP, ">$cachedTimestampFN" or die; close STAMP;
+ utime($st->atime, $st->mtime, $cachedTimestampFN) or die;
+ }
+
+ # Add the downloaded file to the Nix store.
+ $finalPath = addToStore($tmpFile, 0, $hashType);
+ }
-if ! test -n "$QUIET"; then echo "path is $finalPath" >&2; fi
+ die "$0: hash mismatch for ‘$url’\n" if defined $expHash && $expHash ne $hash;
+}
-echo $hash
-
-if test -n "$PRINT_PATH"; then
- echo $finalPath
-fi
+print STDERR "path is ‘$finalPath’\n" unless $ENV{'QUIET'};
+print "$hash\n";
+print "$finalPath\n" if $ENV{'PRINT_PATH'};
diff --git a/scripts/nix-pull.in b/scripts/nix-pull.in
index f3cba0c02..74545a350 100755
--- a/scripts/nix-pull.in
+++ b/scripts/nix-pull.in
@@ -33,10 +33,6 @@ if (! -l $manifestDirLink) {
# Process the URLs specified on the command line.
-my %narFiles;
-my %patches;
-
-my $skipWrongStore = 0;
sub downloadFile {
my $url = shift;
@@ -59,16 +55,7 @@ sub processURL {
# First see if a bzipped manifest is available.
if (system("$Nix::Config::curl --fail --silent --head '$url'.bz2 > /dev/null") == 0) {
print "fetching list of Nix archives at `$url.bz2'...\n";
- my $bzipped = downloadFile "$url.bz2";
-
- $manifest = "$tmpDir/MANIFEST";
-
- system("$Nix::Config::bzip2 -d < $bzipped > $manifest") == 0
- or die "cannot decompress manifest";
-
- $manifest = (`$Nix::Config::binDir/nix-store --add $manifest`
- or die "cannot copy $manifest to the store");
- chomp $manifest;
+ $manifest = downloadFile "$url.bz2";
}
# Otherwise, just get the uncompressed manifest.
@@ -77,20 +64,6 @@ sub processURL {
$manifest = downloadFile $url;
}
- my $version = readManifest($manifest, \%narFiles, \%patches);
-
- die "`$url' is not a manifest or it is too old (i.e., for Nix <= 0.7)\n" if $version < 3;
- die "manifest `$url' is too new\n" if $version >= 5;
-
- if ($skipWrongStore) {
- foreach my $path (keys %narFiles) {
- if (substr($path, 0, length($storeDir) + 1) ne "$storeDir/") {
- print STDERR "warning: manifest `$url' assumes a Nix store at a different location than $storeDir, skipping...\n";
- exit 0;
- }
- }
- }
-
my $baseName = "unnamed";
if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component
$baseName = $1;
@@ -129,12 +102,12 @@ sub processURL {
while (@ARGV) {
my $url = shift @ARGV;
if ($url eq "--skip-wrong-store") {
- $skipWrongStore = 1;
+ # No-op, no longer supported.
} else {
processURL $url;
}
}
-my $size = scalar (keys %narFiles);
-print "$size store paths in manifest\n";
+# Update the cache.
+updateManifestDB();
diff --git a/scripts/nix-push.in b/scripts/nix-push.in
index dcdad5721..cf46d00df 100755
--- a/scripts/nix-push.in
+++ b/scripts/nix-push.in
@@ -198,8 +198,8 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
# In some exceptional cases (such as VM tests that use the Nix
# store of the host), the database doesn't contain the hash. So
# compute it.
- if ($narHash eq "sha256:0000000000000000000000000000000000000000000000000000") {
- $narHash = `$binDir/nix-hash --type sha256 '$storePath'`;
+ if ($narHash =~ /^sha256:0*$/) {
+ $narHash = `$binDir/nix-hash --type sha256 --base32 '$storePath'`;
die "cannot hash `$storePath'" if $? != 0;
chomp $narHash;
$narHash = "sha256:$narHash";
diff --git a/src/libexpr/common-opts.cc b/src/libexpr/common-opts.cc
index d029d2ec3..e0865d9fc 100644
--- a/src/libexpr/common-opts.cc
+++ b/src/libexpr/common-opts.cc
@@ -44,4 +44,15 @@ bool parseSearchPathArg(const string & arg, Strings::iterator & i,
}
+Path lookupFileArg(EvalState & state, string s)
+{
+ if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+ Path p = state.findFile(s.substr(1, s.size() - 2));
+ if (p == "") throw Error(format("file `%1%' was not found in the Nix search path (add it using $NIX_PATH or -I)") % p);
+ return p;
+ } else
+ return absPath(s);
+}
+
+
}
diff --git a/src/libexpr/common-opts.hh b/src/libexpr/common-opts.hh
index 6b7247fc3..c28641e90 100644
--- a/src/libexpr/common-opts.hh
+++ b/src/libexpr/common-opts.hh
@@ -14,6 +14,8 @@ bool parseOptionArg(const string & arg, Strings::iterator & i,
bool parseSearchPathArg(const string & arg, Strings::iterator & i,
const Strings::iterator & argsEnd, EvalState & state);
+Path lookupFileArg(EvalState & state, string s);
+
}
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index b11562baa..dc361c043 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -363,9 +363,8 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
foreach (PathSet::iterator, j, refs) {
drv.inputSrcs.insert(*j);
if (isDerivation(*j))
- drv.inputDrvs[*j] = store -> queryDerivationOutputNames(*j);
+ drv.inputDrvs[*j] = store->queryDerivationOutputNames(*j);
}
-
explicitlyPassed = true;
} else if (path.at(0) == '!') {
size_t index;
@@ -387,7 +386,7 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
debug(format("derivation uses `%1%'") % path);
if (!useDrvAsSrc && isDerivation(path))
if (explicitlyPassed)
- drv.inputDrvs[path] = store -> queryDerivationOutputNames(path);
+ drv.inputDrvs[path] = store->queryDerivationOutputNames(path);
else if (drv.inputDrvs.find(path) == drv.inputDrvs.end())
drv.inputDrvs[path] = singleton(output);
else
@@ -416,17 +415,7 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
HashType ht = parseHashType(outputHashAlgo);
if (ht == htUnknown)
throw EvalError(format("unknown hash algorithm `%1%'") % outputHashAlgo);
- Hash h(ht);
- if (outputHash.size() == h.hashSize * 2)
- /* hexadecimal representation */
- h = parseHash(ht, outputHash);
- else if (outputHash.size() == hashLength32(h))
- /* base-32 representation */
- h = parseHash32(ht, outputHash);
- else
- throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
- % outputHash % outputHashAlgo);
- string s = outputHash;
+ Hash h = parseHash16or32(ht, outputHash);
outputHash = printHash(h);
if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;
diff --git a/src/libmain/Makefile.am b/src/libmain/Makefile.am
index ababc3595..404353c62 100644
--- a/src/libmain/Makefile.am
+++ b/src/libmain/Makefile.am
@@ -7,13 +7,6 @@ libmain_la_LIBADD = ../libstore/libstore.la @BDW_GC_LIBS@
pkginclude_HEADERS = shared.hh
AM_CXXFLAGS = \
- -DNIX_STORE_DIR=\"$(storedir)\" \
- -DNIX_DATA_DIR=\"$(datadir)\" \
- -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
- -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
- -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
- -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
- -DNIX_BIN_DIR=\"$(bindir)\" \
-DNIX_VERSION=\"$(VERSION)\" \
-I$(srcdir)/.. -I$(srcdir)/../libutil \
-I$(srcdir)/../libstore
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 8532cdaad..d3b73f8fd 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -65,7 +65,7 @@ void printMissing(StoreAPI & store, const PathSet & paths)
}
if (!willSubstitute.empty()) {
- printMsg(lvlInfo, format("these paths will be downloaded/copied (%.2f MiB download, %.2f MiB unpacked):")
+ printMsg(lvlInfo, format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
% (downloadSize / (1024.0 * 1024.0))
% (narSize / (1024.0 * 1024.0)));
foreach (PathSet::iterator, i, willSubstitute)
@@ -90,23 +90,6 @@ static void setLogType(string lt)
}
-static void closeStore()
-{
- try {
- throw;
- } catch (std::exception & e) {
- printMsg(lvlError,
- format("FATAL: unexpected exception (closing store and aborting): %1%") % e.what());
- }
- try {
- store.reset((StoreAPI *) 0);
- } catch (...) {
- ignoreException();
- }
- abort();
-}
-
-
RemoveTempRoots::~RemoveTempRoots()
{
removeTempRoots();
@@ -120,30 +103,8 @@ static bool showTrace = false;
processor. */
static void initAndRun(int argc, char * * argv)
{
- /* Setup Nix paths. */
- nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
- nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
- nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
- nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
- nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
- nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
- nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
- nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
-
- string subs = getEnv("NIX_SUBSTITUTERS", "default");
- if (subs == "default") {
- substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
- substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
- } else
- substituters = tokenizeString(subs, ":");
-
- /* Get some settings from the configuration file. */
- thisSystem = querySetting("system", SYSTEM);
- maxBuildJobs = queryIntSetting("build-max-jobs", 1);
- buildCores = queryIntSetting("build-cores", 1);
- maxSilentTime = queryIntSetting("build-max-silent-time", 0);
- buildTimeout = queryIntSetting("build-timeout", 0);
-
+ setDefaultsFromEnvironment();
+
/* Catch SIGINT. */
struct sigaction act;
act.sa_handler = sigintHandler;
@@ -260,12 +221,6 @@ static void initAndRun(int argc, char * * argv)
exit. */
RemoveTempRoots removeTempRoots __attribute__((unused));
- /* Make sure that the database gets closed properly, even if
- terminate() is called (which happens sometimes due to bugs in
- destructor/exceptions interaction, but that needn't preclude a
- clean shutdown of the database). */
- std::set_terminate(closeStore);
-
run(remaining);
/* Close the Nix database. */
diff --git a/src/libstore/Makefile.am b/src/libstore/Makefile.am
index e19256b92..39a61233b 100644
--- a/src/libstore/Makefile.am
+++ b/src/libstore/Makefile.am
@@ -15,7 +15,16 @@ libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la ${aterm_
EXTRA_DIST = schema.sql
AM_CXXFLAGS = -Wall \
- ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil
+ ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil \
+ -DNIX_STORE_DIR=\"$(storedir)\" \
+ -DNIX_DATA_DIR=\"$(datadir)\" \
+ -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
+ -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
+ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
+ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
+ -DNIX_BIN_DIR=\"$(bindir)\" \
+ -I$(srcdir)/.. -I$(srcdir)/../libutil \
+ -I$(srcdir)/../libstore
local-store.lo: schema.sql.hh
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index d12f41d66..a8ef9b23e 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -1650,6 +1650,9 @@ void DerivationGoal::startBuilder()
(format("nixbld:!:%1%:\n")
% (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+ /* Create /etc/hosts with localhost entry. */
+ writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+
/* Bind-mount a user-configurable set of directories from the
host file system. The `/dev/pts' directory must be mounted
separately so that newly-created pseudo-terminals show
@@ -2199,9 +2202,7 @@ void SubstitutionGoal::tryNext()
if (subs.size() == 0) {
/* None left. Terminate this goal and let someone else deal
with it. */
- printMsg(lvlError,
- format("path `%1%' is required, but there is no substituter that can build it")
- % storePath);
+ debug(format("path `%1%' is required, but there is no substituter that can build it") % storePath);
amDone(ecFailed);
return;
}
@@ -2232,8 +2233,7 @@ void SubstitutionGoal::referencesValid()
trace("all references realised");
if (nrFailed > 0) {
- printMsg(lvlError,
- format("some references of path `%1%' could not be realised") % storePath);
+ debug(format("some references of path `%1%' could not be realised") % storePath);
amDone(ecFailed);
return;
}
@@ -2286,9 +2286,7 @@ void SubstitutionGoal::tryToRun()
return;
}
- printMsg(lvlInfo,
- format("substituting path `%1%' using substituter `%2%'")
- % storePath % sub);
+ printMsg(lvlInfo, format("fetching path `%1%'...") % storePath);
logPipe.create();
@@ -2364,19 +2362,15 @@ void SubstitutionGoal::finished()
try {
if (!statusOk(status))
- throw SubstError(format("builder for `%1%' %2%")
+ throw SubstError(format("fetching path `%1%' %2%")
% storePath % statusToString(status));
if (!pathExists(storePath))
- throw SubstError(
- format("substitute did not produce path `%1%'")
- % storePath);
+ throw SubstError(format("substitute did not produce path `%1%'") % storePath);
} catch (SubstError & e) {
- printMsg(lvlInfo,
- format("substitution of path `%1%' using substituter `%2%' failed: %3%")
- % storePath % sub % e.msg());
+ printMsg(lvlInfo, e.msg());
if (printBuildTrace) {
printMsg(lvlError, format("@ substituter-failed %1% %2% %3%")
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 2e9dc8823..5c22f1406 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -1,3 +1,5 @@
+#include "config.h"
+
#include "globals.hh"
#include "util.hh"
@@ -138,5 +140,33 @@ void reloadSettings()
settings.clear();
}
+
+void setDefaultsFromEnvironment()
+{
+ /* Setup Nix paths. */
+ nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
+ nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
+ nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
+ nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
+ nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
+ nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
+ nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
+ nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
+
+ string subs = getEnv("NIX_SUBSTITUTERS", "default");
+ if (subs == "default") {
+ substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
+ substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
+ } else
+ substituters = tokenizeString(subs, ":");
+
+ /* Get some settings from the configuration file. */
+ thisSystem = querySetting("system", SYSTEM);
+ maxBuildJobs = queryIntSetting("build-max-jobs", 1);
+ buildCores = queryIntSetting("build-cores", 1);
+ maxSilentTime = queryIntSetting("build-max-silent-time", 0);
+ buildTimeout = queryIntSetting("build-timeout", 0);
+}
+
}
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 231c1f850..12a9b9ca1 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -114,7 +114,9 @@ void overrideSetting(const string & name, const Strings & value);
void reloadSettings();
-
+void setDefaultsFromEnvironment();
+
+
}
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 8ca5daa9f..29817df9d 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -327,10 +327,9 @@ void LocalStore::openDB(bool create)
if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "setting synchronous mode");
- /* Set the SQLite journal mode. WAL mode is fastest, but doesn't
- seem entirely stable at the moment (Oct. 2010). Thus, use
- truncate mode by default. */
- string mode = queryBoolSetting("use-sqlite-wal", false) ? "wal" : "truncate";
+ /* Set the SQLite journal mode. WAL mode is fastest, so it's the
+ default. */
+ string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate";
string prevMode;
{
SQLiteStmt stmt;
@@ -367,7 +366,7 @@ void LocalStore::openDB(bool create)
stmtRegisterValidPath.create(db,
"insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
stmtUpdatePathInfo.create(db,
- "update ValidPaths set narSize = ? where path = ?;");
+ "update ValidPaths set narSize = ?, hash = ? where path = ?;");
stmtAddReference.create(db,
"insert or replace into Refs (referrer, reference) values (?, ?);");
stmtQueryPathInfo.create(db,
@@ -684,7 +683,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path)
}
-/* Update path info in the database. Currently only updated the
+/* Update path info in the database. Currently only updates the
narSize field. */
void LocalStore::updatePathInfo(const ValidPathInfo & info)
{
@@ -693,6 +692,7 @@ void LocalStore::updatePathInfo(const ValidPathInfo & info)
stmtUpdatePathInfo.bind64(info.narSize);
else
stmtUpdatePathInfo.bind(); // null
+ stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash));
stmtUpdatePathInfo.bind(info.path);
if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE)
throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path);
@@ -1125,16 +1125,14 @@ struct HashAndWriteSink : Sink
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
{
}
- virtual void operator ()
- (const unsigned char * data, unsigned int len)
+ virtual void operator () (const unsigned char * data, size_t len)
{
writeSink(data, len);
hashSink(data, len);
}
Hash currentHash()
{
- HashSink hashSinkClone(hashSink);
- return hashSinkClone.finish().first;
+ return hashSink.currentHash().first;
}
};
@@ -1180,7 +1178,7 @@ void LocalStore::exportPath(const Path & path, bool sign,
PathSet references;
queryReferences(path, references);
- writeStringSet(references, hashAndWriteSink);
+ writeStrings(references, hashAndWriteSink);
Path deriver = queryDeriver(path);
writeString(deriver, hashAndWriteSink);
@@ -1223,11 +1221,11 @@ struct HashAndReadSource : Source
{
hashing = true;
}
- virtual void operator ()
- (unsigned char * data, unsigned int len)
+ size_t read(unsigned char * data, size_t len)
{
- readSource(data, len);
- if (hashing) hashSink(data, len);
+ size_t n = readSource.read(data, len);
+ if (hashing) hashSink(data, n);
+ return n;
}
};
@@ -1267,7 +1265,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
Path dstPath = readStorePath(hashAndReadSource);
- PathSet references = readStorePaths(hashAndReadSource);
+ PathSet references = readStorePaths<PathSet>(hashAndReadSource);
Path deriver = readString(hashAndReadSource);
if (deriver != "") assertStorePath(deriver);
@@ -1278,7 +1276,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
bool haveSignature = readInt(hashAndReadSource) == 1;
if (requireSignature && !haveSignature)
- throw Error("imported archive lacks a signature");
+ throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);
if (haveSignature) {
string signature = readString(hashAndReadSource);
@@ -1354,6 +1352,19 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
}
+Paths LocalStore::importPaths(bool requireSignature, Source & source)
+{
+ Paths res;
+ while (true) {
+ unsigned long long n = readLongLong(source);
+ if (n == 0) break;
+ if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'");
+ res.push_back(importPath(requireSignature, source));
+ }
+ return res;
+}
+
+
void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed,
unsigned long long & blocksFreed)
{
@@ -1369,7 +1380,7 @@ void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFr
PathSet referrers; queryReferrers(path, referrers);
referrers.erase(path); /* ignore self-references */
if (!referrers.empty())
- throw PathInUse(format("cannot delete path `%1%' because it is in use by `%2%'")
+ throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%")
% path % showPaths(referrers));
invalidatePath(path);
}
@@ -1409,6 +1420,8 @@ void LocalStore::verifyStore(bool checkContents)
if (checkContents) {
printMsg(lvlInfo, "checking hashes...");
+ Hash nullHash(htSHA256);
+
foreach (PathSet::iterator, i, validPaths) {
try {
ValidPathInfo info = queryPathInfo(*i);
@@ -1417,17 +1430,30 @@ void LocalStore::verifyStore(bool checkContents)
printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
HashResult current = hashPath(info.hash.type, *i);
- if (current.first != info.hash) {
+ if (info.hash != nullHash && info.hash != current.first) {
printMsg(lvlError, format("path `%1%' was modified! "
"expected hash `%2%', got `%3%'")
% *i % printHash(info.hash) % printHash(current.first));
} else {
+
+ bool update = false;
+
+ /* Fill in missing hashes. */
+ if (info.hash == nullHash) {
+ printMsg(lvlError, format("fixing missing hash on `%1%'") % *i);
+ info.hash = current.first;
+ update = true;
+ }
+
/* Fill in missing narSize fields (from old stores). */
if (info.narSize == 0) {
printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
info.narSize = current.second;
- updatePathInfo(info);
+ update = true;
}
+
+ if (update) updatePathInfo(info);
+
}
} catch (Error & e) {
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index b97e2f406..2739c4eea 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -148,7 +148,7 @@ public:
void exportPath(const Path & path, bool sign,
Sink & sink);
- Path importPath(bool requireSignature, Source & source);
+ Paths importPaths(bool requireSignature, Source & source);
void buildDerivations(const PathSet & drvPaths);
@@ -261,6 +261,8 @@ private:
Path createTempDirInStore();
+ Path importPath(bool requireSignature, Source & source);
+
void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
};
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
index ade9c9aa2..c1f9e3ba7 100644
--- a/src/libstore/references.cc
+++ b/src/libstore/references.cc
@@ -57,11 +57,11 @@ struct RefScanSink : Sink
RefScanSink() : hashSink(htSHA256) { }
- void operator () (const unsigned char * data, unsigned int len);
+ void operator () (const unsigned char * data, size_t len);
};
-void RefScanSink::operator () (const unsigned char * data, unsigned int len)
+void RefScanSink::operator () (const unsigned char * data, size_t len)
{
hashSink(data, len);
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 84c87246f..ce99c205e 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -27,13 +27,15 @@ Path readStorePath(Source & from)
}
-PathSet readStorePaths(Source & from)
+template<class T> T readStorePaths(Source & from)
{
- PathSet paths = readStringSet(from);
- foreach (PathSet::iterator, i, paths) assertStorePath(*i);
+ T paths = readStrings<T>(from);
+ foreach (typename T::iterator, i, paths) assertStorePath(*i);
return paths;
}
+template PathSet readStorePaths(Source & from);
+
RemoteStore::RemoteStore()
{
@@ -65,6 +67,7 @@ void RemoteStore::openConnection()
/* Send the magic greeting, check for the reply. */
try {
writeInt(WORKER_MAGIC_1, to);
+ to.flush();
unsigned int magic = readInt(from);
if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
@@ -166,6 +169,7 @@ void RemoteStore::connectToDaemon()
RemoteStore::~RemoteStore()
{
try {
+ to.flush();
fdSocket.close();
if (child != -1)
child.wait(true);
@@ -213,7 +217,7 @@ PathSet RemoteStore::queryValidPaths()
openConnection();
writeInt(wopQueryValidPaths, to);
processStderr();
- return readStorePaths(from);
+ return readStorePaths<PathSet>(from);
}
@@ -240,7 +244,7 @@ bool RemoteStore::querySubstitutablePathInfo(const Path & path,
if (reply == 0) return false;
info.deriver = readString(from);
if (info.deriver != "") assertStorePath(info.deriver);
- info.references = readStorePaths(from);
+ info.references = readStorePaths<PathSet>(from);
info.downloadSize = readLongLong(from);
info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
return true;
@@ -258,7 +262,7 @@ ValidPathInfo RemoteStore::queryPathInfo(const Path & path)
info.deriver = readString(from);
if (info.deriver != "") assertStorePath(info.deriver);
info.hash = parseHash(htSHA256, readString(from));
- info.references = readStorePaths(from);
+ info.references = readStorePaths<PathSet>(from);
info.registrationTime = readInt(from);
info.narSize = readLongLong(from);
return info;
@@ -283,7 +287,7 @@ void RemoteStore::queryReferences(const Path & path,
writeInt(wopQueryReferences, to);
writeString(path, to);
processStderr();
- PathSet references2 = readStorePaths(from);
+ PathSet references2 = readStorePaths<PathSet>(from);
references.insert(references2.begin(), references2.end());
}
@@ -295,7 +299,7 @@ void RemoteStore::queryReferrers(const Path & path,
writeInt(wopQueryReferrers, to);
writeString(path, to);
processStderr();
- PathSet referrers2 = readStorePaths(from);
+ PathSet referrers2 = readStorePaths<PathSet>(from);
referrers.insert(referrers2.begin(), referrers2.end());
}
@@ -318,7 +322,7 @@ PathSet RemoteStore::queryDerivationOutputs(const Path & path)
writeInt(wopQueryDerivationOutputs, to);
writeString(path, to);
processStderr();
- return readStorePaths(from);
+ return readStorePaths<PathSet>(from);
}
@@ -338,7 +342,7 @@ Path RemoteStore::addToStore(const Path & _srcPath,
openConnection();
Path srcPath(absPath(_srcPath));
-
+
writeInt(wopAddToStore, to);
writeString(baseNameOf(srcPath), to);
/* backwards compatibility hack */
@@ -358,7 +362,7 @@ Path RemoteStore::addTextToStore(const string & name, const string & s,
writeInt(wopAddTextToStore, to);
writeString(name, to);
writeString(s, to);
- writeStringSet(references, to);
+ writeStrings(references, to);
processStderr();
return readStorePath(from);
@@ -377,14 +381,14 @@ void RemoteStore::exportPath(const Path & path, bool sign,
}
-Path RemoteStore::importPath(bool requireSignature, Source & source)
+Paths RemoteStore::importPaths(bool requireSignature, Source & source)
{
openConnection();
- writeInt(wopImportPath, to);
+ writeInt(wopImportPaths, to);
/* We ignore requireSignature, since the worker forces it to true
- anyway. */
+ anyway. */
processStderr(0, &source);
- return readStorePath(from);
+ return readStorePaths<Paths>(from);
}
@@ -392,7 +396,7 @@ void RemoteStore::buildDerivations(const PathSet & drvPaths)
{
openConnection();
writeInt(wopBuildDerivations, to);
- writeStringSet(drvPaths, to);
+ writeStrings(drvPaths, to);
processStderr();
readInt(from);
}
@@ -459,7 +463,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
writeInt(wopCollectGarbage, to);
writeInt(options.action, to);
- writeStringSet(options.pathsToDelete, to);
+ writeStrings(options.pathsToDelete, to);
writeInt(options.ignoreLiveness, to);
writeLongLong(options.maxFreed, to);
writeInt(options.maxLinks, to);
@@ -471,7 +475,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
processStderr();
- results.paths = readStringSet(from);
+ results.paths = readStrings<PathSet>(from);
results.bytesFreed = readLongLong(from);
results.blocksFreed = readLongLong(from);
}
@@ -482,7 +486,7 @@ PathSet RemoteStore::queryFailedPaths()
openConnection();
writeInt(wopQueryFailedPaths, to);
processStderr();
- return readStorePaths(from);
+ return readStorePaths<PathSet>(from);
}
@@ -490,7 +494,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths)
{
openConnection();
writeInt(wopClearFailedPaths, to);
- writeStringSet(paths, to);
+ writeStrings(paths, to);
processStderr();
readInt(from);
}
@@ -498,6 +502,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths)
void RemoteStore::processStderr(Sink * sink, Source * source)
{
+ to.flush();
unsigned int msg;
while ((msg = readInt(from)) == STDERR_NEXT
|| msg == STDERR_READ || msg == STDERR_WRITE) {
@@ -508,11 +513,11 @@ void RemoteStore::processStderr(Sink * sink, Source * source)
}
else if (msg == STDERR_READ) {
if (!source) throw Error("no source");
- unsigned int len = readInt(from);
+ size_t len = readInt(from);
unsigned char * buf = new unsigned char[len];
AutoDeleteArray<unsigned char> d(buf);
- (*source)(buf, len);
- writeString(string((const char *) buf, len), to);
+ writeString(buf, source->read(buf, len), to);
+ to.flush();
}
else {
string s = readString(from);
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 3be9e315a..c5853ef53 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -58,7 +58,7 @@ public:
void exportPath(const Path & path, bool sign,
Sink & sink);
- Path importPath(bool requireSignature, Source & source);
+ Paths importPaths(bool requireSignature, Source & source);
void buildDerivations(const PathSet & drvPaths);
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index d67ff2c77..36ade2170 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -298,6 +298,17 @@ string showPaths(const PathSet & paths)
}
+void exportPaths(StoreAPI & store, const Paths & paths,
+ bool sign, Sink & sink)
+{
+ foreach (Paths::const_iterator, i, paths) {
+ writeInt(1, sink);
+ store.exportPath(*i, sign, sink);
+ }
+ writeInt(0, sink);
+}
+
+
}
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 038465749..14890f522 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -154,9 +154,7 @@ public:
/* Copy the contents of a path to the store and register the
validity the resulting path. The resulting path is returned.
- If `fixed' is true, then the output of a fixed-output
- derivation is pre-loaded into the Nix store. The function
- object `filter' can be used to exclude files (see
+ The function object `filter' can be used to exclude files (see
libutil/archive.hh). */
virtual Path addToStore(const Path & srcPath,
bool recursive = true, HashType hashAlgo = htSHA256,
@@ -174,9 +172,9 @@ public:
virtual void exportPath(const Path & path, bool sign,
Sink & sink) = 0;
- /* Import a NAR dump created by exportPath() into the Nix
- store. */
- virtual Path importPath(bool requireSignature, Source & source) = 0;
+ /* Import a sequence of NAR dumps created by exportPaths() into
+ the Nix store. */
+ virtual Paths importPaths(bool requireSignature, Source & source) = 0;
/* Ensure that the output paths of the derivation are valid. If
they are already valid, this is a no-op. Otherwise, validity
@@ -345,6 +343,12 @@ ValidPathInfo decodeValidPathInfo(std::istream & str,
bool hashGiven = false);
+/* Export multiple paths in the format expected by ‘nix-store
+ --import’. */
+void exportPaths(StoreAPI & store, const Paths & paths,
+ bool sign, Sink & sink);
+
+
}
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 576e754d2..ef1e0993d 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -8,7 +8,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
-#define PROTOCOL_VERSION 0x108
+#define PROTOCOL_VERSION 0x109
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@@ -29,7 +29,6 @@ typedef enum {
wopSyncWithGC = 13,
wopFindRoots = 14,
wopExportPath = 16,
- wopImportPath = 17,
wopQueryDeriver = 18,
wopSetOptions = 19,
wopCollectGarbage = 20,
@@ -39,7 +38,8 @@ typedef enum {
wopQueryFailedPaths = 24,
wopClearFailedPaths = 25,
wopQueryPathInfo = 26,
- wopQueryDerivationOutputNames = 27,
+ wopImportPaths = 27,
+ wopQueryDerivationOutputNames = 28,
} WorkerOp;
@@ -59,7 +59,7 @@ typedef enum {
Path readStorePath(Source & from);
-PathSet readStorePaths(Source & from);
+template<class T> T readStorePaths(Source & from);
}
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index b9e784699..bbfe7847f 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -204,6 +204,22 @@ Hash parseHash32(HashType ht, const string & s)
}
+Hash parseHash16or32(HashType ht, const string & s)
+{
+ Hash hash(ht);
+ if (s.size() == hash.hashSize * 2)
+ /* hexadecimal representation */
+ hash = parseHash(ht, s);
+ else if (s.size() == hashLength32(hash))
+ /* base-32 representation */
+ hash = parseHash32(ht, s);
+ else
+ throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
+ % s % printHashType(ht));
+ return hash;
+}
+
+
bool isHash(const string & s)
{
if (s.length() != 32) return false;
@@ -290,21 +306,13 @@ HashSink::HashSink(HashType ht) : ht(ht)
start(ht, *ctx);
}
-HashSink::HashSink(const HashSink & h)
-{
- ht = h.ht;
- bytes = h.bytes;
- ctx = new Ctx;
- *ctx = *h.ctx;
-}
-
HashSink::~HashSink()
{
+ bufPos = 0;
delete ctx;
}
-void HashSink::operator ()
- (const unsigned char * data, unsigned int len)
+void HashSink::write(const unsigned char * data, size_t len)
{
bytes += len;
update(ht, *ctx, data, len);
@@ -312,11 +320,21 @@ void HashSink::operator ()
HashResult HashSink::finish()
{
+ flush();
Hash hash(ht);
nix::finish(ht, *ctx, hash.hash);
return HashResult(hash, bytes);
}
+HashResult HashSink::currentHash()
+{
+ flush();
+ Ctx ctx2 = *ctx;
+ Hash hash(ht);
+ nix::finish(ht, ctx2, hash.hash);
+ return HashResult(hash, bytes);
+}
+
HashResult hashPath(
HashType ht, const Path & path, PathFilter & filter)
diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh
index 13740954d..e0b6478cc 100644
--- a/src/libutil/hash.hh
+++ b/src/libutil/hash.hh
@@ -58,6 +58,9 @@ string printHash32(const Hash & hash);
/* Parse a base-32 representation of a hash code. */
Hash parseHash32(HashType ht, const string & s);
+/* Parse a base-16 or base-32 representation of a hash code. */
+Hash parseHash16or32(HashType ht, const string & s);
+
/* Verify that the given string is a valid hash code. */
bool isHash(const string & s);
@@ -88,7 +91,7 @@ string printHashType(HashType ht);
union Ctx;
-class HashSink : public Sink
+class HashSink : public BufferedSink
{
private:
HashType ht;
@@ -99,8 +102,9 @@ public:
HashSink(HashType ht);
HashSink(const HashSink & h);
~HashSink();
- virtual void operator () (const unsigned char * data, unsigned int len);
+ void write(const unsigned char * data, size_t len);
HashResult finish();
+ HashResult currentHash();
};
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 9b4222713..c4563ffd1 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -2,24 +2,117 @@
#include "util.hh"
#include <cstring>
+#include <cerrno>
namespace nix {
-void FdSink::operator () (const unsigned char * data, unsigned int len)
+BufferedSink::~BufferedSink()
+{
+ /* We can't call flush() here, because C++ for some insane reason
+ doesn't allow you to call virtual methods from a destructor. */
+ assert(!bufPos);
+ if (buffer) delete[] buffer;
+}
+
+
+void BufferedSink::operator () (const unsigned char * data, size_t len)
+{
+ if (!buffer) buffer = new unsigned char[bufSize];
+
+ while (len) {
+ /* Optimisation: bypass the buffer if the data exceeds the
+ buffer size. */
+ if (bufPos + len >= bufSize) {
+ flush();
+ write(data, len);
+ break;
+ }
+ /* Otherwise, copy the bytes to the buffer. Flush the buffer
+ when it's full. */
+ size_t n = bufPos + len > bufSize ? bufSize - bufPos : len;
+ memcpy(buffer + bufPos, data, n);
+ data += n; bufPos += n; len -= n;
+ if (bufPos == bufSize) flush();
+ }
+}
+
+
+void BufferedSink::flush()
+{
+ if (bufPos == 0) return;
+ size_t n = bufPos;
+ bufPos = 0; // don't trigger the assert() in ~BufferedSink()
+ write(buffer, n);
+}
+
+
+FdSink::~FdSink()
+{
+ try { flush(); } catch (...) { ignoreException(); }
+}
+
+
+void FdSink::write(const unsigned char * data, size_t len)
{
writeFull(fd, data, len);
}
-void FdSource::operator () (unsigned char * data, unsigned int len)
+void Source::operator () (unsigned char * data, size_t len)
{
- readFull(fd, data, len);
+ while (len) {
+ size_t n = read(data, len);
+ data += n; len -= n;
+ }
}
-void writePadding(unsigned int len, Sink & sink)
+BufferedSource::~BufferedSource()
+{
+ if (buffer) delete[] buffer;
+}
+
+
+size_t BufferedSource::read(unsigned char * data, size_t len)
+{
+ if (!buffer) buffer = new unsigned char[bufSize];
+
+ if (!bufPosIn) bufPosIn = readUnbuffered(buffer, bufSize);
+
+ /* Copy out the data in the buffer. */
+ size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
+ memcpy(data, buffer + bufPosOut, n);
+ bufPosOut += n;
+ if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
+ return n;
+}
+
+
+size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
+{
+ ssize_t n;
+ do {
+ checkInterrupt();
+ n = ::read(fd, (char *) data, bufSize);
+ } while (n == -1 && errno == EINTR);
+ if (n == -1) throw SysError("reading from file");
+ if (n == 0) throw EndOfFile("unexpected end-of-file");
+ return n;
+}
+
+
+size_t StringSource::read(unsigned char * data, size_t len)
+{
+ if (pos == s.size()) throw EndOfFile("end of string reached");
+ size_t n = s.copy((char *) data, len, pos);
+ pos += n;
+ return n;
+}
+
+
+void writePadding(size_t len, Sink & sink)
{
if (len % 8) {
unsigned char zero[8];
@@ -56,28 +149,36 @@ void writeLongLong(unsigned long long n, Sink & sink)
}
-void writeString(const string & s, Sink & sink)
+void writeString(const unsigned char * buf, size_t len, Sink & sink)
{
- unsigned int len = s.length();
writeInt(len, sink);
- sink((const unsigned char *) s.c_str(), len);
+ sink(buf, len);
writePadding(len, sink);
}
-void writeStringSet(const StringSet & ss, Sink & sink)
+void writeString(const string & s, Sink & sink)
{
- writeInt(ss.size(), sink);
- for (StringSet::iterator i = ss.begin(); i != ss.end(); ++i)
- writeString(*i, sink);
+ writeString((const unsigned char *) s.c_str(), s.size(), sink);
}
-void readPadding(unsigned int len, Source & source)
+template<class T> void writeStrings(const T & ss, Sink & sink)
+{
+ writeInt(ss.size(), sink);
+ foreach (typename T::const_iterator, i, ss)
+ writeString(*i, sink);
+}
+
+template void writeStrings(const Paths & ss, Sink & sink);
+template void writeStrings(const PathSet & ss, Sink & sink);
+
+
+void readPadding(size_t len, Source & source)
{
if (len % 8) {
unsigned char zero[8];
- unsigned int n = 8 - (len % 8);
+ size_t n = 8 - (len % 8);
source(zero, n);
for (unsigned int i = 0; i < n; i++)
if (zero[i]) throw SerialisationError("non-zero padding");
@@ -115,9 +216,19 @@ unsigned long long readLongLong(Source & source)
}
+size_t readString(unsigned char * buf, size_t max, Source & source)
+{
+ size_t len = readInt(source);
+ if (len > max) throw Error("string is too long");
+ source(buf, len);
+ readPadding(len, source);
+ return len;
+}
+
+
string readString(Source & source)
{
- unsigned int len = readInt(source);
+ size_t len = readInt(source);
unsigned char * buf = new unsigned char[len];
AutoDeleteArray<unsigned char> d(buf);
source(buf, len);
@@ -126,14 +237,17 @@ string readString(Source & source)
}
-StringSet readStringSet(Source & source)
+template<class T> T readStrings(Source & source)
{
unsigned int count = readInt(source);
- StringSet ss;
+ T ss;
while (count--)
- ss.insert(readString(source));
+ ss.insert(ss.end(), readString(source));
return ss;
}
+template Paths readStrings<Paths>(Source & source);
+template PathSet readStrings<PathSet>(Source & source);
+
}
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 0e797d63b..ded4b12a0 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -11,7 +11,25 @@ namespace nix {
struct Sink
{
virtual ~Sink() { }
- virtual void operator () (const unsigned char * data, unsigned int len) = 0;
+ virtual void operator () (const unsigned char * data, size_t len) = 0;
+};
+
+
+/* A buffered abstract sink. */
+struct BufferedSink : Sink
+{
+ size_t bufSize, bufPos;
+ unsigned char * buffer;
+
+ BufferedSink(size_t bufSize = 32 * 1024)
+ : bufSize(bufSize), bufPos(0), buffer(0) { }
+ ~BufferedSink();
+
+ void operator () (const unsigned char * data, size_t len);
+
+ void flush();
+
+ virtual void write(const unsigned char * data, size_t len) = 0;
};
@@ -20,49 +38,55 @@ struct Source
{
virtual ~Source() { }
- /* The callee should store exactly *len bytes in the buffer
- pointed to by data. It should block if that much data is not
- yet available, or throw an error if it is not going to be
- available. */
- virtual void operator () (unsigned char * data, unsigned int len) = 0;
+ /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
+ It blocks until all the requested data is available, or throws
+ an error if it is not going to be available. */
+ void operator () (unsigned char * data, size_t len);
+
+ /* Store up to ‘len’ in the buffer pointed to by ‘data’, and
+ return the number of bytes stored. If blocks until at least
+ one byte is available. */
+ virtual size_t read(unsigned char * data, size_t len) = 0;
+};
+
+
+/* A buffered abstract source. */
+struct BufferedSource : Source
+{
+ size_t bufSize, bufPosIn, bufPosOut;
+ unsigned char * buffer;
+
+ BufferedSource(size_t bufSize = 32 * 1024)
+ : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { }
+ ~BufferedSource();
+
+ size_t read(unsigned char * data, size_t len);
+
+ /* Underlying read call, to be overriden. */
+ virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
};
/* A sink that writes data to a file descriptor. */
-struct FdSink : Sink
+struct FdSink : BufferedSink
{
int fd;
- FdSink()
- {
- fd = -1;
- }
+ FdSink() : fd(-1) { }
+ FdSink(int fd) : fd(fd) { }
+ ~FdSink();
- FdSink(int fd)
- {
- this->fd = fd;
- }
-
- void operator () (const unsigned char * data, unsigned int len);
+ void write(const unsigned char * data, size_t len);
};
/* A source that reads data from a file descriptor. */
-struct FdSource : Source
+struct FdSource : BufferedSource
{
int fd;
-
- FdSource()
- {
- fd = -1;
- }
-
- FdSource(int fd)
- {
- this->fd = fd;
- }
-
- void operator () (unsigned char * data, unsigned int len);
+ FdSource() : fd(-1) { }
+ FdSource(int fd) : fd(fd) { }
+ size_t readUnbuffered(unsigned char * data, size_t len);
};
@@ -70,7 +94,7 @@ struct FdSource : Source
struct StringSink : Sink
{
string s;
- virtual void operator () (const unsigned char * data, unsigned int len)
+ void operator () (const unsigned char * data, size_t len)
{
s.append((const char *) data, len);
}
@@ -81,29 +105,25 @@ struct StringSink : Sink
struct StringSource : Source
{
const string & s;
- unsigned int pos;
+ size_t pos;
StringSource(const string & _s) : s(_s), pos(0) { }
- virtual void operator () (unsigned char * data, unsigned int len)
- {
- s.copy((char *) data, len, pos);
- pos += len;
- if (pos > s.size())
- throw Error("end of string reached");
- }
+ size_t read(unsigned char * data, size_t len);
};
-void writePadding(unsigned int len, Sink & sink);
+void writePadding(size_t len, Sink & sink);
void writeInt(unsigned int n, Sink & sink);
void writeLongLong(unsigned long long n, Sink & sink);
+void writeString(const unsigned char * buf, size_t len, Sink & sink);
void writeString(const string & s, Sink & sink);
-void writeStringSet(const StringSet & ss, Sink & sink);
+template<class T> void writeStrings(const T & ss, Sink & sink);
-void readPadding(unsigned int len, Source & source);
+void readPadding(size_t len, Source & source);
unsigned int readInt(Source & source);
unsigned long long readLongLong(Source & source);
+size_t readString(unsigned char * buf, size_t max, Source & source);
string readString(Source & source);
-StringSet readStringSet(Source & source);
+template<class T> T readStrings(Source & source);
MakeError(SerialisationError, Error)
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 3dfecb2d7..a8d9076cf 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -1270,7 +1270,7 @@ void run(Strings args)
else if (arg == "--profile" || arg == "-p")
globals.profile = absPath(needArg(i, args, arg));
else if (arg == "--file" || arg == "-f")
- globals.instSource.nixExprPath = absPath(needArg(i, args, arg));
+ globals.instSource.nixExprPath = lookupFileArg(globals.state, needArg(i, args, arg));
else if (arg == "--switch-profile" || arg == "-S")
op = opSwitchProfile;
else if (arg == "--switch-generation" || arg == "-G")
diff --git a/src/nix-hash/nix-hash.cc b/src/nix-hash/nix-hash.cc
index 4867234bf..5b35ccd9d 100644
--- a/src/nix-hash/nix-hash.cc
+++ b/src/nix-hash/nix-hash.cc
@@ -43,7 +43,7 @@ void run(Strings args)
}
if (op == opHash) {
- for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) {
+ foreach (Strings::iterator, i, ss) {
Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i).first;
if (truncate && h.hashSize > 20) h = compressHash(h, 20);
std::cout << format("%1%\n") %
@@ -52,8 +52,8 @@ void run(Strings args)
}
else {
- for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) {
- Hash h = op == opTo16 ? parseHash32(ht, *i) : parseHash(ht, *i);
+ foreach (Strings::iterator, i, ss) {
+ Hash h = parseHash16or32(ht, *i);
std::cout << format("%1%\n") %
(op == opTo16 ? printHash(h) : printHash32(h));
}
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index 98eadbd69..93aa50943 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -138,8 +138,7 @@ void run(Strings args)
}
foreach (Strings::iterator, i, files) {
- Path path = absPath(*i);
- Expr * e = state.parseExprFromFile(path);
+ Expr * e = state.parseExprFromFile(lookupFileArg(state, *i));
processExpr(state, attrPaths, parseOnly, strict, autoArgs,
evalOnly, xmlOutput, xmlOutputSourceLocation, e);
}
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 371ca54af..e92ccb153 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -133,14 +133,6 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
}
-static Hash parseHash16or32(HashType ht, const string & s)
-{
- return s.size() == Hash(ht).hashSize * 2
- ? parseHash(ht, s)
- : parseHash32(ht, s);
-}
-
-
/* Hack to support caching in `nix-prefetch-url'. */
static void opPrintFixedPath(Strings opFlags, Strings opArgs)
{
@@ -594,11 +586,7 @@ static void opExport(Strings opFlags, Strings opArgs)
else throw UsageError(format("unknown flag `%1%'") % *i);
FdSink sink(STDOUT_FILENO);
- for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i) {
- writeInt(1, sink);
- store->exportPath(*i, sign, sink);
- }
- writeInt(0, sink);
+ exportPaths(*store, opArgs, sign, sink);
}
@@ -612,12 +600,10 @@ static void opImport(Strings opFlags, Strings opArgs)
if (!opArgs.empty()) throw UsageError("no arguments expected");
FdSource source(STDIN_FILENO);
- while (true) {
- unsigned long long n = readLongLong(source);
- if (n == 0) break;
- if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'");
- cout << format("%1%\n") % store->importPath(requireSignature, source) << std::flush;
- }
+ Paths paths = store->importPaths(requireSignature, source);
+
+ foreach (Paths::iterator, i, paths)
+ cout << format("%1%\n") % *i << std::flush;
}
diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc
index 3587bd7fd..68567f341 100644
--- a/src/nix-worker/nix-worker.cc
+++ b/src/nix-worker/nix-worker.cc
@@ -56,7 +56,8 @@ static void tunnelStderr(const unsigned char * buf, size_t count)
if (canSendStderr && myPid == getpid()) {
try {
writeInt(STDERR_NEXT, to);
- writeString(string((char *) buf, count), to);
+ writeString(buf, count, to);
+ to.flush();
} catch (...) {
/* Write failed; that means that the other side is
gone. */
@@ -200,26 +201,20 @@ static void stopWork(bool success = true, const string & msg = "", unsigned int
struct TunnelSink : Sink
{
Sink & to;
- TunnelSink(Sink & to) : to(to)
- {
- }
- virtual void operator ()
- (const unsigned char * data, unsigned int len)
+ TunnelSink(Sink & to) : to(to) { }
+ virtual void operator () (const unsigned char * data, size_t len)
{
writeInt(STDERR_WRITE, to);
- writeString(string((const char *) data, len), to);
+ writeString(data, len, to);
}
};
-struct TunnelSource : Source
+struct TunnelSource : BufferedSource
{
Source & from;
- TunnelSource(Source & from) : from(from)
- {
- }
- virtual void operator ()
- (unsigned char * data, unsigned int len)
+ TunnelSource(Source & from) : from(from) { }
+ size_t readUnbuffered(unsigned char * data, size_t len)
{
/* Careful: we're going to receive data from the client now,
so we have to disable the SIGPOLL handler. */
@@ -228,11 +223,12 @@ struct TunnelSource : Source
writeInt(STDERR_READ, to);
writeInt(len, to);
- string s = readString(from);
- if (s.size() != len) throw Error("not enough data");
- memcpy(data, (const unsigned char *) s.c_str(), len);
+ to.flush();
+ size_t n = readString(data, len, from);
startWork();
+ if (n == 0) throw EndOfFile("unexpected end-of-file");
+ return n;
}
};
@@ -241,11 +237,14 @@ struct TunnelSource : Source
the contents of the file to `s'. Otherwise barf. */
struct RetrieveRegularNARSink : ParseSink
{
+ bool regular;
string s;
+ RetrieveRegularNARSink() : regular(true) { }
+
void createDirectory(const Path & path)
{
- throw Error("regular file expected");
+ regular = false;
}
void receiveContents(unsigned char * data, unsigned int len)
@@ -255,7 +254,7 @@ struct RetrieveRegularNARSink : ParseSink
void createSymlink(const Path & path, const string & target)
{
- throw Error("regular file expected");
+ regular = false;
}
};
@@ -266,10 +265,11 @@ struct SavingSourceAdapter : Source
Source & orig;
string s;
SavingSourceAdapter(Source & orig) : orig(orig) { }
- void operator () (unsigned char * data, unsigned int len)
+ size_t read(unsigned char * data, size_t len)
{
- orig(data, len);
- s.append((const char *) data, len);
+ size_t n = orig.read(data, len);
+ s.append((const char *) data, n);
+ return n;
}
};
@@ -327,7 +327,7 @@ static void performOp(unsigned int clientVersion,
store->queryReferrers(path, paths);
else paths = store->queryDerivationOutputs(path);
stopWork();
- writeStringSet(paths, to);
+ writeStrings(paths, to);
break;
}
@@ -371,11 +371,11 @@ static void performOp(unsigned int clientVersion,
addToStoreFromDump(). */
ParseSink sink; /* null sink; just parse the NAR */
parseDump(sink, savedNAR);
- } else {
+ } else
parseDump(savedRegular, from);
- }
startWork();
+ if (!savedRegular.regular) throw Error("regular file expected");
Path path = dynamic_cast<LocalStore *>(store.get())
->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
stopWork();
@@ -387,7 +387,7 @@ static void performOp(unsigned int clientVersion,
case wopAddTextToStore: {
string suffix = readString(from);
string s = readString(from);
- PathSet refs = readStorePaths(from);
+ PathSet refs = readStorePaths<PathSet>(from);
startWork();
Path path = store->addTextToStore(suffix, s, refs);
stopWork();
@@ -406,17 +406,17 @@ static void performOp(unsigned int clientVersion,
break;
}
- case wopImportPath: {
+ case wopImportPaths: {
startWork();
TunnelSource source(from);
- Path path = store->importPath(true, source);
+ Paths paths = store->importPaths(true, source);
stopWork();
- writeString(path, to);
+ writeStrings(paths, to);
break;
}
case wopBuildDerivations: {
- PathSet drvs = readStorePaths(from);
+ PathSet drvs = readStorePaths<PathSet>(from);
startWork();
store->buildDerivations(drvs);
stopWork();
@@ -474,7 +474,7 @@ static void performOp(unsigned int clientVersion,
case wopCollectGarbage: {
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
- options.pathsToDelete = readStorePaths(from);
+ options.pathsToDelete = readStorePaths<PathSet>(from);
options.ignoreLiveness = readInt(from);
options.maxFreed = readLongLong(from);
options.maxLinks = readInt(from);
@@ -492,7 +492,7 @@ static void performOp(unsigned int clientVersion,
store->collectGarbage(options, results);
stopWork();
- writeStringSet(results.paths, to);
+ writeStrings(results.paths, to);
writeLongLong(results.bytesFreed, to);
writeLongLong(results.blocksFreed, to);
@@ -530,7 +530,7 @@ static void performOp(unsigned int clientVersion,
writeInt(res ? 1 : 0, to);
if (res) {
writeString(info.deriver, to);
- writeStringSet(info.references, to);
+ writeStrings(info.references, to);
writeLongLong(info.downloadSize, to);
if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
writeLongLong(info.narSize, to);
@@ -542,7 +542,7 @@ static void performOp(unsigned int clientVersion,
startWork();
PathSet paths = store->queryValidPaths();
stopWork();
- writeStringSet(paths, to);
+ writeStrings(paths, to);
break;
}
@@ -550,12 +550,12 @@ static void performOp(unsigned int clientVersion,
startWork();
PathSet paths = store->queryFailedPaths();
stopWork();
- writeStringSet(paths, to);
+ writeStrings(paths, to);
break;
}
case wopClearFailedPaths: {
- PathSet paths = readStringSet(from);
+ PathSet paths = readStrings<PathSet>(from);
startWork();
store->clearFailedPaths(paths);
stopWork();
@@ -570,7 +570,7 @@ static void performOp(unsigned int clientVersion,
stopWork();
writeString(info.deriver, to);
writeString(printHash(info.hash), to);
- writeStringSet(info.references, to);
+ writeStrings(info.references, to);
writeInt(info.registrationTime, to);
writeLongLong(info.narSize, to);
break;
@@ -603,8 +603,8 @@ static void processConnection()
unsigned int magic = readInt(from);
if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
writeInt(WORKER_MAGIC_2, to);
-
writeInt(PROTOCOL_VERSION, to);
+ to.flush();
unsigned int clientVersion = readInt(from);
/* Send startup error messages to the client. */
@@ -626,9 +626,11 @@ static void processConnection()
store = boost::shared_ptr<StoreAPI>(new LocalStore());
stopWork();
+ to.flush();
} catch (Error & e) {
stopWork(false, e.msg());
+ to.flush();
return;
}
@@ -648,9 +650,19 @@ static void processConnection()
try {
performOp(clientVersion, from, to, op);
} catch (Error & e) {
+ /* If we're not in a state were we can send replies, then
+ something went wrong processing the input of the
+ client. This can happen especially if I/O errors occur
+ during addTextToStore() / importPath(). If that
+ happens, just send the error message and exit. */
+ bool errorAllowed = canSendStderr;
+ if (!errorAllowed) printMsg(lvlError, format("error processing client input: %1%") % e.msg());
stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0);
+ if (!errorAllowed) break;
}
+ to.flush();
+
assert(!canSendStderr);
};
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 0418f73e6..8b5aa4bd9 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1,4 +1,4 @@
-TESTS_ENVIRONMENT = $(bash) -e
+TESTS_ENVIRONMENT = NIX_REMOTE= $(bash) -e
extra1 = $(shell pwd)/test-tmp/shared
diff --git a/tests/common.sh.in b/tests/common.sh.in
index 62ac669df..4ab490a62 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -23,8 +23,6 @@ export SHARED=$TEST_ROOT/shared
export PATH=$NIX_BIN_DIR:$TOP/scripts:$PATH
-export NIX_REMOTE=
-
export REAL_BIN_DIR=@bindir@
export REAL_LIBEXEC_DIR=@libexecdir@
export REAL_LOCALSTATE_DIR=@localstatedir@
diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix
index 201ca9dba..de127b8ae 100644
--- a/tests/remote-builds.nix
+++ b/tests/remote-builds.nix
@@ -72,6 +72,7 @@ in
$client->succeed("chmod 600 /root/.ssh/id_dsa");
# Install the SSH key on the slaves.
+ $client->waitForJob("network-interfaces");
foreach my $slave ($slave1, $slave2) {
$slave->succeed("mkdir -m 700 /root/.ssh");
$slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");
diff --git a/tests/user-envs.sh b/tests/user-envs.sh
index 025a5ff81..5037e28b9 100644
--- a/tests/user-envs.sh
+++ b/tests/user-envs.sh
@@ -36,7 +36,7 @@ nix-env -p $profiles/test -q '*' | grep -q foo-2.0pre1
test "$($profiles/test/bin/foo)" = "foo-2.0pre1"
# Upgrade "foo": should install foo-2.0.
-nix-env -p $profiles/test -f ./user-envs.nix -u foo
+NIX_PATH=nixpkgs=./user-envs.nix nix-env -p $profiles/test -f '<nixpkgs>' -u foo
# Query installed: should contain foo-2.0 now.
test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1