* Sync with the trunk.

This commit is contained in:
Eelco Dolstra 2011-12-16 23:33:01 +00:00
commit 194d21f9f6
45 changed files with 928 additions and 682 deletions

View file

@ -5,32 +5,35 @@ AM_INIT_AUTOMAKE([dist-bzip2 foreign])
AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [Nix version.]) AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [Nix version.])
AC_CANONICAL_HOST AC_PROG_SED
# Construct a Nix system name (like "i686-linux"). # Construct a Nix system name (like "i686-linux").
AC_CANONICAL_HOST
AC_MSG_CHECKING([for the canonical Nix system name]) AC_MSG_CHECKING([for the canonical Nix system name])
cpu_name=$(uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
machine_name=$(uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
case $machine_name in AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
[Platform identifier (e.g., `i686-linux').]),
[system=$withval],
[case "$host_cpu" in
i*86) i*86)
machine_name=i686 machine_name="i686";;
;; amd64)
x86_64) machine_name="x86_64";;
machine_name=x86_64
;;
ppc)
machine_name=powerpc
;;
*) *)
if test "$cpu_name" != "unknown"; then machine_name="$host_cpu";;
machine_name=$cpu_name
fi
;;
esac esac
sys_name=$(uname -s | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_') case "$host_os" in
linux-gnu*)
# For backward compatibility, strip the `-gnu' part.
system="$machine_name-linux";;
*)
# Strip the version number from names such as `gnu0.3',
# `darwin10.2.0', etc.
system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
esac])
sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
case $sys_name in case $sys_name in
cygwin*) cygwin*)
@ -38,9 +41,6 @@ case $sys_name in
;; ;;
esac esac
AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
[Platform identifier (e.g., `i686-linux').]),
system=$withval, system="${machine_name}-${sys_name}")
AC_MSG_RESULT($system) AC_MSG_RESULT($system)
AC_SUBST(system) AC_SUBST(system)
AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')]) AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
@ -62,7 +62,7 @@ fi
# Solaris-specific stuff. # Solaris-specific stuff.
if test "$sys_name" = "sunos"; then if test "$sys_name" = sunos; then
# Solaris requires -lsocket -lnsl for network functions # Solaris requires -lsocket -lnsl for network functions
LIBS="-lsocket -lnsl $LIBS" LIBS="-lsocket -lnsl $LIBS"
fi fi
@ -255,7 +255,7 @@ AC_ARG_WITH(sqlite, AC_HELP_STRING([--with-sqlite=PATH],
[prefix of SQLite]), [prefix of SQLite]),
sqlite=$withval, sqlite=) sqlite=$withval, sqlite=)
AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite") AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite")
SQLITE_VERSION=3070701 SQLITE_VERSION=3070900
AC_SUBST(SQLITE_VERSION) AC_SUBST(SQLITE_VERSION)
if test -z "$sqlite"; then if test -z "$sqlite"; then
sqlite_lib='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)/libsqlite3.la' sqlite_lib='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)/libsqlite3.la'

View file

@ -24,8 +24,11 @@
</group> </group>
<arg><option>--sign</option></arg> <arg><option>--sign</option></arg>
<arg><option>--gzip</option></arg> <arg><option>--gzip</option></arg>
<arg><option>--bzip2</option></arg>
<arg><option>--xz</option></arg>
<arg><option>--include-outputs</option></arg>
<arg choice='plain'> <arg choice='plain'>
<arg><replaceable>user@</replaceable></arg><replaceable>machine</replaceable> <replaceable>user@</replaceable><replaceable>machine</replaceable>
</arg> </arg>
<arg choice='plain'><replaceable>paths</replaceable></arg> <arg choice='plain'><replaceable>paths</replaceable></arg>
</cmdsynopsis> </cmdsynopsis>
@ -84,22 +87,33 @@ those paths. If this bothers you, use
<listitem><para>Let the sending machine cryptographically sign the <listitem><para>Let the sending machine cryptographically sign the
dump of each path with the key in dump of each path with the key in
<filename>/nix/etc/nix/signing-key.sec</filename>. If the user on <filename><replaceable>sysconfdir</replaceable>/nix/signing-key.sec</filename>.
the target machine does not have direct access to the Nix store If the user on the target machine does not have direct access to
(i.e., if the target machine has a multi-user Nix installation), the Nix store (i.e., if the target machine has a multi-user Nix
then the target machine will check the dump against installation), then the target machine will check the dump against
<filename>/nix/etc/nix/signing-key.pub</filename> before unpacking <filename><replaceable>sysconfdir</replaceable>/nix/signing-key.pub</filename>
it in its Nix store. This allows secure sharing of store paths before unpacking it in its Nix store. This allows secure sharing
between untrusted users on two machines, provided that there is a of store paths between untrusted users on two machines, provided
trust relation between the Nix installations on both machines that there is a trust relation between the Nix installations on
(namely, they have matching public/secret keys).</para></listitem> both machines (namely, they have matching public/secret
keys).</para></listitem>
</varlistentry> </varlistentry>
<varlistentry><term><option>--gzip</option></term> <varlistentry><term><option>--gzip</option> / <option>--bzip2</option> / <option>--xz</option></term>
<listitem><para>Compress the dump of each path with <listitem><para>Compress the dump of each path with respectively
<command>gzip</command> before sending it.</para></listitem> <command>gzip</command>, <command>bzip2</command> or
<command>xz</command> before sending it. The corresponding
decompression program must be installed on the target
machine.</para></listitem>
</varlistentry>
<varlistentry><term><option>--include-outputs</option></term>
<listitem><para>Also copy the outputs of store derivations included
in the closure.</para></listitem>
</varlistentry> </varlistentry>

View file

@ -256,7 +256,7 @@ number of possible ways:
<emphasis>attribute paths</emphasis> that select attributes from the <emphasis>attribute paths</emphasis> that select attributes from the
top-level Nix expression. This is faster than using derivation top-level Nix expression. This is faster than using derivation
names and unambiguous. To find out the attribute paths of available names and unambiguous. To find out the attribute paths of available
packages, use <literal>nix-env -qaA '*'</literal>.</para></listitem> packages, use <literal>nix-env -qaP '*'</literal>.</para></listitem>
<listitem><para>If <option>--from-profile</option> <listitem><para>If <option>--from-profile</option>
<replaceable>path</replaceable> is given, <replaceable>path</replaceable> is given,

View file

@ -2,10 +2,12 @@ perlversion := $(shell perl -e 'use Config; print $$Config{version};')
perlarchname := $(shell perl -e 'use Config; print $$Config{archname};') perlarchname := $(shell perl -e 'use Config; print $$Config{archname};')
perllibdir = $(libdir)/perl5/site_perl/$(perlversion)/$(perlarchname) perllibdir = $(libdir)/perl5/site_perl/$(perlversion)/$(perlarchname)
PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/Config.pm.in PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in
# Hack required by "make check".
all: $(PERL_MODULES:.in=) all: $(PERL_MODULES:.in=)
ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/Store.so mkdir -p lib/auto/Nix/Store
ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/auto/Nix/Store/Store.so
install-exec-local: $(PERL_MODULES:.in=) install-exec-local: $(PERL_MODULES:.in=)
$(INSTALL) -d $(DESTDIR)$(perllibdir)/Nix $(INSTALL) -d $(DESTDIR)$(perllibdir)/Nix

View file

@ -4,6 +4,7 @@ $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
$libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@"; $libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
$manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests"; $manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests";
$logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix"; $logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
$confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
$bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@"; $bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@";
$curl = "@curl@"; $curl = "@curl@";

View file

@ -0,0 +1,46 @@
package Nix::CopyClosure;

use strict;
use warnings;
use Nix::Config;
use Nix::Store;


# Copy the closure of a set of store paths to a remote machine over SSH.
#
# Arguments:
#   $sshHost        - target host, possibly of the form "user@machine"
#   $sshOpts        - array ref of extra options passed verbatim to ssh
#   $storePaths     - array ref of store paths whose closure is to be copied
#   $compressor     - local compression command ("" for none)
#   $decompressor   - remote decompression command ("" for none)
#   $includeOutputs - if true, also include outputs of derivations in the closure
#   $dryRun         - if true, only report which paths would be copied
#   $sign           - if true, cryptographically sign the dump of each path
#
# Dies if the remote validity query or the import pipeline fails.
sub copyTo {
    my ($sshHost, $sshOpts, $storePaths, $compressor, $decompressor, $includeOutputs, $dryRun, $sign) = @_;

    # Turn the (de)compressors, when given, into pipeline stages.
    $compressor = "$compressor |" if $compressor ne "";
    $decompressor = "$decompressor |" if $decompressor ne "";

    # Get the closure of this path.
    my @closure = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs,
        map { followLinksToStorePath $_ } @{$storePaths})));

    # Ask the remote host which paths are invalid.  Because of limits
    # to the command line length, do this in chunks.  Eventually,
    # we'll want to use `--from-stdin', but we can't rely on the
    # target having this option yet.
    my @missing = ();
    while (scalar(@closure) > 0) {
        my @ps = splice(@closure, 0, 1500);
        # `set -f' disables globbing so that store paths containing
        # shell metacharacters are passed through untouched.
        open(my $read, "-|", "set -f; ssh $sshHost @{$sshOpts} nix-store --check-validity --print-invalid @ps")
            or die "cannot start ssh to `$sshHost': $!";
        while (<$read>) {
            chomp;
            push @missing, $_;
        }
        close $read or die;
    }

    # Export the store paths and import them on the remote machine.
    if (scalar @missing > 0) {
        print STDERR "copying ", scalar @missing, " missing paths to $sshHost...\n";
        #print STDERR "  $_\n" foreach @missing;
        unless ($dryRun) {
            open(my $ssh, "|-", "$compressor ssh $sshHost @{$sshOpts} '$decompressor nix-store --import'")
                or die "cannot start ssh to `$sshHost': $!";
            # exportPaths() writes directly to the pipe's file descriptor.
            exportPaths(fileno($ssh), $sign, @missing);
            close $ssh or die "copying store paths to remote machine `$sshHost' failed: $?";
        }
    }
}

1;

View file

@ -53,8 +53,14 @@ sub addPatch {
sub readManifest_ { sub readManifest_ {
my ($manifest, $addNAR, $addPatch) = @_; my ($manifest, $addNAR, $addPatch) = @_;
# Decompress the manifest if necessary.
if ($manifest =~ /\.bz2$/) {
open MANIFEST, "$Nix::Config::bzip2 -d < $manifest |"
or die "cannot decompress `$manifest': $!";
} else {
open MANIFEST, "<$manifest" open MANIFEST, "<$manifest"
or die "cannot open `$manifest': $!"; or die "cannot open `$manifest': $!";
}
my $inside = 0; my $inside = 0;
my $type; my $type;
@ -120,7 +126,6 @@ sub readManifest_ {
elsif (/^\s*Hash:\s*(\S+)\s*$/) { $hash = $1; } elsif (/^\s*Hash:\s*(\S+)\s*$/) { $hash = $1; }
elsif (/^\s*URL:\s*(\S+)\s*$/) { $url = $1; } elsif (/^\s*URL:\s*(\S+)\s*$/) { $url = $1; }
elsif (/^\s*Size:\s*(\d+)\s*$/) { $size = $1; } elsif (/^\s*Size:\s*(\d+)\s*$/) { $size = $1; }
elsif (/^\s*SuccOf:\s*(\/\S+)\s*$/) { } # obsolete
elsif (/^\s*BasePath:\s*(\/\S+)\s*$/) { $basePath = $1; } elsif (/^\s*BasePath:\s*(\/\S+)\s*$/) { $basePath = $1; }
elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; } elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; } elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }
@ -286,14 +291,22 @@ EOF
open MAINLOCK, ">>$lockFile" or die "unable to acquire lock $lockFile: $!\n"; open MAINLOCK, ">>$lockFile" or die "unable to acquire lock $lockFile: $!\n";
flock(MAINLOCK, LOCK_EX) or die; flock(MAINLOCK, LOCK_EX) or die;
our $insertNAR = $dbh->prepare(
"insert into NARs(manifest, storePath, url, hash, size, narHash, " .
"narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
our $insertPatch = $dbh->prepare(
"insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
"size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
$dbh->begin_work; $dbh->begin_work;
# Read each manifest in $manifestDir and add it to the database, # Read each manifest in $manifestDir and add it to the database,
# unless we've already done so on a previous run. # unless we've already done so on a previous run.
my %seen; my %seen;
for my $manifest (glob "$manifestDir/*.nixmanifest") { for my $manifestLink (glob "$manifestDir/*.nixmanifest") {
$manifest = Cwd::abs_path($manifest); my $manifest = Cwd::abs_path($manifestLink);
my $timestamp = lstat($manifest)->mtime; my $timestamp = lstat($manifest)->mtime;
$seen{$manifest} = 1; $seen{$manifest} = 1;
@ -312,20 +325,16 @@ EOF
sub addNARToDB { sub addNARToDB {
my ($storePath, $narFile) = @_; my ($storePath, $narFile) = @_;
$dbh->do( $insertNAR->execute(
"insert into NARs(manifest, storePath, url, hash, size, narHash, " . $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
"narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
{}, $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
$narFile->{narHash}, $narFile->{narSize}, $narFile->{references}, $narFile->{narHash}, $narFile->{narSize}, $narFile->{references},
$narFile->{deriver}, $narFile->{system}); $narFile->{deriver}, $narFile->{system});
}; };
sub addPatchToDB { sub addPatchToDB {
my ($storePath, $patch) = @_; my ($storePath, $patch) = @_;
$dbh->do( $insertPatch->execute(
"insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " . $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
"size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
{}, $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
$patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize}, $patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize},
$patch->{patchType}); $patch->{patchType});
}; };
@ -333,10 +342,10 @@ EOF
my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB); my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB);
if ($version < 3) { if ($version < 3) {
die "you have an old-style manifest `$manifest'; please delete it"; die "you have an old-style or corrupt manifest `$manifestLink'; please delete it";
} }
if ($version >= 10) { if ($version >= 10) {
die "manifest `$manifest' is too new; please delete it or upgrade Nix"; die "manifest `$manifestLink' is too new; please delete it or upgrade Nix";
} }
} }

View file

@ -1,6 +1,5 @@
package Nix::Store; package Nix::Store;
use 5.010001;
use strict; use strict;
use warnings; use warnings;
@ -12,7 +11,12 @@ our %EXPORT_TAGS = ( 'all' => [ qw( ) ] );
our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
our @EXPORT = qw(isValidPath topoSortPaths computeFSClosure followLinksToStorePath); our @EXPORT = qw(
isValidPath queryReferences queryPathInfo queryDeriver queryPathHash
topoSortPaths computeFSClosure followLinksToStorePath exportPaths
hashPath hashFile hashString
addToStore makeFixedOutputPath
);
our $VERSION = '0.15'; our $VERSION = '0.15';

View file

@ -18,10 +18,8 @@ using namespace nix;
void doInit() void doInit()
{ {
if (!store) { if (!store) {
nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", "/nix/store")));
nixStateDir = canonPath(getEnv("NIX_STATE_DIR", "/nix/var/nix"));
nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
try { try {
setDefaultsFromEnvironment();
store = openStore(); store = openStore();
} catch (Error & e) { } catch (Error & e) {
croak(e.what()); croak(e.what());
@ -69,7 +67,7 @@ SV * queryPathHash(char * path)
try { try {
doInit(); doInit();
Hash hash = store->queryPathHash(path); Hash hash = store->queryPathHash(path);
string s = "sha256:" + printHash(hash); string s = "sha256:" + printHash32(hash);
XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
} catch (Error & e) { } catch (Error & e) {
croak(e.what()); croak(e.what());
@ -148,3 +146,73 @@ SV * followLinksToStorePath(char * path)
} }
OUTPUT: OUTPUT:
RETVAL RETVAL
void exportPaths(int fd, int sign, ...)
PPCODE:
try {
doInit();
Paths paths;
for (int n = 2; n < items; ++n) paths.push_back(SvPV_nolen(ST(n)));
FdSink sink(fd);
exportPaths(*store, paths, sign, sink);
} catch (Error & e) {
croak(e.what());
}
SV * hashPath(char * algo, int base32, char * path)
PPCODE:
try {
Hash h = hashPath(parseHashType(algo), path).first;
string s = base32 ? printHash32(h) : printHash(h);
XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
} catch (Error & e) {
croak(e.what());
}
SV * hashFile(char * algo, int base32, char * path)
PPCODE:
try {
Hash h = hashFile(parseHashType(algo), path);
string s = base32 ? printHash32(h) : printHash(h);
XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
} catch (Error & e) {
croak(e.what());
}
SV * hashString(char * algo, int base32, char * s)
PPCODE:
try {
Hash h = hashString(parseHashType(algo), s);
string s = base32 ? printHash32(h) : printHash(h);
XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
} catch (Error & e) {
croak(e.what());
}
SV * addToStore(char * srcPath, int recursive, char * algo)
PPCODE:
try {
doInit();
Path path = store->addToStore(srcPath, recursive, parseHashType(algo));
XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
} catch (Error & e) {
croak(e.what());
}
SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
PPCODE:
try {
doInit();
HashType ht = parseHashType(algo);
Path path = makeFixedOutputPath(recursive, ht,
parseHash16or32(ht, hash), name);
XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
} catch (Error & e) {
croak(e.what());
}

View file

@ -3,7 +3,9 @@
use Fcntl ':flock'; use Fcntl ':flock';
use English '-no_match_vars'; use English '-no_match_vars';
use IO::Handle; use IO::Handle;
use Nix::Config;
use Nix::SSH qw/sshOpts openSSHConnection/; use Nix::SSH qw/sshOpts openSSHConnection/;
use Nix::CopyClosure;
no warnings('once'); no warnings('once');
@ -208,7 +210,7 @@ print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace;
my $maybeSign = ""; my $maybeSign = "";
$maybeSign = "--sign" if -e "/nix/etc/nix/signing-key.sec"; $maybeSign = "--sign" if -e "$Nix::Config::confDir/signing-key.sec";
# Register the derivation as a temporary GC root. Note that $PPID is # Register the derivation as a temporary GC root. Note that $PPID is
@ -224,8 +226,7 @@ sub removeRoots {
# Copy the derivation and its dependencies to the build machine. # Copy the derivation and its dependencies to the build machine.
system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath @inputs") == 0 Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne "");
or die "cannot copy inputs to $hostName: $?";
# Perform the build. # Perform the build.
@ -239,7 +240,7 @@ my $buildFlags = "--max-silent-time $maxSilentTime --fallback --add-root $rootsD
# in which case every child receives SIGHUP; however, `-tt' doesn't # in which case every child receives SIGHUP; however, `-tt' doesn't
# work on some platforms when connection sharing is used.) # work on some platforms when connection sharing is used.)
pipe STDIN, DUMMY; # make sure we have a readable STDIN pipe STDIN, DUMMY; # make sure we have a readable STDIN
if (system("ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) { if (system("exec ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
# Note that if we get exit code 100 from `nix-store -r', it # Note that if we get exit code 100 from `nix-store -r', it
# denotes a permanent build failure (as opposed to an SSH problem # denotes a permanent build failure (as opposed to an SSH problem
# or a temporary Nix problem). We propagate this to the caller to # or a temporary Nix problem). We propagate this to the caller to
@ -259,7 +260,7 @@ foreach my $output (@outputs) {
my $maybeSignRemote = ""; my $maybeSignRemote = "";
$maybeSignRemote = "--sign" if $UID != 0; $maybeSignRemote = "--sign" if $UID != 0;
system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" . system("exec ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
"| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0 "| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0
or die "cannot copy $output from $hostName: $?"; or die "cannot copy $output from $hostName: $?";
} }

View file

@ -3,6 +3,7 @@
use strict; use strict;
use Nix::Config; use Nix::Config;
use Nix::Manifest; use Nix::Manifest;
use Nix::Store;
use POSIX qw(strftime); use POSIX qw(strftime);
use File::Temp qw(tempdir); use File::Temp qw(tempdir);
@ -19,14 +20,8 @@ my $fast = 1;
my $dbh = updateManifestDB(); my $dbh = updateManifestDB();
sub isValidPath { # $hashCache->{$algo}->{$path} yields the $algo-hash of $path.
my $p = shift; my $hashCache;
if ($fast) {
return -e $p;
} else {
return system("$Nix::Config::binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
}
}
sub parseHash { sub parseHash {
@ -101,15 +96,17 @@ sub computeSmallestDownload {
foreach my $patch (@{$patchList}) { foreach my $patch (@{$patchList}) {
if (isValidPath($patch->{basePath})) { if (isValidPath($patch->{basePath})) {
# !!! this should be cached
my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash}; my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
my $format = "--base32";
$format = "" if $baseHashAlgo eq "md5"; my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}};
my $hash = $fast && $baseHashAlgo eq "sha256" if (!defined $hash) {
? `$Nix::Config::binDir/nix-store -q --hash "$patch->{basePath}"` $hash = $fast && $baseHashAlgo eq "sha256"
: `$Nix::Config::binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`; ? queryPathHash($patch->{basePath})
chomp $hash; : hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath});
$hash =~ s/.*://; $hash =~ s/.*://;
$hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash;
}
next if $hash ne $baseHash; next if $hash ne $baseHash;
} }
push @queue, $patch->{basePath}; push @queue, $patch->{basePath};
@ -257,7 +254,7 @@ open LOGFILE, ">>$logFile" or die "cannot open log file $logFile";
my $date = strftime ("%F %H:%M:%S UTC", gmtime (time)); my $date = strftime ("%F %H:%M:%S UTC", gmtime (time));
print LOGFILE "$$ get $targetPath $date\n"; print LOGFILE "$$ get $targetPath $date\n";
print "\n*** Trying to download/patch `$targetPath'\n"; print STDERR "\n*** Trying to download/patch `$targetPath'\n";
# Compute the shortest path. # Compute the shortest path.
@ -281,7 +278,7 @@ sub downloadFile {
$ENV{"PRINT_PATH"} = 1; $ENV{"PRINT_PATH"} = 1;
$ENV{"QUIET"} = 1; $ENV{"QUIET"} = 1;
my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`; my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`;
die "download of `$url' failed" . ($! ? ": $!" : "") unless $? == 0; die "download of `$url' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0;
chomp $path; chomp $path;
return $path; return $path;
} }
@ -293,17 +290,17 @@ while (scalar @path > 0) {
my $u = $edge->{start}; my $u = $edge->{start};
my $v = $edge->{end}; my $v = $edge->{end};
print "\n*** Step $curStep/$maxStep: "; print STDERR "\n*** Step $curStep/$maxStep: ";
if ($edge->{type} eq "present") { if ($edge->{type} eq "present") {
print "using already present path `$v'\n"; print STDERR "using already present path `$v'\n";
print LOGFILE "$$ present $v\n"; print LOGFILE "$$ present $v\n";
if ($curStep < $maxStep) { if ($curStep < $maxStep) {
# Since this is not the last step, the path will be used # Since this is not the last step, the path will be used
# as a base to one or more patches. So turn the base path # as a base to one or more patches. So turn the base path
# into a NAR archive, to which we can apply the patch. # into a NAR archive, to which we can apply the patch.
print " packing base path...\n"; print STDERR " packing base path...\n";
system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0 system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0
or die "cannot dump `$v'"; or die "cannot dump `$v'";
} }
@ -311,17 +308,17 @@ while (scalar @path > 0) {
elsif ($edge->{type} eq "patch") { elsif ($edge->{type} eq "patch") {
my $patch = $edge->{info}; my $patch = $edge->{info};
print "applying patch `$patch->{url}' to `$u' to create `$v'\n"; print STDERR "applying patch `$patch->{url}' to `$u' to create `$v'\n";
print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n"; print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n";
# Download the patch. # Download the patch.
print " downloading patch...\n"; print STDERR " downloading patch...\n";
my $patchPath = downloadFile "$patch->{url}"; my $patchPath = downloadFile "$patch->{url}";
# Apply the patch to the NAR archive produced in step 1 (for # Apply the patch to the NAR archive produced in step 1 (for
# the already present path) or a later step (for patch sequences). # the already present path) or a later step (for patch sequences).
print " applying patch...\n"; print STDERR " applying patch...\n";
system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0 system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0
or die "cannot apply patch `$patchPath' to $tmpNar"; or die "cannot apply patch `$patchPath' to $tmpNar";
@ -331,7 +328,7 @@ while (scalar @path > 0) {
} else { } else {
# This was the last patch. Unpack the final NAR archive # This was the last patch. Unpack the final NAR archive
# into the target path. # into the target path.
print " unpacking patched archive...\n"; print STDERR " unpacking patched archive...\n";
system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0 system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0
or die "cannot unpack $tmpNar2 into `$v'"; or die "cannot unpack $tmpNar2 into `$v'";
} }
@ -341,13 +338,13 @@ while (scalar @path > 0) {
elsif ($edge->{type} eq "narfile") { elsif ($edge->{type} eq "narfile") {
my $narFile = $edge->{info}; my $narFile = $edge->{info};
print "downloading `$narFile->{url}' into `$v'\n"; print STDERR "downloading `$narFile->{url}' into `$v'\n";
my $size = $narFile->{size} || -1; my $size = $narFile->{size} || -1;
print LOGFILE "$$ narfile $narFile->{url} $size $v\n"; print LOGFILE "$$ narfile $narFile->{url} $size $v\n";
# Download the archive. # Download the archive.
print " downloading archive...\n"; print STDERR " downloading archive...\n";
my $narFilePath = downloadFile "$narFile->{url}"; my $narFilePath = downloadFile "$narFile->{url}";
if ($curStep < $maxStep) { if ($curStep < $maxStep) {
@ -356,7 +353,7 @@ while (scalar @path > 0) {
or die "cannot unpack `$narFilePath' into `$v'"; or die "cannot unpack `$narFilePath' into `$v'";
} else { } else {
# Unpack the archive into the target path. # Unpack the archive into the target path.
print " unpacking archive...\n"; print STDERR " unpacking archive...\n";
system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0 system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0
or die "cannot unpack `$narFilePath' into `$v'"; or die "cannot unpack `$narFilePath' into `$v'";
} }
@ -376,20 +373,15 @@ if (defined $finalNarHash) {
# The hash in the manifest can be either in base-16 or base-32. # The hash in the manifest can be either in base-16 or base-32.
# Handle both. # Handle both.
my $extraFlag = my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath);
($hashAlgo eq "sha256" && length($hash) != 64)
? "--base32" : "";
my $hash2 = `$Nix::Config::binDir/nix-hash --type $hashAlgo $extraFlag $targetPath` die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n"
or die "cannot compute hash of path `$targetPath'";
chomp $hash2;
die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2"
if $hash ne $hash2; if $hash ne $hash2;
} else { } else {
die "cannot check integrity of the downloaded path since its hash is not known"; die "cannot check integrity of the downloaded path since its hash is not known\n";
} }
print STDERR "\n";
print LOGFILE "$$ success\n"; print LOGFILE "$$ success\n";
close LOGFILE; close LOGFILE;

View file

@ -3,11 +3,12 @@
use Nix::SSH; use Nix::SSH;
use Nix::Config; use Nix::Config;
use Nix::Store; use Nix::Store;
use Nix::CopyClosure;
if (scalar @ARGV < 1) { if (scalar @ARGV < 1) {
print STDERR <<EOF print STDERR <<EOF
Usage: nix-copy-closure [--from | --to] HOSTNAME [--sign] [--gzip] PATHS... Usage: nix-copy-closure [--from | --to] HOSTNAME [--sign] [--gzip] [--bzip2] [--xz] PATHS...
EOF EOF
; ;
exit 1; exit 1;
@ -39,8 +40,16 @@ while (@ARGV) {
$sign = 1; $sign = 1;
} }
elsif ($arg eq "--gzip") { elsif ($arg eq "--gzip") {
$compressor = "| gzip"; $compressor = "gzip";
$decompressor = "gunzip |"; $decompressor = "gunzip";
}
elsif ($arg eq "--bzip2") {
$compressor = "bzip2";
$decompressor = "bunzip2";
}
elsif ($arg eq "--xz") {
$compressor = "xz";
$decompressor = "xz -d";
} }
elsif ($arg eq "--from") { elsif ($arg eq "--from") {
$toMode = 0; $toMode = 0;
@ -67,30 +76,7 @@ openSSHConnection $sshHost or die "$0: unable to start SSH\n";
if ($toMode) { # Copy TO the remote machine. if ($toMode) { # Copy TO the remote machine.
Nix::CopyClosure::copyTo($sshHost, [ @sshOpts ], [ @storePaths ], $compressor, $decompressor, $includeOutputs, $dryRun, $sign);
# Get the closure of this path.
my @allStorePaths = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs, map { followLinksToStorePath $_ } @storePaths)));
# Ask the remote host which paths are invalid.
open(READ, "set -f; ssh $sshHost @sshOpts nix-store --check-validity --print-invalid @allStorePaths|");
my @missing = ();
while (<READ>) {
chomp;
push @missing, $_;
}
close READ or die;
# Export the store paths and import them on the remote machine.
if (scalar @missing > 0) {
print STDERR "copying these missing paths:\n";
print STDERR " $_\n" foreach @missing;
unless ($dryRun) {
my $extraOpts = $sign ? "--sign" : "";
system("set -f; nix-store --export $extraOpts @missing $compressor | ssh $sshHost @sshOpts '$decompressor nix-store --import'") == 0
or die "copying store paths to remote machine `$sshHost' failed: $?";
}
}
} }
else { # Copy FROM the remote machine. else { # Copy FROM the remote machine.
@ -110,10 +96,12 @@ else { # Copy FROM the remote machine.
close READ or die "nix-store on remote machine `$sshHost' failed: $?"; close READ or die "nix-store on remote machine `$sshHost' failed: $?";
# Export the store paths on the remote machine and import them on locally. # Export the store paths on the remote machine and import them locally.
if (scalar @missing > 0) { if (scalar @missing > 0) {
print STDERR "copying these missing paths:\n"; print STDERR "copying ", scalar @missing, " missing paths from $sshHost...\n";
print STDERR " $_\n" foreach @missing; #print STDERR " $_\n" foreach @missing;
$compressor = "| $compressor" if $compressor ne "";
$decompressor = "$decompressor |" if $decompressor ne "";
unless ($dryRun) { unless ($dryRun) {
my $extraOpts = $sign ? "--sign" : ""; my $extraOpts = $sign ? "--sign" : "";
system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor $Nix::Config::binDir/nix-store --import") == 0 system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor $Nix::Config::binDir/nix-store --import") == 0

View file

@ -1,165 +1,128 @@
#! @shell@ -e #! @perl@ -w @perlFlags@
url=$1 use strict;
expHash=$2 use File::Basename;
use File::Temp qw(tempdir);
use File::stat;
use Nix::Store;
use Nix::Config;
binDir=@bindir@ my $url = shift;
if [ -n "$NIX_BIN_DIR" ]; then binDir="$NIX_BIN_DIR"; fi my $expHash = shift;
my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256";
my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'};
# needed to make it work on NixOS if (!defined $url || $url eq "") {
export PATH=$PATH:@coreutils@ print STDERR <<EOF
Usage: nix-prefetch-url URL [EXPECTED-HASH]
EOF
;
exit 1;
}
hashType=$NIX_HASH_ALGO sub writeFile {
if test -z "$hashType"; then my ($fn, $s) = @_;
hashType=sha256 open TMP, ">$fn" or die;
fi print TMP "$s" or die;
close TMP or die;
}
hashFormat= sub readFile {
if test "$hashType" != "md5"; then local $/ = undef;
hashFormat=--base32 my ($fn) = @_;
fi open TMP, "<$fn" or die;
my $s = <TMP>;
close TMP or die;
return $s;
}
if test -z "$url"; then my $tmpDir = tempdir("nix-prefetch-url.XXXXXX", CLEANUP => 1, TMPDIR => 1)
echo "syntax: nix-prefetch-url URL [EXPECTED-HASH]" >&2 or die "cannot create a temporary directory";
exit 1
fi # Hack to support the mirror:// scheme from Nixpkgs.
if ($url =~ /^mirror:\/\//) {
system("$Nix::Config::binDir/nix-build '<nixpkgs>' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0
or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n";
my @expanded = split ' ', readFile("$tmpDir/urls");
die "$0: cannot resolve $url" unless scalar @expanded > 0;
print STDERR "$url expands to $expanded[0]\n";
$url = $expanded[0];
}
# Handle escaped characters in the URI. `+', `=' and `?' are the only # Handle escaped characters in the URI. `+', `=' and `?' are the only
# characters that are valid in Nix store path names but have a special # characters that are valid in Nix store path names but have a special
# meaning in URIs. # meaning in URIs.
name=$(basename "$url" | @sed@ -e 's/%2b/+/g' -e 's/%3d/=/g' -e 's/%3f/\?/g') my $name = basename $url;
if test -z "$name"; then echo "invalid url"; exit 1; fi die "cannot figure out file name for $url\n" if $name eq "";
$name =~ s/%2b/+/g;
$name =~ s/%3d/=/g;
$name =~ s/%3f/?/g;
my $finalPath;
my $hash;
# If the hash was given, a file with that hash may already be in the # If the hash was given, a file with that hash may already be in the
# store. # store.
if test -n "$expHash"; then if (defined $expHash) {
finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$expHash" "$name") $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name);
if ! $bindir/nix-store --check-validity "$finalPath" 2> /dev/null; then if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; }
finalPath=
fi
hash=$expHash
fi
mkTempDir() {
if test -n "$tmpPath"; then return; fi
local i=0
while true; do
if test -z "$TMPDIR"; then TMPDIR=/tmp; fi
tmpPath=$TMPDIR/nix-prefetch-url-$$-$i
if mkdir "$tmpPath"; then break; fi
# !!! to bad we can't check for ENOENT in mkdir, so this check
# is slightly racy (it bombs out if somebody just removed
# $tmpPath...).
if ! test -e "$tmpPath"; then exit 1; fi
i=$((i + 1))
done
trap removeTempDir EXIT SIGINT SIGQUIT
} }
removeTempDir() {
if test -n "$tmpPath"; then
rm -rf "$tmpPath" || true
fi
}
doDownload() {
@curl@ $cacheFlags --fail --location --max-redirs 20 --disable-epsv \
--cookie-jar $tmpPath/cookies "$url" -o $tmpFile
}
# Hack to support the mirror:// scheme from Nixpkgs.
if test "${url:0:9}" = "mirror://"; then
if test -z "$NIXPKGS_ALL"; then
echo "Resolving mirror:// URLs requires Nixpkgs. Please point \$NIXPKGS_ALL at a Nixpkgs tree." >&2
exit 1
fi
mkTempDir
nix-build "$NIXPKGS_ALL" -A resolveMirrorURLs --argstr url "$url" -o $tmpPath/urls > /dev/null
expanded=($(cat $tmpPath/urls))
if test "${#expanded[*]}" = 0; then
echo "$0: cannot resolve $url." >&2
exit 1
fi
echo "$url expands to ${expanded[*]} (using ${expanded[0]})" >&2
url="${expanded[0]}"
fi
# If we don't know the hash or a file with that hash doesn't exist, # If we don't know the hash or a file with that hash doesn't exist,
# download the file and add it to the store. # download the file and add it to the store.
if test -z "$finalPath"; then if (!defined $finalPath) {
mkTempDir my $tmpFile = "$tmpDir/$name";
tmpFile=$tmpPath/$name
# Optionally do timestamp-based caching of the download. # Optionally do timestamp-based caching of the download.
# Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is # Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is
# the hash and the timestamp of the file at $url. The caching of # the hash and the timestamp of the file at $url. The caching of
# the file *contents* is done in Nix store, where it can be # the file *contents* is done in Nix store, where it can be
# garbage-collected independently. # garbage-collected independently.
if test -n "$NIX_DOWNLOAD_CACHE"; then my ($cachedTimestampFN, $cachedHashFN, @cacheFlags);
echo -n "$url" > $tmpPath/url if (defined $cacheDir) {
urlHash=$($binDir/nix-hash --type sha256 --base32 --flat $tmpPath/url) my $urlHash = hashString("sha256", 1, $url);
echo "$url" > "$NIX_DOWNLOAD_CACHE/$urlHash.url" writeFile "$cacheDir/$urlHash.url", $url;
cachedHashFN="$NIX_DOWNLOAD_CACHE/$urlHash.$hashType" $cachedHashFN = "$cacheDir/$urlHash.$hashType";
cachedTimestampFN="$NIX_DOWNLOAD_CACHE/$urlHash.stamp" $cachedTimestampFN = "$cacheDir/$urlHash.stamp";
cacheFlags="--remote-time" @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN;
if test -e "$cachedTimestampFN" -a -e "$cachedHashFN"; then }
# Only download the file if it is newer than the cached version.
cacheFlags="$cacheFlags --time-cond $cachedTimestampFN"
fi
fi
# Perform the download. # Perform the download.
doDownload my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || "")));
(system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of $url failed\n";
if test -n "$NIX_DOWNLOAD_CACHE" -a ! -e $tmpFile; then if (defined $cacheDir && ! -e $tmpFile) {
# Curl didn't create $tmpFile, so apparently there's no newer # Curl didn't create $tmpFile, so apparently there's no newer
# file on the server. # file on the server.
hash=$(cat $cachedHashFN) $hash = readFile $cachedHashFN or die;
finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$hash" "$name") $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name);
if ! $binDir/nix-store --check-validity "$finalPath" 2> /dev/null; then unless (isValidPath $finalPath) {
echo "cached contents of \`$url' disappeared, redownloading..." >&2 print STDERR "cached contents of $url disappeared, redownloading...\n";
finalPath= $finalPath = undef;
cacheFlags="--remote-time" (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of $url failed\n";
doDownload }
fi }
fi
if test -z "$finalPath"; then if (!defined $finalPath) {
# Compute the hash. # Compute the hash.
hash=$($binDir/nix-hash --type "$hashType" $hashFormat --flat $tmpFile) $hash = hashFile($hashType, $hashType ne "md5", $tmpFile);
if ! test -n "$QUIET"; then echo "hash is $hash" >&2; fi
if test -n "$NIX_DOWNLOAD_CACHE"; then if (defined $cacheDir) {
echo $hash > $cachedHashFN writeFile $cachedHashFN, $hash;
touch -r $tmpFile $cachedTimestampFN my $st = stat($tmpFile) or die;
fi open STAMP, ">$cachedTimestampFN" or die; close STAMP;
utime($st->atime, $st->mtime, $cachedTimestampFN) or die;
}
# Add the downloaded file to the Nix store. # Add the downloaded file to the Nix store.
finalPath=$($binDir/nix-store --add-fixed "$hashType" $tmpFile) $finalPath = addToStore($tmpFile, 0, $hashType);
}
if test -n "$expHash" -a "$expHash" != "$hash"; then die "$0: hash mismatch for $url\n" if defined $expHash && $expHash ne $hash;
echo "hash mismatch for URL \`$url'" >&2 }
exit 1
fi
fi print STDERR "path is $finalPath\n" unless $ENV{'QUIET'};
fi print "$hash\n";
print "$finalPath\n" if $ENV{'PRINT_PATH'};
if ! test -n "$QUIET"; then echo "path is $finalPath" >&2; fi
echo $hash
if test -n "$PRINT_PATH"; then
echo $finalPath
fi

View file

@ -33,10 +33,6 @@ if (! -l $manifestDirLink) {
# Process the URLs specified on the command line. # Process the URLs specified on the command line.
my %narFiles;
my %patches;
my $skipWrongStore = 0;
sub downloadFile { sub downloadFile {
my $url = shift; my $url = shift;
@ -59,16 +55,7 @@ sub processURL {
# First see if a bzipped manifest is available. # First see if a bzipped manifest is available.
if (system("$Nix::Config::curl --fail --silent --head '$url'.bz2 > /dev/null") == 0) { if (system("$Nix::Config::curl --fail --silent --head '$url'.bz2 > /dev/null") == 0) {
print "fetching list of Nix archives at `$url.bz2'...\n"; print "fetching list of Nix archives at `$url.bz2'...\n";
my $bzipped = downloadFile "$url.bz2"; $manifest = downloadFile "$url.bz2";
$manifest = "$tmpDir/MANIFEST";
system("$Nix::Config::bzip2 -d < $bzipped > $manifest") == 0
or die "cannot decompress manifest";
$manifest = (`$Nix::Config::binDir/nix-store --add $manifest`
or die "cannot copy $manifest to the store");
chomp $manifest;
} }
# Otherwise, just get the uncompressed manifest. # Otherwise, just get the uncompressed manifest.
@ -77,20 +64,6 @@ sub processURL {
$manifest = downloadFile $url; $manifest = downloadFile $url;
} }
my $version = readManifest($manifest, \%narFiles, \%patches);
die "`$url' is not a manifest or it is too old (i.e., for Nix <= 0.7)\n" if $version < 3;
die "manifest `$url' is too new\n" if $version >= 5;
if ($skipWrongStore) {
foreach my $path (keys %narFiles) {
if (substr($path, 0, length($storeDir) + 1) ne "$storeDir/") {
print STDERR "warning: manifest `$url' assumes a Nix store at a different location than $storeDir, skipping...\n";
exit 0;
}
}
}
my $baseName = "unnamed"; my $baseName = "unnamed";
if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component
$baseName = $1; $baseName = $1;
@ -129,12 +102,12 @@ sub processURL {
while (@ARGV) { while (@ARGV) {
my $url = shift @ARGV; my $url = shift @ARGV;
if ($url eq "--skip-wrong-store") { if ($url eq "--skip-wrong-store") {
$skipWrongStore = 1; # No-op, no longer supported.
} else { } else {
processURL $url; processURL $url;
} }
} }
my $size = scalar (keys %narFiles); # Update the cache.
print "$size store paths in manifest\n"; updateManifestDB();

View file

@ -198,8 +198,8 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
# In some exceptional cases (such as VM tests that use the Nix # In some exceptional cases (such as VM tests that use the Nix
# store of the host), the database doesn't contain the hash. So # store of the host), the database doesn't contain the hash. So
# compute it. # compute it.
if ($narHash eq "sha256:0000000000000000000000000000000000000000000000000000") { if ($narHash =~ /^sha256:0*$/) {
$narHash = `$binDir/nix-hash --type sha256 '$storePath'`; $narHash = `$binDir/nix-hash --type sha256 --base32 '$storePath'`;
die "cannot hash `$storePath'" if $? != 0; die "cannot hash `$storePath'" if $? != 0;
chomp $narHash; chomp $narHash;
$narHash = "sha256:$narHash"; $narHash = "sha256:$narHash";

View file

@ -44,4 +44,15 @@ bool parseSearchPathArg(const string & arg, Strings::iterator & i,
} }
Path lookupFileArg(EvalState & state, string s)
{
if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
Path p = state.findFile(s.substr(1, s.size() - 2));
if (p == "") throw Error(format("file `%1%' was not found in the Nix search path (add it using $NIX_PATH or -I)") % p);
return p;
} else
return absPath(s);
}
} }

View file

@ -14,6 +14,8 @@ bool parseOptionArg(const string & arg, Strings::iterator & i,
bool parseSearchPathArg(const string & arg, Strings::iterator & i, bool parseSearchPathArg(const string & arg, Strings::iterator & i,
const Strings::iterator & argsEnd, EvalState & state); const Strings::iterator & argsEnd, EvalState & state);
Path lookupFileArg(EvalState & state, string s);
} }

View file

@ -365,7 +365,6 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
if (isDerivation(*j)) if (isDerivation(*j))
drv.inputDrvs[*j] = store->queryDerivationOutputNames(*j); drv.inputDrvs[*j] = store->queryDerivationOutputNames(*j);
} }
explicitlyPassed = true; explicitlyPassed = true;
} else if (path.at(0) == '!') { } else if (path.at(0) == '!') {
size_t index; size_t index;
@ -416,17 +415,7 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
HashType ht = parseHashType(outputHashAlgo); HashType ht = parseHashType(outputHashAlgo);
if (ht == htUnknown) if (ht == htUnknown)
throw EvalError(format("unknown hash algorithm `%1%'") % outputHashAlgo); throw EvalError(format("unknown hash algorithm `%1%'") % outputHashAlgo);
Hash h(ht); Hash h = parseHash16or32(ht, outputHash);
if (outputHash.size() == h.hashSize * 2)
/* hexadecimal representation */
h = parseHash(ht, outputHash);
else if (outputHash.size() == hashLength32(h))
/* base-32 representation */
h = parseHash32(ht, outputHash);
else
throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
% outputHash % outputHashAlgo);
string s = outputHash;
outputHash = printHash(h); outputHash = printHash(h);
if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo; if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;

View file

@ -7,13 +7,6 @@ libmain_la_LIBADD = ../libstore/libstore.la @BDW_GC_LIBS@
pkginclude_HEADERS = shared.hh pkginclude_HEADERS = shared.hh
AM_CXXFLAGS = \ AM_CXXFLAGS = \
-DNIX_STORE_DIR=\"$(storedir)\" \
-DNIX_DATA_DIR=\"$(datadir)\" \
-DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
-DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
-DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
-DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
-DNIX_BIN_DIR=\"$(bindir)\" \
-DNIX_VERSION=\"$(VERSION)\" \ -DNIX_VERSION=\"$(VERSION)\" \
-I$(srcdir)/.. -I$(srcdir)/../libutil \ -I$(srcdir)/.. -I$(srcdir)/../libutil \
-I$(srcdir)/../libstore -I$(srcdir)/../libstore

View file

@ -65,7 +65,7 @@ void printMissing(StoreAPI & store, const PathSet & paths)
} }
if (!willSubstitute.empty()) { if (!willSubstitute.empty()) {
printMsg(lvlInfo, format("these paths will be downloaded/copied (%.2f MiB download, %.2f MiB unpacked):") printMsg(lvlInfo, format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
% (downloadSize / (1024.0 * 1024.0)) % (downloadSize / (1024.0 * 1024.0))
% (narSize / (1024.0 * 1024.0))); % (narSize / (1024.0 * 1024.0)));
foreach (PathSet::iterator, i, willSubstitute) foreach (PathSet::iterator, i, willSubstitute)
@ -90,23 +90,6 @@ static void setLogType(string lt)
} }
static void closeStore()
{
try {
throw;
} catch (std::exception & e) {
printMsg(lvlError,
format("FATAL: unexpected exception (closing store and aborting): %1%") % e.what());
}
try {
store.reset((StoreAPI *) 0);
} catch (...) {
ignoreException();
}
abort();
}
RemoveTempRoots::~RemoveTempRoots() RemoveTempRoots::~RemoveTempRoots()
{ {
removeTempRoots(); removeTempRoots();
@ -120,29 +103,7 @@ static bool showTrace = false;
processor. */ processor. */
static void initAndRun(int argc, char * * argv) static void initAndRun(int argc, char * * argv)
{ {
/* Setup Nix paths. */ setDefaultsFromEnvironment();
nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
string subs = getEnv("NIX_SUBSTITUTERS", "default");
if (subs == "default") {
substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
} else
substituters = tokenizeString(subs, ":");
/* Get some settings from the configuration file. */
thisSystem = querySetting("system", SYSTEM);
maxBuildJobs = queryIntSetting("build-max-jobs", 1);
buildCores = queryIntSetting("build-cores", 1);
maxSilentTime = queryIntSetting("build-max-silent-time", 0);
buildTimeout = queryIntSetting("build-timeout", 0);
/* Catch SIGINT. */ /* Catch SIGINT. */
struct sigaction act; struct sigaction act;
@ -260,12 +221,6 @@ static void initAndRun(int argc, char * * argv)
exit. */ exit. */
RemoveTempRoots removeTempRoots __attribute__((unused)); RemoveTempRoots removeTempRoots __attribute__((unused));
/* Make sure that the database gets closed properly, even if
terminate() is called (which happens sometimes due to bugs in
destructor/exceptions interaction, but that needn't preclude a
clean shutdown of the database). */
std::set_terminate(closeStore);
run(remaining); run(remaining);
/* Close the Nix database. */ /* Close the Nix database. */

View file

@ -15,7 +15,16 @@ libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la ${aterm_
EXTRA_DIST = schema.sql EXTRA_DIST = schema.sql
AM_CXXFLAGS = -Wall \ AM_CXXFLAGS = -Wall \
${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil \
-DNIX_STORE_DIR=\"$(storedir)\" \
-DNIX_DATA_DIR=\"$(datadir)\" \
-DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
-DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
-DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
-DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
-DNIX_BIN_DIR=\"$(bindir)\" \
-I$(srcdir)/.. -I$(srcdir)/../libutil \
-I$(srcdir)/../libstore
local-store.lo: schema.sql.hh local-store.lo: schema.sql.hh

View file

@ -1650,6 +1650,9 @@ void DerivationGoal::startBuilder()
(format("nixbld:!:%1%:\n") (format("nixbld:!:%1%:\n")
% (buildUser.enabled() ? buildUser.getGID() : getgid())).str()); % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
/* Create /etc/hosts with localhost entry. */
writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
/* Bind-mount a user-configurable set of directories from the /* Bind-mount a user-configurable set of directories from the
host file system. The `/dev/pts' directory must be mounted host file system. The `/dev/pts' directory must be mounted
separately so that newly-created pseudo-terminals show separately so that newly-created pseudo-terminals show
@ -2199,9 +2202,7 @@ void SubstitutionGoal::tryNext()
if (subs.size() == 0) { if (subs.size() == 0) {
/* None left. Terminate this goal and let someone else deal /* None left. Terminate this goal and let someone else deal
with it. */ with it. */
printMsg(lvlError, debug(format("path `%1%' is required, but there is no substituter that can build it") % storePath);
format("path `%1%' is required, but there is no substituter that can build it")
% storePath);
amDone(ecFailed); amDone(ecFailed);
return; return;
} }
@ -2232,8 +2233,7 @@ void SubstitutionGoal::referencesValid()
trace("all references realised"); trace("all references realised");
if (nrFailed > 0) { if (nrFailed > 0) {
printMsg(lvlError, debug(format("some references of path `%1%' could not be realised") % storePath);
format("some references of path `%1%' could not be realised") % storePath);
amDone(ecFailed); amDone(ecFailed);
return; return;
} }
@ -2286,9 +2286,7 @@ void SubstitutionGoal::tryToRun()
return; return;
} }
printMsg(lvlInfo, printMsg(lvlInfo, format("fetching path `%1%'...") % storePath);
format("substituting path `%1%' using substituter `%2%'")
% storePath % sub);
logPipe.create(); logPipe.create();
@ -2364,19 +2362,15 @@ void SubstitutionGoal::finished()
try { try {
if (!statusOk(status)) if (!statusOk(status))
throw SubstError(format("builder for `%1%' %2%") throw SubstError(format("fetching path `%1%' %2%")
% storePath % statusToString(status)); % storePath % statusToString(status));
if (!pathExists(storePath)) if (!pathExists(storePath))
throw SubstError( throw SubstError(format("substitute did not produce path `%1%'") % storePath);
format("substitute did not produce path `%1%'")
% storePath);
} catch (SubstError & e) { } catch (SubstError & e) {
printMsg(lvlInfo, printMsg(lvlInfo, e.msg());
format("substitution of path `%1%' using substituter `%2%' failed: %3%")
% storePath % sub % e.msg());
if (printBuildTrace) { if (printBuildTrace) {
printMsg(lvlError, format("@ substituter-failed %1% %2% %3%") printMsg(lvlError, format("@ substituter-failed %1% %2% %3%")

View file

@ -1,3 +1,5 @@
#include "config.h"
#include "globals.hh" #include "globals.hh"
#include "util.hh" #include "util.hh"
@ -139,4 +141,32 @@ void reloadSettings()
} }
void setDefaultsFromEnvironment()
{
/* Setup Nix paths. */
nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
string subs = getEnv("NIX_SUBSTITUTERS", "default");
if (subs == "default") {
substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
} else
substituters = tokenizeString(subs, ":");
/* Get some settings from the configuration file. */
thisSystem = querySetting("system", SYSTEM);
maxBuildJobs = queryIntSetting("build-max-jobs", 1);
buildCores = queryIntSetting("build-cores", 1);
maxSilentTime = queryIntSetting("build-max-silent-time", 0);
buildTimeout = queryIntSetting("build-timeout", 0);
}
} }

View file

@ -114,6 +114,8 @@ void overrideSetting(const string & name, const Strings & value);
void reloadSettings(); void reloadSettings();
void setDefaultsFromEnvironment();
} }

View file

@ -327,10 +327,9 @@ void LocalStore::openDB(bool create)
if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK) if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "setting synchronous mode"); throwSQLiteError(db, "setting synchronous mode");
/* Set the SQLite journal mode. WAL mode is fastest, but doesn't /* Set the SQLite journal mode. WAL mode is fastest, so it's the
seem entirely stable at the moment (Oct. 2010). Thus, use default. */
truncate mode by default. */ string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate";
string mode = queryBoolSetting("use-sqlite-wal", false) ? "wal" : "truncate";
string prevMode; string prevMode;
{ {
SQLiteStmt stmt; SQLiteStmt stmt;
@ -367,7 +366,7 @@ void LocalStore::openDB(bool create)
stmtRegisterValidPath.create(db, stmtRegisterValidPath.create(db,
"insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);"); "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
stmtUpdatePathInfo.create(db, stmtUpdatePathInfo.create(db,
"update ValidPaths set narSize = ? where path = ?;"); "update ValidPaths set narSize = ?, hash = ? where path = ?;");
stmtAddReference.create(db, stmtAddReference.create(db,
"insert or replace into Refs (referrer, reference) values (?, ?);"); "insert or replace into Refs (referrer, reference) values (?, ?);");
stmtQueryPathInfo.create(db, stmtQueryPathInfo.create(db,
@ -684,7 +683,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path)
} }
/* Update path info in the database. Currently only updated the /* Update path info in the database. Currently only updates the
narSize field. */ narSize field. */
void LocalStore::updatePathInfo(const ValidPathInfo & info) void LocalStore::updatePathInfo(const ValidPathInfo & info)
{ {
@ -693,6 +692,7 @@ void LocalStore::updatePathInfo(const ValidPathInfo & info)
stmtUpdatePathInfo.bind64(info.narSize); stmtUpdatePathInfo.bind64(info.narSize);
else else
stmtUpdatePathInfo.bind(); // null stmtUpdatePathInfo.bind(); // null
stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash));
stmtUpdatePathInfo.bind(info.path); stmtUpdatePathInfo.bind(info.path);
if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE) if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE)
throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path); throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path);
@ -1125,16 +1125,14 @@ struct HashAndWriteSink : Sink
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256) HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
{ {
} }
virtual void operator () virtual void operator () (const unsigned char * data, size_t len)
(const unsigned char * data, unsigned int len)
{ {
writeSink(data, len); writeSink(data, len);
hashSink(data, len); hashSink(data, len);
} }
Hash currentHash() Hash currentHash()
{ {
HashSink hashSinkClone(hashSink); return hashSink.currentHash().first;
return hashSinkClone.finish().first;
} }
}; };
@ -1180,7 +1178,7 @@ void LocalStore::exportPath(const Path & path, bool sign,
PathSet references; PathSet references;
queryReferences(path, references); queryReferences(path, references);
writeStringSet(references, hashAndWriteSink); writeStrings(references, hashAndWriteSink);
Path deriver = queryDeriver(path); Path deriver = queryDeriver(path);
writeString(deriver, hashAndWriteSink); writeString(deriver, hashAndWriteSink);
@ -1223,11 +1221,11 @@ struct HashAndReadSource : Source
{ {
hashing = true; hashing = true;
} }
virtual void operator () size_t read(unsigned char * data, size_t len)
(unsigned char * data, unsigned int len)
{ {
readSource(data, len); size_t n = readSource.read(data, len);
if (hashing) hashSink(data, len); if (hashing) hashSink(data, n);
return n;
} }
}; };
@ -1267,7 +1265,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
Path dstPath = readStorePath(hashAndReadSource); Path dstPath = readStorePath(hashAndReadSource);
PathSet references = readStorePaths(hashAndReadSource); PathSet references = readStorePaths<PathSet>(hashAndReadSource);
Path deriver = readString(hashAndReadSource); Path deriver = readString(hashAndReadSource);
if (deriver != "") assertStorePath(deriver); if (deriver != "") assertStorePath(deriver);
@ -1278,7 +1276,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
bool haveSignature = readInt(hashAndReadSource) == 1; bool haveSignature = readInt(hashAndReadSource) == 1;
if (requireSignature && !haveSignature) if (requireSignature && !haveSignature)
throw Error("imported archive lacks a signature"); throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);
if (haveSignature) { if (haveSignature) {
string signature = readString(hashAndReadSource); string signature = readString(hashAndReadSource);
@ -1354,6 +1352,19 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
} }
Paths LocalStore::importPaths(bool requireSignature, Source & source)
{
Paths res;
while (true) {
unsigned long long n = readLongLong(source);
if (n == 0) break;
if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'");
res.push_back(importPath(requireSignature, source));
}
return res;
}
void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed, void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed,
unsigned long long & blocksFreed) unsigned long long & blocksFreed)
{ {
@ -1369,7 +1380,7 @@ void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFr
PathSet referrers; queryReferrers(path, referrers); PathSet referrers; queryReferrers(path, referrers);
referrers.erase(path); /* ignore self-references */ referrers.erase(path); /* ignore self-references */
if (!referrers.empty()) if (!referrers.empty())
throw PathInUse(format("cannot delete path `%1%' because it is in use by `%2%'") throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%")
% path % showPaths(referrers)); % path % showPaths(referrers));
invalidatePath(path); invalidatePath(path);
} }
@ -1409,6 +1420,8 @@ void LocalStore::verifyStore(bool checkContents)
if (checkContents) { if (checkContents) {
printMsg(lvlInfo, "checking hashes..."); printMsg(lvlInfo, "checking hashes...");
Hash nullHash(htSHA256);
foreach (PathSet::iterator, i, validPaths) { foreach (PathSet::iterator, i, validPaths) {
try { try {
ValidPathInfo info = queryPathInfo(*i); ValidPathInfo info = queryPathInfo(*i);
@ -1417,17 +1430,30 @@ void LocalStore::verifyStore(bool checkContents)
printMsg(lvlTalkative, format("checking contents of `%1%'") % *i); printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
HashResult current = hashPath(info.hash.type, *i); HashResult current = hashPath(info.hash.type, *i);
if (current.first != info.hash) { if (info.hash != nullHash && info.hash != current.first) {
printMsg(lvlError, format("path `%1%' was modified! " printMsg(lvlError, format("path `%1%' was modified! "
"expected hash `%2%', got `%3%'") "expected hash `%2%', got `%3%'")
% *i % printHash(info.hash) % printHash(current.first)); % *i % printHash(info.hash) % printHash(current.first));
} else { } else {
bool update = false;
/* Fill in missing hashes. */
if (info.hash == nullHash) {
printMsg(lvlError, format("fixing missing hash on `%1%'") % *i);
info.hash = current.first;
update = true;
}
/* Fill in missing narSize fields (from old stores). */ /* Fill in missing narSize fields (from old stores). */
if (info.narSize == 0) { if (info.narSize == 0) {
printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second); printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
info.narSize = current.second; info.narSize = current.second;
updatePathInfo(info); update = true;
} }
if (update) updatePathInfo(info);
} }
} catch (Error & e) { } catch (Error & e) {

View file

@ -148,7 +148,7 @@ public:
void exportPath(const Path & path, bool sign, void exportPath(const Path & path, bool sign,
Sink & sink); Sink & sink);
Path importPath(bool requireSignature, Source & source); Paths importPaths(bool requireSignature, Source & source);
void buildDerivations(const PathSet & drvPaths); void buildDerivations(const PathSet & drvPaths);
@ -261,6 +261,8 @@ private:
Path createTempDirInStore(); Path createTempDirInStore();
Path importPath(bool requireSignature, Source & source);
void checkDerivationOutputs(const Path & drvPath, const Derivation & drv); void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
}; };

View file

@ -57,11 +57,11 @@ struct RefScanSink : Sink
RefScanSink() : hashSink(htSHA256) { } RefScanSink() : hashSink(htSHA256) { }
void operator () (const unsigned char * data, unsigned int len); void operator () (const unsigned char * data, size_t len);
}; };
void RefScanSink::operator () (const unsigned char * data, unsigned int len) void RefScanSink::operator () (const unsigned char * data, size_t len)
{ {
hashSink(data, len); hashSink(data, len);

View file

@ -27,13 +27,15 @@ Path readStorePath(Source & from)
} }
PathSet readStorePaths(Source & from) template<class T> T readStorePaths(Source & from)
{ {
PathSet paths = readStringSet(from); T paths = readStrings<T>(from);
foreach (PathSet::iterator, i, paths) assertStorePath(*i); foreach (typename T::iterator, i, paths) assertStorePath(*i);
return paths; return paths;
} }
template PathSet readStorePaths(Source & from);
RemoteStore::RemoteStore() RemoteStore::RemoteStore()
{ {
@ -65,6 +67,7 @@ void RemoteStore::openConnection()
/* Send the magic greeting, check for the reply. */ /* Send the magic greeting, check for the reply. */
try { try {
writeInt(WORKER_MAGIC_1, to); writeInt(WORKER_MAGIC_1, to);
to.flush();
unsigned int magic = readInt(from); unsigned int magic = readInt(from);
if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch"); if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
@ -166,6 +169,7 @@ void RemoteStore::connectToDaemon()
RemoteStore::~RemoteStore() RemoteStore::~RemoteStore()
{ {
try { try {
to.flush();
fdSocket.close(); fdSocket.close();
if (child != -1) if (child != -1)
child.wait(true); child.wait(true);
@ -213,7 +217,7 @@ PathSet RemoteStore::queryValidPaths()
openConnection(); openConnection();
writeInt(wopQueryValidPaths, to); writeInt(wopQueryValidPaths, to);
processStderr(); processStderr();
return readStorePaths(from); return readStorePaths<PathSet>(from);
} }
@ -240,7 +244,7 @@ bool RemoteStore::querySubstitutablePathInfo(const Path & path,
if (reply == 0) return false; if (reply == 0) return false;
info.deriver = readString(from); info.deriver = readString(from);
if (info.deriver != "") assertStorePath(info.deriver); if (info.deriver != "") assertStorePath(info.deriver);
info.references = readStorePaths(from); info.references = readStorePaths<PathSet>(from);
info.downloadSize = readLongLong(from); info.downloadSize = readLongLong(from);
info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0; info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
return true; return true;
@ -258,7 +262,7 @@ ValidPathInfo RemoteStore::queryPathInfo(const Path & path)
info.deriver = readString(from); info.deriver = readString(from);
if (info.deriver != "") assertStorePath(info.deriver); if (info.deriver != "") assertStorePath(info.deriver);
info.hash = parseHash(htSHA256, readString(from)); info.hash = parseHash(htSHA256, readString(from));
info.references = readStorePaths(from); info.references = readStorePaths<PathSet>(from);
info.registrationTime = readInt(from); info.registrationTime = readInt(from);
info.narSize = readLongLong(from); info.narSize = readLongLong(from);
return info; return info;
@ -283,7 +287,7 @@ void RemoteStore::queryReferences(const Path & path,
writeInt(wopQueryReferences, to); writeInt(wopQueryReferences, to);
writeString(path, to); writeString(path, to);
processStderr(); processStderr();
PathSet references2 = readStorePaths(from); PathSet references2 = readStorePaths<PathSet>(from);
references.insert(references2.begin(), references2.end()); references.insert(references2.begin(), references2.end());
} }
@ -295,7 +299,7 @@ void RemoteStore::queryReferrers(const Path & path,
writeInt(wopQueryReferrers, to); writeInt(wopQueryReferrers, to);
writeString(path, to); writeString(path, to);
processStderr(); processStderr();
PathSet referrers2 = readStorePaths(from); PathSet referrers2 = readStorePaths<PathSet>(from);
referrers.insert(referrers2.begin(), referrers2.end()); referrers.insert(referrers2.begin(), referrers2.end());
} }
@ -318,7 +322,7 @@ PathSet RemoteStore::queryDerivationOutputs(const Path & path)
writeInt(wopQueryDerivationOutputs, to); writeInt(wopQueryDerivationOutputs, to);
writeString(path, to); writeString(path, to);
processStderr(); processStderr();
return readStorePaths(from); return readStorePaths<PathSet>(from);
} }
@ -358,7 +362,7 @@ Path RemoteStore::addTextToStore(const string & name, const string & s,
writeInt(wopAddTextToStore, to); writeInt(wopAddTextToStore, to);
writeString(name, to); writeString(name, to);
writeString(s, to); writeString(s, to);
writeStringSet(references, to); writeStrings(references, to);
processStderr(); processStderr();
return readStorePath(from); return readStorePath(from);
@ -377,14 +381,14 @@ void RemoteStore::exportPath(const Path & path, bool sign,
} }
Path RemoteStore::importPath(bool requireSignature, Source & source) Paths RemoteStore::importPaths(bool requireSignature, Source & source)
{ {
openConnection(); openConnection();
writeInt(wopImportPath, to); writeInt(wopImportPaths, to);
/* We ignore requireSignature, since the worker forces it to true /* We ignore requireSignature, since the worker forces it to true
anyway. */ anyway. */
processStderr(0, &source); processStderr(0, &source);
return readStorePath(from); return readStorePaths<Paths>(from);
} }
@ -392,7 +396,7 @@ void RemoteStore::buildDerivations(const PathSet & drvPaths)
{ {
openConnection(); openConnection();
writeInt(wopBuildDerivations, to); writeInt(wopBuildDerivations, to);
writeStringSet(drvPaths, to); writeStrings(drvPaths, to);
processStderr(); processStderr();
readInt(from); readInt(from);
} }
@ -459,7 +463,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
writeInt(wopCollectGarbage, to); writeInt(wopCollectGarbage, to);
writeInt(options.action, to); writeInt(options.action, to);
writeStringSet(options.pathsToDelete, to); writeStrings(options.pathsToDelete, to);
writeInt(options.ignoreLiveness, to); writeInt(options.ignoreLiveness, to);
writeLongLong(options.maxFreed, to); writeLongLong(options.maxFreed, to);
writeInt(options.maxLinks, to); writeInt(options.maxLinks, to);
@ -471,7 +475,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
processStderr(); processStderr();
results.paths = readStringSet(from); results.paths = readStrings<PathSet>(from);
results.bytesFreed = readLongLong(from); results.bytesFreed = readLongLong(from);
results.blocksFreed = readLongLong(from); results.blocksFreed = readLongLong(from);
} }
@ -482,7 +486,7 @@ PathSet RemoteStore::queryFailedPaths()
openConnection(); openConnection();
writeInt(wopQueryFailedPaths, to); writeInt(wopQueryFailedPaths, to);
processStderr(); processStderr();
return readStorePaths(from); return readStorePaths<PathSet>(from);
} }
@ -490,7 +494,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths)
{ {
openConnection(); openConnection();
writeInt(wopClearFailedPaths, to); writeInt(wopClearFailedPaths, to);
writeStringSet(paths, to); writeStrings(paths, to);
processStderr(); processStderr();
readInt(from); readInt(from);
} }
@ -498,6 +502,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths)
void RemoteStore::processStderr(Sink * sink, Source * source) void RemoteStore::processStderr(Sink * sink, Source * source)
{ {
to.flush();
unsigned int msg; unsigned int msg;
while ((msg = readInt(from)) == STDERR_NEXT while ((msg = readInt(from)) == STDERR_NEXT
|| msg == STDERR_READ || msg == STDERR_WRITE) { || msg == STDERR_READ || msg == STDERR_WRITE) {
@ -508,11 +513,11 @@ void RemoteStore::processStderr(Sink * sink, Source * source)
} }
else if (msg == STDERR_READ) { else if (msg == STDERR_READ) {
if (!source) throw Error("no source"); if (!source) throw Error("no source");
unsigned int len = readInt(from); size_t len = readInt(from);
unsigned char * buf = new unsigned char[len]; unsigned char * buf = new unsigned char[len];
AutoDeleteArray<unsigned char> d(buf); AutoDeleteArray<unsigned char> d(buf);
(*source)(buf, len); writeString(buf, source->read(buf, len), to);
writeString(string((const char *) buf, len), to); to.flush();
} }
else { else {
string s = readString(from); string s = readString(from);

View file

@ -58,7 +58,7 @@ public:
void exportPath(const Path & path, bool sign, void exportPath(const Path & path, bool sign,
Sink & sink); Sink & sink);
Path importPath(bool requireSignature, Source & source); Paths importPaths(bool requireSignature, Source & source);
void buildDerivations(const PathSet & drvPaths); void buildDerivations(const PathSet & drvPaths);

View file

@ -298,6 +298,17 @@ string showPaths(const PathSet & paths)
} }
void exportPaths(StoreAPI & store, const Paths & paths,
bool sign, Sink & sink)
{
foreach (Paths::const_iterator, i, paths) {
writeInt(1, sink);
store.exportPath(*i, sign, sink);
}
writeInt(0, sink);
}
} }

View file

@ -154,9 +154,7 @@ public:
/* Copy the contents of a path to the store and register the /* Copy the contents of a path to the store and register the
validity the resulting path. The resulting path is returned. validity the resulting path. The resulting path is returned.
If `fixed' is true, then the output of a fixed-output The function object `filter' can be used to exclude files (see
derivation is pre-loaded into the Nix store. The function
object `filter' can be used to exclude files (see
libutil/archive.hh). */ libutil/archive.hh). */
virtual Path addToStore(const Path & srcPath, virtual Path addToStore(const Path & srcPath,
bool recursive = true, HashType hashAlgo = htSHA256, bool recursive = true, HashType hashAlgo = htSHA256,
@ -174,9 +172,9 @@ public:
virtual void exportPath(const Path & path, bool sign, virtual void exportPath(const Path & path, bool sign,
Sink & sink) = 0; Sink & sink) = 0;
/* Import a NAR dump created by exportPath() into the Nix /* Import a sequence of NAR dumps created by exportPaths() into
store. */ the Nix store. */
virtual Path importPath(bool requireSignature, Source & source) = 0; virtual Paths importPaths(bool requireSignature, Source & source) = 0;
/* Ensure that the output paths of the derivation are valid. If /* Ensure that the output paths of the derivation are valid. If
they are already valid, this is a no-op. Otherwise, validity they are already valid, this is a no-op. Otherwise, validity
@ -345,6 +343,12 @@ ValidPathInfo decodeValidPathInfo(std::istream & str,
bool hashGiven = false); bool hashGiven = false);
/* Export multiple paths in the format expected by nix-store
--import. */
void exportPaths(StoreAPI & store, const Paths & paths,
bool sign, Sink & sink);
} }

View file

@ -8,7 +8,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f #define WORKER_MAGIC_2 0x6478696f
#define PROTOCOL_VERSION 0x108 #define PROTOCOL_VERSION 0x109
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@ -29,7 +29,6 @@ typedef enum {
wopSyncWithGC = 13, wopSyncWithGC = 13,
wopFindRoots = 14, wopFindRoots = 14,
wopExportPath = 16, wopExportPath = 16,
wopImportPath = 17,
wopQueryDeriver = 18, wopQueryDeriver = 18,
wopSetOptions = 19, wopSetOptions = 19,
wopCollectGarbage = 20, wopCollectGarbage = 20,
@ -39,7 +38,8 @@ typedef enum {
wopQueryFailedPaths = 24, wopQueryFailedPaths = 24,
wopClearFailedPaths = 25, wopClearFailedPaths = 25,
wopQueryPathInfo = 26, wopQueryPathInfo = 26,
wopQueryDerivationOutputNames = 27, wopImportPaths = 27,
wopQueryDerivationOutputNames = 28,
} WorkerOp; } WorkerOp;
@ -59,7 +59,7 @@ typedef enum {
Path readStorePath(Source & from); Path readStorePath(Source & from);
PathSet readStorePaths(Source & from); template<class T> T readStorePaths(Source & from);
} }

View file

@ -204,6 +204,22 @@ Hash parseHash32(HashType ht, const string & s)
} }
Hash parseHash16or32(HashType ht, const string & s)
{
Hash hash(ht);
if (s.size() == hash.hashSize * 2)
/* hexadecimal representation */
hash = parseHash(ht, s);
else if (s.size() == hashLength32(hash))
/* base-32 representation */
hash = parseHash32(ht, s);
else
throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
% s % printHashType(ht));
return hash;
}
bool isHash(const string & s) bool isHash(const string & s)
{ {
if (s.length() != 32) return false; if (s.length() != 32) return false;
@ -290,21 +306,13 @@ HashSink::HashSink(HashType ht) : ht(ht)
start(ht, *ctx); start(ht, *ctx);
} }
HashSink::HashSink(const HashSink & h)
{
ht = h.ht;
bytes = h.bytes;
ctx = new Ctx;
*ctx = *h.ctx;
}
HashSink::~HashSink() HashSink::~HashSink()
{ {
bufPos = 0;
delete ctx; delete ctx;
} }
void HashSink::operator () void HashSink::write(const unsigned char * data, size_t len)
(const unsigned char * data, unsigned int len)
{ {
bytes += len; bytes += len;
update(ht, *ctx, data, len); update(ht, *ctx, data, len);
@ -312,11 +320,21 @@ void HashSink::operator ()
HashResult HashSink::finish() HashResult HashSink::finish()
{ {
flush();
Hash hash(ht); Hash hash(ht);
nix::finish(ht, *ctx, hash.hash); nix::finish(ht, *ctx, hash.hash);
return HashResult(hash, bytes); return HashResult(hash, bytes);
} }
HashResult HashSink::currentHash()
{
flush();
Ctx ctx2 = *ctx;
Hash hash(ht);
nix::finish(ht, ctx2, hash.hash);
return HashResult(hash, bytes);
}
HashResult hashPath( HashResult hashPath(
HashType ht, const Path & path, PathFilter & filter) HashType ht, const Path & path, PathFilter & filter)

View file

@ -58,6 +58,9 @@ string printHash32(const Hash & hash);
/* Parse a base-32 representation of a hash code. */ /* Parse a base-32 representation of a hash code. */
Hash parseHash32(HashType ht, const string & s); Hash parseHash32(HashType ht, const string & s);
/* Parse a base-16 or base-32 representation of a hash code. */
Hash parseHash16or32(HashType ht, const string & s);
/* Verify that the given string is a valid hash code. */ /* Verify that the given string is a valid hash code. */
bool isHash(const string & s); bool isHash(const string & s);
@ -88,7 +91,7 @@ string printHashType(HashType ht);
union Ctx; union Ctx;
class HashSink : public Sink class HashSink : public BufferedSink
{ {
private: private:
HashType ht; HashType ht;
@ -99,8 +102,9 @@ public:
HashSink(HashType ht); HashSink(HashType ht);
HashSink(const HashSink & h); HashSink(const HashSink & h);
~HashSink(); ~HashSink();
virtual void operator () (const unsigned char * data, unsigned int len); void write(const unsigned char * data, size_t len);
HashResult finish(); HashResult finish();
HashResult currentHash();
}; };

View file

@ -2,24 +2,117 @@
#include "util.hh" #include "util.hh"
#include <cstring> #include <cstring>
#include <cerrno>
namespace nix { namespace nix {
void FdSink::operator () (const unsigned char * data, unsigned int len) BufferedSink::~BufferedSink()
{
/* We can't call flush() here, because C++ for some insane reason
doesn't allow you to call virtual methods from a destructor. */
assert(!bufPos);
if (buffer) delete[] buffer;
}
void BufferedSink::operator () (const unsigned char * data, size_t len)
{
if (!buffer) buffer = new unsigned char[bufSize];
while (len) {
/* Optimisation: bypass the buffer if the data exceeds the
buffer size. */
if (bufPos + len >= bufSize) {
flush();
write(data, len);
break;
}
/* Otherwise, copy the bytes to the buffer. Flush the buffer
when it's full. */
size_t n = bufPos + len > bufSize ? bufSize - bufPos : len;
memcpy(buffer + bufPos, data, n);
data += n; bufPos += n; len -= n;
if (bufPos == bufSize) flush();
}
}
void BufferedSink::flush()
{
if (bufPos == 0) return;
size_t n = bufPos;
bufPos = 0; // don't trigger the assert() in ~BufferedSink()
write(buffer, n);
}
FdSink::~FdSink()
{
try { flush(); } catch (...) { ignoreException(); }
}
void FdSink::write(const unsigned char * data, size_t len)
{ {
writeFull(fd, data, len); writeFull(fd, data, len);
} }
void FdSource::operator () (unsigned char * data, unsigned int len) void Source::operator () (unsigned char * data, size_t len)
{ {
readFull(fd, data, len); while (len) {
size_t n = read(data, len);
data += n; len -= n;
}
} }
void writePadding(unsigned int len, Sink & sink) BufferedSource::~BufferedSource()
{
if (buffer) delete[] buffer;
}
size_t BufferedSource::read(unsigned char * data, size_t len)
{
if (!buffer) buffer = new unsigned char[bufSize];
if (!bufPosIn) bufPosIn = readUnbuffered(buffer, bufSize);
/* Copy out the data in the buffer. */
size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
memcpy(data, buffer + bufPosOut, n);
bufPosOut += n;
if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
return n;
}
size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
{
ssize_t n;
do {
checkInterrupt();
n = ::read(fd, (char *) data, bufSize);
} while (n == -1 && errno == EINTR);
if (n == -1) throw SysError("reading from file");
if (n == 0) throw EndOfFile("unexpected end-of-file");
return n;
}
size_t StringSource::read(unsigned char * data, size_t len)
{
if (pos == s.size()) throw EndOfFile("end of string reached");
size_t n = s.copy((char *) data, len, pos);
pos += n;
return n;
}
void writePadding(size_t len, Sink & sink)
{ {
if (len % 8) { if (len % 8) {
unsigned char zero[8]; unsigned char zero[8];
@ -56,28 +149,36 @@ void writeLongLong(unsigned long long n, Sink & sink)
} }
void writeString(const string & s, Sink & sink) void writeString(const unsigned char * buf, size_t len, Sink & sink)
{ {
unsigned int len = s.length();
writeInt(len, sink); writeInt(len, sink);
sink((const unsigned char *) s.c_str(), len); sink(buf, len);
writePadding(len, sink); writePadding(len, sink);
} }
void writeStringSet(const StringSet & ss, Sink & sink) void writeString(const string & s, Sink & sink)
{ {
writeInt(ss.size(), sink); writeString((const unsigned char *) s.c_str(), s.size(), sink);
for (StringSet::iterator i = ss.begin(); i != ss.end(); ++i)
writeString(*i, sink);
} }
void readPadding(unsigned int len, Source & source) template<class T> void writeStrings(const T & ss, Sink & sink)
{
writeInt(ss.size(), sink);
foreach (typename T::const_iterator, i, ss)
writeString(*i, sink);
}
template void writeStrings(const Paths & ss, Sink & sink);
template void writeStrings(const PathSet & ss, Sink & sink);
void readPadding(size_t len, Source & source)
{ {
if (len % 8) { if (len % 8) {
unsigned char zero[8]; unsigned char zero[8];
unsigned int n = 8 - (len % 8); size_t n = 8 - (len % 8);
source(zero, n); source(zero, n);
for (unsigned int i = 0; i < n; i++) for (unsigned int i = 0; i < n; i++)
if (zero[i]) throw SerialisationError("non-zero padding"); if (zero[i]) throw SerialisationError("non-zero padding");
@ -115,9 +216,19 @@ unsigned long long readLongLong(Source & source)
} }
size_t readString(unsigned char * buf, size_t max, Source & source)
{
size_t len = readInt(source);
if (len > max) throw Error("string is too long");
source(buf, len);
readPadding(len, source);
return len;
}
string readString(Source & source) string readString(Source & source)
{ {
unsigned int len = readInt(source); size_t len = readInt(source);
unsigned char * buf = new unsigned char[len]; unsigned char * buf = new unsigned char[len];
AutoDeleteArray<unsigned char> d(buf); AutoDeleteArray<unsigned char> d(buf);
source(buf, len); source(buf, len);
@ -126,14 +237,17 @@ string readString(Source & source)
} }
StringSet readStringSet(Source & source) template<class T> T readStrings(Source & source)
{ {
unsigned int count = readInt(source); unsigned int count = readInt(source);
StringSet ss; T ss;
while (count--) while (count--)
ss.insert(readString(source)); ss.insert(ss.end(), readString(source));
return ss; return ss;
} }
template Paths readStrings(Source & source);
template PathSet readStrings(Source & source);
} }

View file

@ -11,7 +11,25 @@ namespace nix {
struct Sink struct Sink
{ {
virtual ~Sink() { } virtual ~Sink() { }
virtual void operator () (const unsigned char * data, unsigned int len) = 0; virtual void operator () (const unsigned char * data, size_t len) = 0;
};
/* A buffered abstract sink. */
struct BufferedSink : Sink
{
size_t bufSize, bufPos;
unsigned char * buffer;
BufferedSink(size_t bufSize = 32 * 1024)
: bufSize(bufSize), bufPos(0), buffer(0) { }
~BufferedSink();
void operator () (const unsigned char * data, size_t len);
void flush();
virtual void write(const unsigned char * data, size_t len) = 0;
}; };
@ -20,49 +38,55 @@ struct Source
{ {
virtual ~Source() { } virtual ~Source() { }
/* The callee should store exactly *len bytes in the buffer /* Store exactly len bytes in the buffer pointed to by data.
pointed to by data. It should block if that much data is not It blocks until all the requested data is available, or throws
yet available, or throw an error if it is not going to be an error if it is not going to be available. */
available. */ void operator () (unsigned char * data, size_t len);
virtual void operator () (unsigned char * data, unsigned int len) = 0;
/* Store up to len in the buffer pointed to by data, and
return the number of bytes stored. If blocks until at least
one byte is available. */
virtual size_t read(unsigned char * data, size_t len) = 0;
};
/* A buffered abstract source. */
struct BufferedSource : Source
{
size_t bufSize, bufPosIn, bufPosOut;
unsigned char * buffer;
BufferedSource(size_t bufSize = 32 * 1024)
: bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { }
~BufferedSource();
size_t read(unsigned char * data, size_t len);
/* Underlying read call, to be overriden. */
virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
}; };
/* A sink that writes data to a file descriptor. */ /* A sink that writes data to a file descriptor. */
struct FdSink : Sink struct FdSink : BufferedSink
{ {
int fd; int fd;
FdSink() FdSink() : fd(-1) { }
{ FdSink(int fd) : fd(fd) { }
fd = -1; ~FdSink();
}
FdSink(int fd) void write(const unsigned char * data, size_t len);
{
this->fd = fd;
}
void operator () (const unsigned char * data, unsigned int len);
}; };
/* A source that reads data from a file descriptor. */ /* A source that reads data from a file descriptor. */
struct FdSource : Source struct FdSource : BufferedSource
{ {
int fd; int fd;
FdSource() : fd(-1) { }
FdSource() FdSource(int fd) : fd(fd) { }
{ size_t readUnbuffered(unsigned char * data, size_t len);
fd = -1;
}
FdSource(int fd)
{
this->fd = fd;
}
void operator () (unsigned char * data, unsigned int len);
}; };
@ -70,7 +94,7 @@ struct FdSource : Source
struct StringSink : Sink struct StringSink : Sink
{ {
string s; string s;
virtual void operator () (const unsigned char * data, unsigned int len) void operator () (const unsigned char * data, size_t len)
{ {
s.append((const char *) data, len); s.append((const char *) data, len);
} }
@ -81,29 +105,25 @@ struct StringSink : Sink
struct StringSource : Source struct StringSource : Source
{ {
const string & s; const string & s;
unsigned int pos; size_t pos;
StringSource(const string & _s) : s(_s), pos(0) { } StringSource(const string & _s) : s(_s), pos(0) { }
virtual void operator () (unsigned char * data, unsigned int len) size_t read(unsigned char * data, size_t len);
{
s.copy((char *) data, len, pos);
pos += len;
if (pos > s.size())
throw Error("end of string reached");
}
}; };
void writePadding(unsigned int len, Sink & sink); void writePadding(size_t len, Sink & sink);
void writeInt(unsigned int n, Sink & sink); void writeInt(unsigned int n, Sink & sink);
void writeLongLong(unsigned long long n, Sink & sink); void writeLongLong(unsigned long long n, Sink & sink);
void writeString(const unsigned char * buf, size_t len, Sink & sink);
void writeString(const string & s, Sink & sink); void writeString(const string & s, Sink & sink);
void writeStringSet(const StringSet & ss, Sink & sink); template<class T> void writeStrings(const T & ss, Sink & sink);
void readPadding(unsigned int len, Source & source); void readPadding(size_t len, Source & source);
unsigned int readInt(Source & source); unsigned int readInt(Source & source);
unsigned long long readLongLong(Source & source); unsigned long long readLongLong(Source & source);
size_t readString(unsigned char * buf, size_t max, Source & source);
string readString(Source & source); string readString(Source & source);
StringSet readStringSet(Source & source); template<class T> T readStrings(Source & source);
MakeError(SerialisationError, Error) MakeError(SerialisationError, Error)

View file

@ -1270,7 +1270,7 @@ void run(Strings args)
else if (arg == "--profile" || arg == "-p") else if (arg == "--profile" || arg == "-p")
globals.profile = absPath(needArg(i, args, arg)); globals.profile = absPath(needArg(i, args, arg));
else if (arg == "--file" || arg == "-f") else if (arg == "--file" || arg == "-f")
globals.instSource.nixExprPath = absPath(needArg(i, args, arg)); globals.instSource.nixExprPath = lookupFileArg(globals.state, needArg(i, args, arg));
else if (arg == "--switch-profile" || arg == "-S") else if (arg == "--switch-profile" || arg == "-S")
op = opSwitchProfile; op = opSwitchProfile;
else if (arg == "--switch-generation" || arg == "-G") else if (arg == "--switch-generation" || arg == "-G")

View file

@ -43,7 +43,7 @@ void run(Strings args)
} }
if (op == opHash) { if (op == opHash) {
for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) { foreach (Strings::iterator, i, ss) {
Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i).first; Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i).first;
if (truncate && h.hashSize > 20) h = compressHash(h, 20); if (truncate && h.hashSize > 20) h = compressHash(h, 20);
std::cout << format("%1%\n") % std::cout << format("%1%\n") %
@ -52,8 +52,8 @@ void run(Strings args)
} }
else { else {
for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) { foreach (Strings::iterator, i, ss) {
Hash h = op == opTo16 ? parseHash32(ht, *i) : parseHash(ht, *i); Hash h = parseHash16or32(ht, *i);
std::cout << format("%1%\n") % std::cout << format("%1%\n") %
(op == opTo16 ? printHash(h) : printHash32(h)); (op == opTo16 ? printHash(h) : printHash32(h));
} }

View file

@ -138,8 +138,7 @@ void run(Strings args)
} }
foreach (Strings::iterator, i, files) { foreach (Strings::iterator, i, files) {
Path path = absPath(*i); Expr * e = state.parseExprFromFile(lookupFileArg(state, *i));
Expr * e = state.parseExprFromFile(path);
processExpr(state, attrPaths, parseOnly, strict, autoArgs, processExpr(state, attrPaths, parseOnly, strict, autoArgs,
evalOnly, xmlOutput, xmlOutputSourceLocation, e); evalOnly, xmlOutput, xmlOutputSourceLocation, e);
} }

View file

@ -133,14 +133,6 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
} }
static Hash parseHash16or32(HashType ht, const string & s)
{
return s.size() == Hash(ht).hashSize * 2
? parseHash(ht, s)
: parseHash32(ht, s);
}
/* Hack to support caching in `nix-prefetch-url'. */ /* Hack to support caching in `nix-prefetch-url'. */
static void opPrintFixedPath(Strings opFlags, Strings opArgs) static void opPrintFixedPath(Strings opFlags, Strings opArgs)
{ {
@ -594,11 +586,7 @@ static void opExport(Strings opFlags, Strings opArgs)
else throw UsageError(format("unknown flag `%1%'") % *i); else throw UsageError(format("unknown flag `%1%'") % *i);
FdSink sink(STDOUT_FILENO); FdSink sink(STDOUT_FILENO);
for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i) { exportPaths(*store, opArgs, sign, sink);
writeInt(1, sink);
store->exportPath(*i, sign, sink);
}
writeInt(0, sink);
} }
@ -612,12 +600,10 @@ static void opImport(Strings opFlags, Strings opArgs)
if (!opArgs.empty()) throw UsageError("no arguments expected"); if (!opArgs.empty()) throw UsageError("no arguments expected");
FdSource source(STDIN_FILENO); FdSource source(STDIN_FILENO);
while (true) { Paths paths = store->importPaths(requireSignature, source);
unsigned long long n = readLongLong(source);
if (n == 0) break; foreach (Paths::iterator, i, paths)
if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'"); cout << format("%1%\n") % *i << std::flush;
cout << format("%1%\n") % store->importPath(requireSignature, source) << std::flush;
}
} }

View file

@ -56,7 +56,8 @@ static void tunnelStderr(const unsigned char * buf, size_t count)
if (canSendStderr && myPid == getpid()) { if (canSendStderr && myPid == getpid()) {
try { try {
writeInt(STDERR_NEXT, to); writeInt(STDERR_NEXT, to);
writeString(string((char *) buf, count), to); writeString(buf, count, to);
to.flush();
} catch (...) { } catch (...) {
/* Write failed; that means that the other side is /* Write failed; that means that the other side is
gone. */ gone. */
@ -200,26 +201,20 @@ static void stopWork(bool success = true, const string & msg = "", unsigned int
struct TunnelSink : Sink struct TunnelSink : Sink
{ {
Sink & to; Sink & to;
TunnelSink(Sink & to) : to(to) TunnelSink(Sink & to) : to(to) { }
{ virtual void operator () (const unsigned char * data, size_t len)
}
virtual void operator ()
(const unsigned char * data, unsigned int len)
{ {
writeInt(STDERR_WRITE, to); writeInt(STDERR_WRITE, to);
writeString(string((const char *) data, len), to); writeString(data, len, to);
} }
}; };
struct TunnelSource : Source struct TunnelSource : BufferedSource
{ {
Source & from; Source & from;
TunnelSource(Source & from) : from(from) TunnelSource(Source & from) : from(from) { }
{ size_t readUnbuffered(unsigned char * data, size_t len)
}
virtual void operator ()
(unsigned char * data, unsigned int len)
{ {
/* Careful: we're going to receive data from the client now, /* Careful: we're going to receive data from the client now,
so we have to disable the SIGPOLL handler. */ so we have to disable the SIGPOLL handler. */
@ -228,11 +223,12 @@ struct TunnelSource : Source
writeInt(STDERR_READ, to); writeInt(STDERR_READ, to);
writeInt(len, to); writeInt(len, to);
string s = readString(from); to.flush();
if (s.size() != len) throw Error("not enough data"); size_t n = readString(data, len, from);
memcpy(data, (const unsigned char *) s.c_str(), len);
startWork(); startWork();
if (n == 0) throw EndOfFile("unexpected end-of-file");
return n;
} }
}; };
@ -241,11 +237,14 @@ struct TunnelSource : Source
the contents of the file to `s'. Otherwise barf. */ the contents of the file to `s'. Otherwise barf. */
struct RetrieveRegularNARSink : ParseSink struct RetrieveRegularNARSink : ParseSink
{ {
bool regular;
string s; string s;
RetrieveRegularNARSink() : regular(true) { }
void createDirectory(const Path & path) void createDirectory(const Path & path)
{ {
throw Error("regular file expected"); regular = false;
} }
void receiveContents(unsigned char * data, unsigned int len) void receiveContents(unsigned char * data, unsigned int len)
@ -255,7 +254,7 @@ struct RetrieveRegularNARSink : ParseSink
void createSymlink(const Path & path, const string & target) void createSymlink(const Path & path, const string & target)
{ {
throw Error("regular file expected"); regular = false;
} }
}; };
@ -266,10 +265,11 @@ struct SavingSourceAdapter : Source
Source & orig; Source & orig;
string s; string s;
SavingSourceAdapter(Source & orig) : orig(orig) { } SavingSourceAdapter(Source & orig) : orig(orig) { }
void operator () (unsigned char * data, unsigned int len) size_t read(unsigned char * data, size_t len)
{ {
orig(data, len); size_t n = orig.read(data, len);
s.append((const char *) data, len); s.append((const char *) data, n);
return n;
} }
}; };
@ -327,7 +327,7 @@ static void performOp(unsigned int clientVersion,
store->queryReferrers(path, paths); store->queryReferrers(path, paths);
else paths = store->queryDerivationOutputs(path); else paths = store->queryDerivationOutputs(path);
stopWork(); stopWork();
writeStringSet(paths, to); writeStrings(paths, to);
break; break;
} }
@ -371,11 +371,11 @@ static void performOp(unsigned int clientVersion,
addToStoreFromDump(). */ addToStoreFromDump(). */
ParseSink sink; /* null sink; just parse the NAR */ ParseSink sink; /* null sink; just parse the NAR */
parseDump(sink, savedNAR); parseDump(sink, savedNAR);
} else { } else
parseDump(savedRegular, from); parseDump(savedRegular, from);
}
startWork(); startWork();
if (!savedRegular.regular) throw Error("regular file expected");
Path path = dynamic_cast<LocalStore *>(store.get()) Path path = dynamic_cast<LocalStore *>(store.get())
->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo); ->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
stopWork(); stopWork();
@ -387,7 +387,7 @@ static void performOp(unsigned int clientVersion,
case wopAddTextToStore: { case wopAddTextToStore: {
string suffix = readString(from); string suffix = readString(from);
string s = readString(from); string s = readString(from);
PathSet refs = readStorePaths(from); PathSet refs = readStorePaths<PathSet>(from);
startWork(); startWork();
Path path = store->addTextToStore(suffix, s, refs); Path path = store->addTextToStore(suffix, s, refs);
stopWork(); stopWork();
@ -406,17 +406,17 @@ static void performOp(unsigned int clientVersion,
break; break;
} }
case wopImportPath: { case wopImportPaths: {
startWork(); startWork();
TunnelSource source(from); TunnelSource source(from);
Path path = store->importPath(true, source); Paths paths = store->importPaths(true, source);
stopWork(); stopWork();
writeString(path, to); writeStrings(paths, to);
break; break;
} }
case wopBuildDerivations: { case wopBuildDerivations: {
PathSet drvs = readStorePaths(from); PathSet drvs = readStorePaths<PathSet>(from);
startWork(); startWork();
store->buildDerivations(drvs); store->buildDerivations(drvs);
stopWork(); stopWork();
@ -474,7 +474,7 @@ static void performOp(unsigned int clientVersion,
case wopCollectGarbage: { case wopCollectGarbage: {
GCOptions options; GCOptions options;
options.action = (GCOptions::GCAction) readInt(from); options.action = (GCOptions::GCAction) readInt(from);
options.pathsToDelete = readStorePaths(from); options.pathsToDelete = readStorePaths<PathSet>(from);
options.ignoreLiveness = readInt(from); options.ignoreLiveness = readInt(from);
options.maxFreed = readLongLong(from); options.maxFreed = readLongLong(from);
options.maxLinks = readInt(from); options.maxLinks = readInt(from);
@ -492,7 +492,7 @@ static void performOp(unsigned int clientVersion,
store->collectGarbage(options, results); store->collectGarbage(options, results);
stopWork(); stopWork();
writeStringSet(results.paths, to); writeStrings(results.paths, to);
writeLongLong(results.bytesFreed, to); writeLongLong(results.bytesFreed, to);
writeLongLong(results.blocksFreed, to); writeLongLong(results.blocksFreed, to);
@ -530,7 +530,7 @@ static void performOp(unsigned int clientVersion,
writeInt(res ? 1 : 0, to); writeInt(res ? 1 : 0, to);
if (res) { if (res) {
writeString(info.deriver, to); writeString(info.deriver, to);
writeStringSet(info.references, to); writeStrings(info.references, to);
writeLongLong(info.downloadSize, to); writeLongLong(info.downloadSize, to);
if (GET_PROTOCOL_MINOR(clientVersion) >= 7) if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
writeLongLong(info.narSize, to); writeLongLong(info.narSize, to);
@ -542,7 +542,7 @@ static void performOp(unsigned int clientVersion,
startWork(); startWork();
PathSet paths = store->queryValidPaths(); PathSet paths = store->queryValidPaths();
stopWork(); stopWork();
writeStringSet(paths, to); writeStrings(paths, to);
break; break;
} }
@ -550,12 +550,12 @@ static void performOp(unsigned int clientVersion,
startWork(); startWork();
PathSet paths = store->queryFailedPaths(); PathSet paths = store->queryFailedPaths();
stopWork(); stopWork();
writeStringSet(paths, to); writeStrings(paths, to);
break; break;
} }
case wopClearFailedPaths: { case wopClearFailedPaths: {
PathSet paths = readStringSet(from); PathSet paths = readStrings<PathSet>(from);
startWork(); startWork();
store->clearFailedPaths(paths); store->clearFailedPaths(paths);
stopWork(); stopWork();
@ -570,7 +570,7 @@ static void performOp(unsigned int clientVersion,
stopWork(); stopWork();
writeString(info.deriver, to); writeString(info.deriver, to);
writeString(printHash(info.hash), to); writeString(printHash(info.hash), to);
writeStringSet(info.references, to); writeStrings(info.references, to);
writeInt(info.registrationTime, to); writeInt(info.registrationTime, to);
writeLongLong(info.narSize, to); writeLongLong(info.narSize, to);
break; break;
@ -603,8 +603,8 @@ static void processConnection()
unsigned int magic = readInt(from); unsigned int magic = readInt(from);
if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch"); if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
writeInt(WORKER_MAGIC_2, to); writeInt(WORKER_MAGIC_2, to);
writeInt(PROTOCOL_VERSION, to); writeInt(PROTOCOL_VERSION, to);
to.flush();
unsigned int clientVersion = readInt(from); unsigned int clientVersion = readInt(from);
/* Send startup error messages to the client. */ /* Send startup error messages to the client. */
@ -626,9 +626,11 @@ static void processConnection()
store = boost::shared_ptr<StoreAPI>(new LocalStore()); store = boost::shared_ptr<StoreAPI>(new LocalStore());
stopWork(); stopWork();
to.flush();
} catch (Error & e) { } catch (Error & e) {
stopWork(false, e.msg()); stopWork(false, e.msg());
to.flush();
return; return;
} }
@ -648,9 +650,19 @@ static void processConnection()
try { try {
performOp(clientVersion, from, to, op); performOp(clientVersion, from, to, op);
} catch (Error & e) { } catch (Error & e) {
/* If we're not in a state were we can send replies, then
something went wrong processing the input of the
client. This can happen especially if I/O errors occur
during addTextToStore() / importPath(). If that
happens, just send the error message and exit. */
bool errorAllowed = canSendStderr;
if (!errorAllowed) printMsg(lvlError, format("error processing client input: %1%") % e.msg());
stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0); stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0);
if (!errorAllowed) break;
} }
to.flush();
assert(!canSendStderr); assert(!canSendStderr);
}; };

View file

@ -1,4 +1,4 @@
TESTS_ENVIRONMENT = $(bash) -e TESTS_ENVIRONMENT = NIX_REMOTE= $(bash) -e
extra1 = $(shell pwd)/test-tmp/shared extra1 = $(shell pwd)/test-tmp/shared

View file

@ -23,8 +23,6 @@ export SHARED=$TEST_ROOT/shared
export PATH=$NIX_BIN_DIR:$TOP/scripts:$PATH export PATH=$NIX_BIN_DIR:$TOP/scripts:$PATH
export NIX_REMOTE=
export REAL_BIN_DIR=@bindir@ export REAL_BIN_DIR=@bindir@
export REAL_LIBEXEC_DIR=@libexecdir@ export REAL_LIBEXEC_DIR=@libexecdir@
export REAL_LOCALSTATE_DIR=@localstatedir@ export REAL_LOCALSTATE_DIR=@localstatedir@

View file

@ -72,6 +72,7 @@ in
$client->succeed("chmod 600 /root/.ssh/id_dsa"); $client->succeed("chmod 600 /root/.ssh/id_dsa");
# Install the SSH key on the slaves. # Install the SSH key on the slaves.
$client->waitForJob("network-interfaces");
foreach my $slave ($slave1, $slave2) { foreach my $slave ($slave1, $slave2) {
$slave->succeed("mkdir -m 700 /root/.ssh"); $slave->succeed("mkdir -m 700 /root/.ssh");
$slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys"); $slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");

View file

@ -36,7 +36,7 @@ nix-env -p $profiles/test -q '*' | grep -q foo-2.0pre1
test "$($profiles/test/bin/foo)" = "foo-2.0pre1" test "$($profiles/test/bin/foo)" = "foo-2.0pre1"
# Upgrade "foo": should install foo-2.0. # Upgrade "foo": should install foo-2.0.
nix-env -p $profiles/test -f ./user-envs.nix -u foo NIX_PATH=nixpkgs=./user-envs.nix nix-env -p $profiles/test -f '<nixpkgs>' -u foo
# Query installed: should contain foo-2.0 now. # Query installed: should contain foo-2.0 now.
test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1 test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1