From c0b706213dad330bd51607ff73059c87f0ec5b93 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Mon, 7 Nov 2011 21:11:59 +0000
Subject: [PATCH 01/46] * Boldly make SQLite WAL mode the default again.
 Hopefully the intermittent problems are gone by now. WAL mode is preferable
 because it does way fewer fsyncs.

---
 configure.ac                | 2 +-
 src/libstore/local-store.cc | 7 +++----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/configure.ac b/configure.ac
index 876e0a862..6a1a0e913 100644
--- a/configure.ac
+++ b/configure.ac
@@ -255,7 +255,7 @@ AC_ARG_WITH(sqlite, AC_HELP_STRING([--with-sqlite=PATH],
   [prefix of SQLite]),
   sqlite=$withval, sqlite=)
 AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite")
-SQLITE_VERSION=3070701
+SQLITE_VERSION=3070900
 AC_SUBST(SQLITE_VERSION)
 if test -z "$sqlite"; then
   sqlite_lib='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)/libsqlite3.la'
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 702ff67e7..06cadcb0f 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -327,10 +327,9 @@ void LocalStore::openDB(bool create)
     if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
         throwSQLiteError(db, "setting synchronous mode");
 
-    /* Set the SQLite journal mode. WAL mode is fastest, but doesn't
-       seem entirely stable at the moment (Oct. 2010). Thus, use
-       truncate mode by default. */
-    string mode = queryBoolSetting("use-sqlite-wal", false) ? "wal" : "truncate";
+    /* Set the SQLite journal mode. WAL mode is fastest, so it's the
+       default. */
+    string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate";
     string prevMode;
     {
         SQLiteStmt stmt;

From a5952405d2803ae0d29955fe6725cd9195327a07 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Wed, 16 Nov 2011 11:37:40 +0000
Subject: [PATCH 02/46] * Re-use prepared statements across insertions into
 the manifest cache DB. This speeds up creating the cache from 16.1s to 7.9s
 on my system.

---
 perl/lib/Nix/Manifest.pm | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/perl/lib/Nix/Manifest.pm b/perl/lib/Nix/Manifest.pm
index 7790cfe3b..f042e1f88 100644
--- a/perl/lib/Nix/Manifest.pm
+++ b/perl/lib/Nix/Manifest.pm
@@ -286,6 +286,14 @@ EOF
     open MAINLOCK, ">>$lockFile" or die "unable to acquire lock ‘$lockFile’: $!\n";
     flock(MAINLOCK, LOCK_EX) or die;
 
+    our $insertNAR = $dbh->prepare(
+        "insert into NARs(manifest, storePath, url, hash, size, narHash, " .
+        "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
+
+    our $insertPatch = $dbh->prepare(
+        "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
+        "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+
     $dbh->begin_work;
 
     # Read each manifest in $manifestDir and add it to the database,
@@ -312,20 +320,16 @@ EOF
 
     sub addNARToDB {
         my ($storePath, $narFile) = @_;
-        $dbh->do(
-            "insert into NARs(manifest, storePath, url, hash, size, narHash, " .
-            "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-            {}, $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
+        $insertNAR->execute(
+            $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
             $narFile->{narHash}, $narFile->{narSize}, $narFile->{references},
             $narFile->{deriver}, $narFile->{system});
     };
 
     sub addPatchToDB {
         my ($storePath, $patch) = @_;
-        $dbh->do(
-            "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
-            "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-            {}, $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
+        $insertPatch->execute(
+            $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
             $patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize},
             $patch->{patchType});
     };

From 63ee5e4d2a46c3619c59307d7dbb08f25d32c3e8 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Wed, 16 Nov 2011 11:56:19 +0000
Subject: [PATCH 03/46] * Remove obsolete line.

---
 perl/lib/Nix/Manifest.pm | 1 -
 1 file changed, 1 deletion(-)

diff --git a/perl/lib/Nix/Manifest.pm b/perl/lib/Nix/Manifest.pm
index f042e1f88..21c61d284 100644
--- a/perl/lib/Nix/Manifest.pm
+++ b/perl/lib/Nix/Manifest.pm
@@ -120,7 +120,6 @@ sub readManifest_ {
         elsif (/^\s*Hash:\s*(\S+)\s*$/) { $hash = $1; }
         elsif (/^\s*URL:\s*(\S+)\s*$/) { $url = $1; }
         elsif (/^\s*Size:\s*(\d+)\s*$/) { $size = $1; }
-        elsif (/^\s*SuccOf:\s*(\/\S+)\s*$/) { } # obsolete
         elsif (/^\s*BasePath:\s*(\/\S+)\s*$/) { $basePath = $1; }
         elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
         elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }

From d7d7910ba48d898bda2db92a4f16a6179c855f7d Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Wed, 16 Nov 2011 16:25:38 +0000
Subject: [PATCH 04/46] * Don't decompress the manifests in
 /nix/var/nix/manifest. This saves disk space, and, since they're typically
 only decompressed once (to fill the manifest cache), doesn't make things
 slower.

---
 perl/lib/Nix/Manifest.pm | 10 ++++++++--
 scripts/nix-pull.in      | 11 +----------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/perl/lib/Nix/Manifest.pm b/perl/lib/Nix/Manifest.pm
index 21c61d284..9c891abb8 100644
--- a/perl/lib/Nix/Manifest.pm
+++ b/perl/lib/Nix/Manifest.pm
@@ -53,8 +53,14 @@ sub addPatch {
 sub readManifest_ {
     my ($manifest, $addNAR, $addPatch) = @_;
 
-    open MANIFEST, "<$manifest"
-        or die "cannot open `$manifest': $!";
+    # Decompress the manifest if necessary.
+    if ($manifest =~ /\.bz2$/) {
+        open MANIFEST, "$Nix::Config::bzip2 -d < $manifest |"
+            or die "cannot decompress `$manifest': $!";
+    } else {
+        open MANIFEST, "<$manifest"
+            or die "cannot open `$manifest': $!";
+    }
 
     my $inside = 0;
     my $type;
diff --git a/scripts/nix-pull.in b/scripts/nix-pull.in
index f3cba0c02..d8352b277 100755
--- a/scripts/nix-pull.in
+++ b/scripts/nix-pull.in
@@ -59,16 +59,7 @@ sub processURL {
     # First see if a bzipped manifest is available.
     if (system("$Nix::Config::curl --fail --silent --head '$url'.bz2 > /dev/null") == 0) {
         print "fetching list of Nix archives at `$url.bz2'...\n";
-        my $bzipped = downloadFile "$url.bz2";
-
-        $manifest = "$tmpDir/MANIFEST";
-
-        system("$Nix::Config::bzip2 -d < $bzipped > $manifest") == 0
-            or die "cannot decompress manifest";
-
-        $manifest = (`$Nix::Config::binDir/nix-store --add $manifest`
-            or die "cannot copy $manifest to the store");
-        chomp $manifest;
+        $manifest = downloadFile "$url.bz2";
     }
 
     # Otherwise, just get the uncompressed manifest.
From f8e609c3413e38d403d986020079f24a2b82c063 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 16 Nov 2011 16:41:48 +0000 Subject: [PATCH 05/46] =?UTF-8?q?*=20nix-pull:=20update=20the=20Nix=20mani?= =?UTF-8?q?fest=20cache=20if=20necessary.=20=20Also,=20don't=20=20=20read?= =?UTF-8?q?=20the=20manifest=20just=20to=20check=20the=20version=20and=20p?= =?UTF-8?q?rint=20the=20number=20of=20=20=20paths.=20=20This=20makes=20nix?= =?UTF-8?q?-pull=20very=20fast=20for=20the=20cached=20cache=20(speeding=20?= =?UTF-8?q?=20=20up=20nixos-rebuild=20without=20the=20=E2=80=98--no-pull?= =?UTF-8?q?=E2=80=99=20or=20=E2=80=98--fast=E2=80=99=20options).?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- perl/lib/Nix/Manifest.pm | 8 ++++---- scripts/nix-pull.in | 24 +++--------------------- 2 files changed, 7 insertions(+), 25 deletions(-) diff --git a/perl/lib/Nix/Manifest.pm b/perl/lib/Nix/Manifest.pm index 9c891abb8..d1717a0a8 100644 --- a/perl/lib/Nix/Manifest.pm +++ b/perl/lib/Nix/Manifest.pm @@ -305,8 +305,8 @@ EOF # unless we've already done so on a previous run. my %seen; - for my $manifest (glob "$manifestDir/*.nixmanifest") { - $manifest = Cwd::abs_path($manifest); + for my $manifestLink (glob "$manifestDir/*.nixmanifest") { + my $manifest = Cwd::abs_path($manifestLink); my $timestamp = lstat($manifest)->mtime; $seen{$manifest} = 1; @@ -342,10 +342,10 @@ EOF my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB); if ($version < 3) { - die "you have an old-style manifest `$manifest'; please delete it"; + die "you have an old-style or corrupt manifest `$manifestLink'; please delete it"; } if ($version >= 10) { - die "manifest `$manifest' is too new; please delete it or upgrade Nix"; + die "manifest `$manifestLink' is too new; please delete it or upgrade Nix"; } } diff --git a/scripts/nix-pull.in b/scripts/nix-pull.in index d8352b277..74545a350 100755 --- a/scripts/nix-pull.in +++ b/scripts/nix-pull.in @@ -33,10 +33,6 @@ if (! -l $manifestDirLink) { # Process the URLs specified on the command line. -my %narFiles; -my %patches; - -my $skipWrongStore = 0; sub downloadFile { my $url = shift; @@ -68,20 +64,6 @@ sub processURL { $manifest = downloadFile $url; } - my $version = readManifest($manifest, \%narFiles, \%patches); - - die "`$url' is not a manifest or it is too old (i.e., for Nix <= 0.7)\n" if $version < 3; - die "manifest `$url' is too new\n" if $version >= 5; - - if ($skipWrongStore) { - foreach my $path (keys %narFiles) { - if (substr($path, 0, length($storeDir) + 1) ne "$storeDir/") { - print STDERR "warning: manifest `$url' assumes a Nix store at a different location than $storeDir, skipping...\n"; - exit 0; - } - } - } - my $baseName = "unnamed"; if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component $baseName = $1; @@ -120,12 +102,12 @@ sub processURL { while (@ARGV) { my $url = shift @ARGV; if ($url eq "--skip-wrong-store") { - $skipWrongStore = 1; + # No-op, no longer supported. } else { processURL $url; } } -my $size = scalar (keys %narFiles); -print "$size store paths in manifest\n"; +# Update the cache. +updateManifestDB(); From 45ec69cbdf7e5e7ff19d28c8f25c7f650105b253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ludovic=20Court=C3=A8s?= Date: Wed, 16 Nov 2011 20:39:59 +0000 Subject: [PATCH 06/46] configure: Rely on `AC_CANONICAL_HOST' to determine the Nix system name. This should be more robust and also plays better with cross-compilation---it uses the host name, instead of using the build name. 
--- configure.ac | 43 +++++++++++++------------------------------ 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/configure.ac b/configure.ac index 6a1a0e913..6840120a4 100644 --- a/configure.ac +++ b/configure.ac @@ -5,42 +5,25 @@ AM_INIT_AUTOMAKE([dist-bzip2 foreign]) AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [Nix version.]) -AC_CANONICAL_HOST - +AC_PROG_SED # Construct a Nix system name (like "i686-linux"). +AC_CANONICAL_HOST AC_MSG_CHECKING([for the canonical Nix system name]) -cpu_name=$(uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_') -machine_name=$(uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_') - -case $machine_name in - i*86) - machine_name=i686 - ;; - x86_64) - machine_name=x86_64 - ;; - ppc) - machine_name=powerpc - ;; - *) - if test "$cpu_name" != "unknown"; then - machine_name=$cpu_name - fi - ;; -esac - -sys_name=$(uname -s | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_') - -case $sys_name in - cygwin*) - sys_name=cygwin - ;; -esac AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], [Platform identifier (e.g., `i686-linux').]), - system=$withval, system="${machine_name}-${sys_name}") + [system=$withval], + [case "$host_os" in + linux-gnu*) + # For backward compatibility, strip the `-gnu' part. + system="$host_cpu-linux";; + *) + # Strip the version number from names such as `gnu0.3', + # `darwin10.2.0', etc. + system="$host_cpu-`echo $host_os | "$SED" -e's/[0-9.]\+$//g'`";; + esac]) + AC_MSG_RESULT($system) AC_SUBST(system) AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')]) From 42164d6de493fa2e0acbf1ebaf9ee0d7502b4f0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ludovic=20Court=C3=A8s?= Date: Wed, 16 Nov 2011 20:58:21 +0000 Subject: [PATCH 07/46] configure: Change i*86 to i686 as has always been done. --- configure.ac | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/configure.ac b/configure.ac index 6840120a4..b3cc8eeea 100644 --- a/configure.ac +++ b/configure.ac @@ -14,14 +14,21 @@ AC_MSG_CHECKING([for the canonical Nix system name]) AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], [Platform identifier (e.g., `i686-linux').]), [system=$withval], - [case "$host_os" in + [case "$host_cpu" in + i*86) + machine_name="i686";; + *) + machine_name="$host_cpu";; + esac + + case "$host_os" in linux-gnu*) # For backward compatibility, strip the `-gnu' part. - system="$host_cpu-linux";; + system="$machine_name-linux";; *) # Strip the version number from names such as `gnu0.3', # `darwin10.2.0', etc. - system="$host_cpu-`echo $host_os | "$SED" -e's/[0-9.]\+$//g'`";; + system="$machine_name-`echo $host_os | "$SED" -e's/[0-9.]\+$//g'`";; esac]) AC_MSG_RESULT($system) From b92f76374f34068e3a41130cdba88ac2c5924804 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 18 Nov 2011 17:25:40 +0000 Subject: [PATCH 08/46] * In the platform, canonicalise "amd64" to "x86_64". FreeBSD 8.2's uname reports amd64. * Drop the FreeBSD version number, e.g. "i686-freebsd" instead of "i686-freebsd8.2". 
--- configure.ac | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/configure.ac b/configure.ac index b3cc8eeea..2ce755d66 100644 --- a/configure.ac +++ b/configure.ac @@ -17,6 +17,8 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], [case "$host_cpu" in i*86) machine_name="i686";; + amd64) + machine_name="x86_64";; *) machine_name="$host_cpu";; esac @@ -25,6 +27,9 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], linux-gnu*) # For backward compatibility, strip the `-gnu' part. system="$machine_name-linux";; + freebsd*) + # Strip the version number (e.g. freebsd8.2). + system="$machine_name-freebsd";; *) # Strip the version number from names such as `gnu0.3', # `darwin10.2.0', etc. From 964399c079f312312f75c41d6f58d323822b6cf0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 20 Nov 2011 19:23:50 +0000 Subject: [PATCH 09/46] * "sed" on FreeBSD doesn't know the "+" operator. --- configure.ac | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/configure.ac b/configure.ac index 2ce755d66..f9f3376b5 100644 --- a/configure.ac +++ b/configure.ac @@ -27,13 +27,10 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], linux-gnu*) # For backward compatibility, strip the `-gnu' part. system="$machine_name-linux";; - freebsd*) - # Strip the version number (e.g. freebsd8.2). - system="$machine_name-freebsd";; *) # Strip the version number from names such as `gnu0.3', # `darwin10.2.0', etc. - system="$machine_name-`echo $host_os | "$SED" -e's/[0-9.]\+$//g'`";; + system="$machine_name-`echo $host_os | "$SED" -e's/[0-9.]*$//g'`";; esac]) AC_MSG_RESULT($system) From a6abade8e832217c27fade5ab8b7c28003c2ac46 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Nov 2011 12:18:26 +0000 Subject: [PATCH 10/46] * Escape the [ and ] characters in the sed call, otherwise autoconf will eat them. --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index f9f3376b5..07d67983f 100644 --- a/configure.ac +++ b/configure.ac @@ -30,7 +30,7 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], *) # Strip the version number from names such as `gnu0.3', # `darwin10.2.0', etc. - system="$machine_name-`echo $host_os | "$SED" -e's/[0-9.]*$//g'`";; + system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";; esac]) AC_MSG_RESULT($system) From 23e933b3b3e881515993538ab774c8c7b54f8370 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Nov 2011 12:23:48 +0000 Subject: [PATCH 11/46] * Put back the "sys_name" variable which got removed somewhere. This broke building on Cygwin and Solaris. --- configure.ac | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 07d67983f..22139b776 100644 --- a/configure.ac +++ b/configure.ac @@ -33,6 +33,14 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";; esac]) +sys_name=$(uname -s | tr 'A-Z ' 'a-z_') + +case $sys_name in + cygwin*) + sys_name=cygwin + ;; +esac + AC_MSG_RESULT($system) AC_SUBST(system) AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')]) @@ -54,7 +62,7 @@ fi # Solaris-specific stuff. 
-if test "$sys_name" = "sunos"; then +if [ "$sys_name" = sunos ]; then # Solaris requires -lsocket -lnsl for network functions LIBS="-lsocket -lnsl $LIBS" fi From 4de3e2a0db8d043324e9799ada181f7e73356908 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Nov 2011 13:22:34 +0000 Subject: [PATCH 12/46] * Doh. --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 22139b776..0443fe492 100644 --- a/configure.ac +++ b/configure.ac @@ -62,7 +62,7 @@ fi # Solaris-specific stuff. -if [ "$sys_name" = sunos ]; then +if test "$sys_name" = sunos; then # Solaris requires -lsocket -lnsl for network functions LIBS="-lsocket -lnsl $LIBS" fi From 4e1ea17052b4cc2445bc2ece2136f248112b4e45 Mon Sep 17 00:00:00 2001 From: Rob Vermaas Date: Mon, 21 Nov 2011 15:19:51 +0000 Subject: [PATCH 13/46] nix: add /etc/hosts with localhost entry to chroot builds. --- src/libstore/build.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index d12f41d66..171c08913 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -1650,6 +1650,9 @@ void DerivationGoal::startBuilder() (format("nixbld:!:%1%:\n") % (buildUser.enabled() ? buildUser.getGID() : getgid())).str()); + /* Create /etc/hosts with localhost entry. */ + writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n"); + /* Bind-mount a user-configurable set of directories from the host file system. The `/dev/pts' directory must be mounted separately so that newly-created pseudo-terminals show From 993fa94fb489f46e127ef760bea8c65ef281ef7f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 22 Nov 2011 17:28:41 +0000 Subject: [PATCH 14/46] =?UTF-8?q?*=20Move=20initialisation=20of=20variable?= =?UTF-8?q?s=20like=20nixConfDir=20from=20libmain=20to=20=20=20libstore=20?= =?UTF-8?q?so=20that=20the=20Perl=20bindings=20can=20use=20it=20as=20well.?= =?UTF-8?q?=20=20It's=20vital=20=20=20that=20the=20Perl=20bindings=20use?= =?UTF-8?q?=20the=20configuration=20file,=20because=20otherwise=20=20=20ni?= =?UTF-8?q?x-copy-closure=20will=20fail=20with=20a=20=E2=80=98database=20l?= =?UTF-8?q?ocked=E2=80=99=20message=20if=20the=20=20=20value=20of=20?= =?UTF-8?q?=E2=80=98use-sqlite-wal=E2=80=99=20is=20changed=20from=20the=20?= =?UTF-8?q?default.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- perl/lib/Nix/Store.xs | 4 +--- src/libmain/Makefile.am | 7 ------- src/libmain/shared.cc | 26 ++------------------------ src/libstore/Makefile.am | 11 ++++++++++- src/libstore/globals.cc | 30 ++++++++++++++++++++++++++++++ src/libstore/globals.hh | 4 +++- 6 files changed, 46 insertions(+), 36 deletions(-) diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index af71ad955..9e51ea337 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -18,10 +18,8 @@ using namespace nix; void doInit() { if (!store) { - nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", "/nix/store"))); - nixStateDir = canonPath(getEnv("NIX_STATE_DIR", "/nix/var/nix")); - nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db"); try { + setDefaultsFromEnvironment(); store = openStore(); } catch (Error & e) { croak(e.what()); diff --git a/src/libmain/Makefile.am b/src/libmain/Makefile.am index ababc3595..404353c62 100644 --- a/src/libmain/Makefile.am +++ b/src/libmain/Makefile.am @@ -7,13 +7,6 @@ libmain_la_LIBADD = ../libstore/libstore.la @BDW_GC_LIBS@ pkginclude_HEADERS = shared.hh AM_CXXFLAGS = \ - 
-DNIX_STORE_DIR=\"$(storedir)\" \ - -DNIX_DATA_DIR=\"$(datadir)\" \ - -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \ - -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \ - -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \ - -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \ - -DNIX_BIN_DIR=\"$(bindir)\" \ -DNIX_VERSION=\"$(VERSION)\" \ -I$(srcdir)/.. -I$(srcdir)/../libutil \ -I$(srcdir)/../libstore diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 8532cdaad..f7c11ed05 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -120,30 +120,8 @@ static bool showTrace = false; processor. */ static void initAndRun(int argc, char * * argv) { - /* Setup Nix paths. */ - nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))); - nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)); - nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)); - nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)); - nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db"); - nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)); - nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)); - nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)); - - string subs = getEnv("NIX_SUBSTITUTERS", "default"); - if (subs == "default") { - substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); - substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); - } else - substituters = tokenizeString(subs, ":"); - - /* Get some settings from the configuration file. */ - thisSystem = querySetting("system", SYSTEM); - maxBuildJobs = queryIntSetting("build-max-jobs", 1); - buildCores = queryIntSetting("build-cores", 1); - maxSilentTime = queryIntSetting("build-max-silent-time", 0); - buildTimeout = queryIntSetting("build-timeout", 0); - + setDefaultsFromEnvironment(); + /* Catch SIGINT. */ struct sigaction act; act.sa_handler = sigintHandler; diff --git a/src/libstore/Makefile.am b/src/libstore/Makefile.am index e19256b92..39a61233b 100644 --- a/src/libstore/Makefile.am +++ b/src/libstore/Makefile.am @@ -15,7 +15,16 @@ libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la ${aterm_ EXTRA_DIST = schema.sql AM_CXXFLAGS = -Wall \ - ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil + ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil \ + -DNIX_STORE_DIR=\"$(storedir)\" \ + -DNIX_DATA_DIR=\"$(datadir)\" \ + -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \ + -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \ + -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \ + -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \ + -DNIX_BIN_DIR=\"$(bindir)\" \ + -I$(srcdir)/.. -I$(srcdir)/../libutil \ + -I$(srcdir)/../libstore local-store.lo: schema.sql.hh diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 2e9dc8823..5c22f1406 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -1,3 +1,5 @@ +#include "config.h" + #include "globals.hh" #include "util.hh" @@ -138,5 +140,33 @@ void reloadSettings() settings.clear(); } + +void setDefaultsFromEnvironment() +{ + /* Setup Nix paths. 
*/ + nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))); + nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)); + nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)); + nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)); + nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db"); + nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)); + nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)); + nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)); + + string subs = getEnv("NIX_SUBSTITUTERS", "default"); + if (subs == "default") { + substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl"); + substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl"); + } else + substituters = tokenizeString(subs, ":"); + + /* Get some settings from the configuration file. */ + thisSystem = querySetting("system", SYSTEM); + maxBuildJobs = queryIntSetting("build-max-jobs", 1); + buildCores = queryIntSetting("build-cores", 1); + maxSilentTime = queryIntSetting("build-max-silent-time", 0); + buildTimeout = queryIntSetting("build-timeout", 0); +} + } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 231c1f850..12a9b9ca1 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -114,7 +114,9 @@ void overrideSetting(const string & name, const Strings & value); void reloadSettings(); - +void setDefaultsFromEnvironment(); + + } From ab20af3e6f83f320232d0e5f6bcfcb279c0047c0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 23 Nov 2011 12:21:35 +0000 Subject: [PATCH 15/46] * build-remote.pl: drop a hard-coded reference to /nix/etc/nix. --- perl/lib/Nix/Config.pm.in | 1 + scripts/build-remote.pl.in | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in index 658305fd9..b657683be 100644 --- a/perl/lib/Nix/Config.pm.in +++ b/perl/lib/Nix/Config.pm.in @@ -4,6 +4,7 @@ $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@"; $libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@"; $manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests"; $logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix"; +$confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix"; $bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@"; $curl = "@curl@"; diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in index e8c76086d..110c95f22 100755 --- a/scripts/build-remote.pl.in +++ b/scripts/build-remote.pl.in @@ -3,6 +3,7 @@ use Fcntl ':flock'; use English '-no_match_vars'; use IO::Handle; +use Nix::Config; use Nix::SSH qw/sshOpts openSSHConnection/; no warnings('once'); @@ -208,7 +209,7 @@ print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace; my $maybeSign = ""; -$maybeSign = "--sign" if -e "/nix/etc/nix/signing-key.sec"; +$maybeSign = "--sign" if -e "$Nix::Config::confDir/signing-key.sec"; # Register the derivation as a temporary GC root. 
Note that $PPID is From 5bbd693caedd5d50994938555b3a4b535875347e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 23 Nov 2011 15:13:37 +0000 Subject: [PATCH 16/46] =?UTF-8?q?*=20Add=20an=20API=20function=20exportPat?= =?UTF-8?q?hs()=20that=20provides=20the=20functionality=20of=20=20=20?= =?UTF-8?q?=E2=80=98nix-store=20--export=E2=80=99.=20*=20Add=20a=20Perl=20?= =?UTF-8?q?module=20that=20provides=20the=20functionality=20of=20=20=20?= =?UTF-8?q?=E2=80=98nix-copy-closure=20--to=E2=80=99.=20=20This=20is=20use?= =?UTF-8?q?d=20by=20build-remote.pl=20so=20it=20no=20=20=20longer=20needs?= =?UTF-8?q?=20to=20start=20a=20separate=20nix-copy-closure=20process.=20?= =?UTF-8?q?=20Also,=20it=20=20=20uses=20the=20Perl=20API=20to=20do=20the?= =?UTF-8?q?=20export,=20so=20it=20doesn't=20need=20to=20start=20a=20=20=20?= =?UTF-8?q?separate=20nix-store=20process=20either.=20=20As=20a=20result,?= =?UTF-8?q?=20nix-copy-closure=20=20=20and=20build-remote.pl=20should=20no?= =?UTF-8?q?=20longer=20fail=20on=20very=20large=20closures=20due=20=20=20t?= =?UTF-8?q?o=20an=20"Argument=20list=20too=20long"=20error.=20=20(Note=20t?= =?UTF-8?q?hat=20having=20very=20many=20=20=20dependencies=20in=20a=20sing?= =?UTF-8?q?le=20derivation=20can=20still=20fail=20because=20the=20=20=20en?= =?UTF-8?q?vironment=20can=20become=20too=20large.=20=20Can't=20be=20helpe?= =?UTF-8?q?d=20though.)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- perl/Makefile.am | 2 +- perl/lib/Nix/CopyClosure.pm | 46 +++++++++++++++++++++++++++++++++++++ perl/lib/Nix/Store.pm | 2 +- perl/lib/Nix/Store.xs | 13 +++++++++++ scripts/build-remote.pl.in | 4 ++-- scripts/nix-copy-closure.in | 36 +++++++---------------------- src/libstore/local-store.cc | 2 +- src/libstore/store-api.cc | 11 +++++++++ src/libstore/store-api.hh | 6 +++++ src/nix-store/nix-store.cc | 6 +---- 10 files changed, 90 insertions(+), 38 deletions(-) create mode 100644 perl/lib/Nix/CopyClosure.pm diff --git a/perl/Makefile.am b/perl/Makefile.am index eded469f9..93f5415c8 100644 --- a/perl/Makefile.am +++ b/perl/Makefile.am @@ -2,7 +2,7 @@ perlversion := $(shell perl -e 'use Config; print $$Config{version};') perlarchname := $(shell perl -e 'use Config; print $$Config{archname};') perllibdir = $(libdir)/perl5/site_perl/$(perlversion)/$(perlarchname) -PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/Config.pm.in +PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in all: $(PERL_MODULES:.in=) ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/Store.so diff --git a/perl/lib/Nix/CopyClosure.pm b/perl/lib/Nix/CopyClosure.pm new file mode 100644 index 000000000..045f6bfaf --- /dev/null +++ b/perl/lib/Nix/CopyClosure.pm @@ -0,0 +1,46 @@ +package Nix::CopyClosure; + +use strict; +use Nix::Config; +use Nix::Store; + + +sub copyTo { + my ($sshHost, $sshOpts, $storePaths, $compressor, $decompressor, $includeOutputs, $dryRun, $sign) = @_; + + $compressor = "$compressor |" if $compressor ne ""; + $decompressor = "$decompressor |" if $decompressor ne ""; + + # Get the closure of this path. + my @closure = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs, + map { followLinksToStorePath $_ } @{$storePaths}))); + + # Ask the remote host which paths are invalid. Because of limits + # to the command line length, do this in chunks. 
Eventually, + # we'll want to use ‘--from-stdin’, but we can't rely on the + # target having this option yet. + my @missing = (); + while (scalar(@closure) > 0) { + my @ps = splice(@closure, 0, 1500); + open(READ, "set -f; ssh $sshHost @{$sshOpts} nix-store --check-validity --print-invalid @ps|"); + while () { + chomp; + push @missing, $_; + } + close READ or die; + } + + # Export the store paths and import them on the remote machine. + if (scalar @missing > 0) { + print STDERR "copying ", scalar @missing, " missing paths to ‘$sshHost’...\n"; + #print STDERR " $_\n" foreach @missing; + unless ($dryRun) { + open SSH, "| $compressor ssh $sshHost @{$sshOpts} '$decompressor nix-store --import'" or die; + exportPaths(fileno(SSH), $sign, @missing); + close SSH or die "copying store paths to remote machine `$sshHost' failed: $?"; + } + } +} + + +1; diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm index bef6e7460..d96f8e9ab 100644 --- a/perl/lib/Nix/Store.pm +++ b/perl/lib/Nix/Store.pm @@ -12,7 +12,7 @@ our %EXPORT_TAGS = ( 'all' => [ qw( ) ] ); our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); -our @EXPORT = qw(isValidPath topoSortPaths computeFSClosure followLinksToStorePath); +our @EXPORT = qw(isValidPath topoSortPaths computeFSClosure followLinksToStorePath exportPaths); our $VERSION = '0.15'; diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index 9e51ea337..b50451f45 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -146,3 +146,16 @@ SV * followLinksToStorePath(char * path) } OUTPUT: RETVAL + + +void exportPaths(int fd, int sign, ...) + PPCODE: + try { + doInit(); + Paths paths; + for (int n = 2; n < items; ++n) paths.push_back(SvPV_nolen(ST(n))); + FdSink sink(fd); + exportPaths(*store, paths, sign, sink); + } catch (Error & e) { + croak(e.what()); + } diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in index 110c95f22..72d09565b 100755 --- a/scripts/build-remote.pl.in +++ b/scripts/build-remote.pl.in @@ -5,6 +5,7 @@ use English '-no_match_vars'; use IO::Handle; use Nix::Config; use Nix::SSH qw/sshOpts openSSHConnection/; +use Nix::CopyClosure; no warnings('once'); @@ -225,8 +226,7 @@ sub removeRoots { # Copy the derivation and its dependencies to the build machine. -system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath @inputs") == 0 - or die "cannot copy inputs to $hostName: $?"; +Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne ""); # Perform the build. diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in index 172acd9e7..2eac56e3f 100755 --- a/scripts/nix-copy-closure.in +++ b/scripts/nix-copy-closure.in @@ -3,6 +3,7 @@ use Nix::SSH; use Nix::Config; use Nix::Store; +use Nix::CopyClosure; if (scalar @ARGV < 1) { @@ -39,8 +40,8 @@ while (@ARGV) { $sign = 1; } elsif ($arg eq "--gzip") { - $compressor = "| gzip"; - $decompressor = "gunzip |"; + $compressor = "gzip"; + $decompressor = "gunzip"; } elsif ($arg eq "--from") { $toMode = 0; @@ -67,30 +68,7 @@ openSSHConnection $sshHost or die "$0: unable to start SSH\n"; if ($toMode) { # Copy TO the remote machine. - - # Get the closure of this path. - my @allStorePaths = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs, map { followLinksToStorePath $_ } @storePaths))); - - # Ask the remote host which paths are invalid. 
- open(READ, "set -f; ssh $sshHost @sshOpts nix-store --check-validity --print-invalid @allStorePaths|"); - my @missing = (); - while () { - chomp; - push @missing, $_; - } - close READ or die; - - # Export the store paths and import them on the remote machine. - if (scalar @missing > 0) { - print STDERR "copying these missing paths:\n"; - print STDERR " $_\n" foreach @missing; - unless ($dryRun) { - my $extraOpts = $sign ? "--sign" : ""; - system("set -f; nix-store --export $extraOpts @missing $compressor | ssh $sshHost @sshOpts '$decompressor nix-store --import'") == 0 - or die "copying store paths to remote machine `$sshHost' failed: $?"; - } - } - + Nix::CopyClosure::copyTo($sshHost, [ @sshOpts ], [ @storePaths ], $compressor, $decompressor, $includeOutputs, $dryRun, $sign); } else { # Copy FROM the remote machine. @@ -112,8 +90,10 @@ else { # Copy FROM the remote machine. # Export the store paths on the remote machine and import them on locally. if (scalar @missing > 0) { - print STDERR "copying these missing paths:\n"; - print STDERR " $_\n" foreach @missing; + print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n"; + #print STDERR " $_\n" foreach @missing; + $compressor = "| $compressor" if $compressor ne ""; + $decompressor = "$decompressor |" if $decompressor ne ""; unless ($dryRun) { my $extraOpts = $sign ? "--sign" : ""; system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor $Nix::Config::binDir/nix-store --import") == 0 diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 06cadcb0f..3c1f2ecac 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1255,7 +1255,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source) bool haveSignature = readInt(hashAndReadSource) == 1; if (requireSignature && !haveSignature) - throw Error("imported archive lacks a signature"); + throw Error(format("imported archive of `%1%' lacks a signature") % dstPath); if (haveSignature) { string signature = readString(hashAndReadSource); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index d67ff2c77..36ade2170 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -298,6 +298,17 @@ string showPaths(const PathSet & paths) } +void exportPaths(StoreAPI & store, const Paths & paths, + bool sign, Sink & sink) +{ + foreach (Paths::const_iterator, i, paths) { + writeInt(1, sink); + store.exportPath(*i, sign, sink); + } + writeInt(0, sink); +} + + } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index b3e67436c..8bfb09880 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -342,6 +342,12 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven = false); +/* Export multiple paths in the format expected by ‘nix-store + --import’. 
*/ +void exportPaths(StoreAPI & store, const Paths & paths, + bool sign, Sink & sink); + + } diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 371ca54af..84d3da032 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -594,11 +594,7 @@ static void opExport(Strings opFlags, Strings opArgs) else throw UsageError(format("unknown flag `%1%'") % *i); FdSink sink(STDOUT_FILENO); - for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i) { - writeInt(1, sink); - store->exportPath(*i, sign, sink); - } - writeInt(0, sink); + exportPaths(*store, opArgs, sign, sink); } From d5ac78e0d6f21dc60bd4f4f3990a18dc2bc12c4c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 23 Nov 2011 15:29:58 +0000 Subject: [PATCH 17/46] * Add bzip2 and xz support to nix-copy-closure. --- doc/manual/nix-copy-closure.xml | 11 ++++++++--- scripts/nix-copy-closure.in | 10 +++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/doc/manual/nix-copy-closure.xml b/doc/manual/nix-copy-closure.xml index 584e713f1..3f2f12320 100644 --- a/doc/manual/nix-copy-closure.xml +++ b/doc/manual/nix-copy-closure.xml @@ -24,6 +24,8 @@ + + user@machine @@ -96,10 +98,13 @@ those paths. If this bothers you, use - + / / - Compress the dump of each path with - gzip before sending it. + Compress the dump of each path with respectively + gzip, bzip2 or + xz before sending it. The corresponding + decompression program must be installed on the target + machine. diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in index 2eac56e3f..9191de59e 100755 --- a/scripts/nix-copy-closure.in +++ b/scripts/nix-copy-closure.in @@ -8,7 +8,7 @@ use Nix::CopyClosure; if (scalar @ARGV < 1) { print STDERR < Date: Wed, 23 Nov 2011 15:39:02 +0000 Subject: [PATCH 18/46] * Document the --include-outputs option. --- doc/manual/nix-copy-closure.xml | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/doc/manual/nix-copy-closure.xml b/doc/manual/nix-copy-closure.xml index 3f2f12320..29383d124 100644 --- a/doc/manual/nix-copy-closure.xml +++ b/doc/manual/nix-copy-closure.xml @@ -26,6 +26,7 @@ + user@machine @@ -86,15 +87,16 @@ those paths. If this bothers you, use Let the sending machine cryptographically sign the dump of each path with the key in - /nix/etc/nix/signing-key.sec. If the user on - the target machine does not have direct access to the Nix store - (i.e., if the target machine has a multi-user Nix installation), - then the target machine will check the dump against - /nix/etc/nix/signing-key.pub before unpacking - it in its Nix store. This allows secure sharing of store paths - between untrusted users on two machines, provided that there is a - trust relation between the Nix installations on both machines - (namely, they have matching public/secret keys). + sysconfdir/nix/signing-key.sec. + If the user on the target machine does not have direct access to + the Nix store (i.e., if the target machine has a multi-user Nix + installation), then the target machine will check the dump against + sysconfdir/nix/signing-key.pub + before unpacking it in its Nix store. This allows secure sharing + of store paths between untrusted users on two machines, provided + that there is a trust relation between the Nix installations on + both machines (namely, they have matching public/secret + keys). @@ -108,6 +110,13 @@ those paths. If this bothers you, use + + + Also copy the outputs of store derivations included + in the closure. 
+ + + From f3bc98b0015fe333f7b1ef342143c23d0d6a2aa5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 23 Nov 2011 15:39:54 +0000 Subject: [PATCH 19/46] --- doc/manual/nix-copy-closure.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/nix-copy-closure.xml b/doc/manual/nix-copy-closure.xml index 29383d124..45cfc0f34 100644 --- a/doc/manual/nix-copy-closure.xml +++ b/doc/manual/nix-copy-closure.xml @@ -28,7 +28,7 @@ - user@machine + user@machine paths From 784083176a6c2bbda84095de97cb59126fe7c7a5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 25 Nov 2011 17:04:26 +0000 Subject: [PATCH 20/46] * Fix race. --- tests/remote-builds.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix index 201ca9dba..de127b8ae 100644 --- a/tests/remote-builds.nix +++ b/tests/remote-builds.nix @@ -72,6 +72,7 @@ in $client->succeed("chmod 600 /root/.ssh/id_dsa"); # Install the SSH key on the slaves. + $client->waitForJob("network-interfaces"); foreach my $slave ($slave1, $slave2) { $slave->succeed("mkdir -m 700 /root/.ssh"); $slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys"); From 1df120cb05121124379676d805b8f1fb090d7e22 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 29 Nov 2011 10:51:22 +0000 Subject: [PATCH 21/46] * Get rid of the shell in ssh calls. --- scripts/build-remote.pl.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in index 72d09565b..c805d6740 100755 --- a/scripts/build-remote.pl.in +++ b/scripts/build-remote.pl.in @@ -240,7 +240,7 @@ my $buildFlags = "--max-silent-time $maxSilentTime --fallback --add-root $rootsD # in which case every child receives SIGHUP; however, `-tt' doesn't # work on some platforms when connection sharing is used.) pipe STDIN, DUMMY; # make sure we have a readable STDIN -if (system("ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) { +if (system("exec ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) { # Note that if we get exit code 100 from `nix-store -r', it # denotes a permanent build failure (as opposed to an SSH problem # or a temporary Nix problem). We propagate this to the caller to @@ -260,7 +260,7 @@ foreach my $output (@outputs) { my $maybeSignRemote = ""; $maybeSignRemote = "--sign" if $UID != 0; - system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" . + system("exec ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" . "| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0 or die "cannot copy $output from $hostName: $?"; } From 216440b3ff1431beca7784c7ae76cb5e75446953 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 29 Nov 2011 12:32:55 +0000 Subject: [PATCH 22/46] * For consistency with "nix-store -q --hash", produce hashes in base-32. (This affects Hydra manifests.) 
--- perl/lib/Nix/Store.xs | 2 +- scripts/nix-push.in | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index b50451f45..aac7761cb 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -67,7 +67,7 @@ SV * queryPathHash(char * path) try { doInit(); Hash hash = store->queryPathHash(path); - string s = "sha256:" + printHash(hash); + string s = "sha256:" + printHash32(hash); XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); } catch (Error & e) { croak(e.what()); diff --git a/scripts/nix-push.in b/scripts/nix-push.in index dcdad5721..cf46d00df 100755 --- a/scripts/nix-push.in +++ b/scripts/nix-push.in @@ -198,8 +198,8 @@ for (my $n = 0; $n < scalar @storePaths; $n++) { # In some exceptional cases (such as VM tests that use the Nix # store of the host), the database doesn't contain the hash. So # compute it. - if ($narHash eq "sha256:0000000000000000000000000000000000000000000000000000") { - $narHash = `$binDir/nix-hash --type sha256 '$storePath'`; + if ($narHash =~ /^sha256:0*$/) { + $narHash = `$binDir/nix-hash --type sha256 --base32 '$storePath'`; die "cannot hash `$storePath'" if $? != 0; chomp $narHash; $narHash = "sha256:$narHash"; From b1eb8f4249dbf666afa046c45e903566e9eb2df9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 29 Nov 2011 13:00:41 +0000 Subject: [PATCH 23/46] * Get rid of some superfluous error messages if a substituter fails. * Say "fetch" instead of "substitute". --- src/libmain/shared.cc | 2 +- src/libstore/build.cc | 21 ++++++--------------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index f7c11ed05..9076e9994 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -65,7 +65,7 @@ void printMissing(StoreAPI & store, const PathSet & paths) } if (!willSubstitute.empty()) { - printMsg(lvlInfo, format("these paths will be downloaded/copied (%.2f MiB download, %.2f MiB unpacked):") + printMsg(lvlInfo, format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):") % (downloadSize / (1024.0 * 1024.0)) % (narSize / (1024.0 * 1024.0))); foreach (PathSet::iterator, i, willSubstitute) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 171c08913..a8ef9b23e 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2202,9 +2202,7 @@ void SubstitutionGoal::tryNext() if (subs.size() == 0) { /* None left. Terminate this goal and let someone else deal with it. 
*/ - printMsg(lvlError, - format("path `%1%' is required, but there is no substituter that can build it") - % storePath); + debug(format("path `%1%' is required, but there is no substituter that can build it") % storePath); amDone(ecFailed); return; } @@ -2235,8 +2233,7 @@ void SubstitutionGoal::referencesValid() trace("all references realised"); if (nrFailed > 0) { - printMsg(lvlError, - format("some references of path `%1%' could not be realised") % storePath); + debug(format("some references of path `%1%' could not be realised") % storePath); amDone(ecFailed); return; } @@ -2289,9 +2286,7 @@ void SubstitutionGoal::tryToRun() return; } - printMsg(lvlInfo, - format("substituting path `%1%' using substituter `%2%'") - % storePath % sub); + printMsg(lvlInfo, format("fetching path `%1%'...") % storePath); logPipe.create(); @@ -2367,19 +2362,15 @@ void SubstitutionGoal::finished() try { if (!statusOk(status)) - throw SubstError(format("builder for `%1%' %2%") + throw SubstError(format("fetching path `%1%' %2%") % storePath % statusToString(status)); if (!pathExists(storePath)) - throw SubstError( - format("substitute did not produce path `%1%'") - % storePath); + throw SubstError(format("substitute did not produce path `%1%'") % storePath); } catch (SubstError & e) { - printMsg(lvlInfo, - format("substitution of path `%1%' using substituter `%2%' failed: %3%") - % storePath % sub % e.msg()); + printMsg(lvlInfo, e.msg()); if (printBuildTrace) { printMsg(lvlError, format("@ substituter-failed %1% %2% %3%") From 1749a7b0ae943f6a208ffc3fd0f6e9506872c5b6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 29 Nov 2011 13:01:24 +0000 Subject: [PATCH 24/46] * download-using-manifests: use the Perl bindings. --- perl/lib/Nix/Store.pm | 6 ++- perl/lib/Nix/Store.xs | 11 +++++ scripts/download-using-manifests.pl.in | 66 +++++++++++--------------- 3 files changed, 45 insertions(+), 38 deletions(-) diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm index d96f8e9ab..d5fc6eec5 100644 --- a/perl/lib/Nix/Store.pm +++ b/perl/lib/Nix/Store.pm @@ -12,7 +12,11 @@ our %EXPORT_TAGS = ( 'all' => [ qw( ) ] ); our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); -our @EXPORT = qw(isValidPath topoSortPaths computeFSClosure followLinksToStorePath exportPaths); +our @EXPORT = qw( + isValidPath queryReferences queryPathInfo queryDeriver queryPathHash + topoSortPaths computeFSClosure followLinksToStorePath exportPaths + hashPath +); our $VERSION = '0.15'; diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index aac7761cb..5256d1372 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -159,3 +159,14 @@ void exportPaths(int fd, int sign, ...) } catch (Error & e) { croak(e.what()); } + + +SV * hashPath(char * algo, int base32, char * path) + PPCODE: + try { + Hash h = hashPath(parseHashType(algo), path).first; + string s = base32 ? 
printHash32(h) : printHash(h); + XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); + } catch (Error & e) { + croak(e.what()); + } diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in index a827a995f..ef663dabb 100755 --- a/scripts/download-using-manifests.pl.in +++ b/scripts/download-using-manifests.pl.in @@ -3,6 +3,7 @@ use strict; use Nix::Config; use Nix::Manifest; +use Nix::Store; use POSIX qw(strftime); use File::Temp qw(tempdir); @@ -19,14 +20,8 @@ my $fast = 1; my $dbh = updateManifestDB(); -sub isValidPath { - my $p = shift; - if ($fast) { - return -e $p; - } else { - return system("$Nix::Config::binDir/nix-store --check-validity '$p' 2> /dev/null") == 0; - } -} +# $hashCache->{$algo}->{$path} yields the $algo-hash of $path. +my $hashCache; sub parseHash { @@ -101,15 +96,17 @@ sub computeSmallestDownload { foreach my $patch (@{$patchList}) { if (isValidPath($patch->{basePath})) { - # !!! this should be cached my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash}; - my $format = "--base32"; - $format = "" if $baseHashAlgo eq "md5"; - my $hash = $fast && $baseHashAlgo eq "sha256" - ? `$Nix::Config::binDir/nix-store -q --hash "$patch->{basePath}"` - : `$Nix::Config::binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`; - chomp $hash; - $hash =~ s/.*://; + + my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}}; + if (!defined $hash) { + $hash = $fast && $baseHashAlgo eq "sha256" + ? queryPathHash($patch->{basePath}) + : hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath}); + $hash =~ s/.*://; + $hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash; + } + next if $hash ne $baseHash; } push @queue, $patch->{basePath}; @@ -257,7 +254,7 @@ open LOGFILE, ">>$logFile" or die "cannot open log file $logFile"; my $date = strftime ("%F %H:%M:%S UTC", gmtime (time)); print LOGFILE "$$ get $targetPath $date\n"; -print "\n*** Trying to download/patch `$targetPath'\n"; +print STDERR "\n*** Trying to download/patch `$targetPath'\n"; # Compute the shortest path. @@ -281,7 +278,7 @@ sub downloadFile { $ENV{"PRINT_PATH"} = 1; $ENV{"QUIET"} = 1; my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`; - die "download of `$url' failed" . ($! ? ": $!" : "") unless $? == 0; + die "download of `$url' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0; chomp $path; return $path; } @@ -293,17 +290,17 @@ while (scalar @path > 0) { my $u = $edge->{start}; my $v = $edge->{end}; - print "\n*** Step $curStep/$maxStep: "; + print STDERR "\n*** Step $curStep/$maxStep: "; if ($edge->{type} eq "present") { - print "using already present path `$v'\n"; + print STDERR "using already present path `$v'\n"; print LOGFILE "$$ present $v\n"; if ($curStep < $maxStep) { # Since this is not the last step, the path will be used # as a base to one or more patches. So turn the base path # into a NAR archive, to which we can apply the patch. - print " packing base path...\n"; + print STDERR " packing base path...\n"; system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0 or die "cannot dump `$v'"; } @@ -311,17 +308,17 @@ while (scalar @path > 0) { elsif ($edge->{type} eq "patch") { my $patch = $edge->{info}; - print "applying patch `$patch->{url}' to `$u' to create `$v'\n"; + print STDERR "applying patch `$patch->{url}' to `$u' to create `$v'\n"; print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n"; # Download the patch. 
- print " downloading patch...\n"; + print STDERR " downloading patch...\n"; my $patchPath = downloadFile "$patch->{url}"; # Apply the patch to the NAR archive produced in step 1 (for # the already present path) or a later step (for patch sequences). - print " applying patch...\n"; + print STDERR " applying patch...\n"; system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0 or die "cannot apply patch `$patchPath' to $tmpNar"; @@ -331,7 +328,7 @@ while (scalar @path > 0) { } else { # This was the last patch. Unpack the final NAR archive # into the target path. - print " unpacking patched archive...\n"; + print STDERR " unpacking patched archive...\n"; system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0 or die "cannot unpack $tmpNar2 into `$v'"; } @@ -341,13 +338,13 @@ while (scalar @path > 0) { elsif ($edge->{type} eq "narfile") { my $narFile = $edge->{info}; - print "downloading `$narFile->{url}' into `$v'\n"; + print STDERR "downloading `$narFile->{url}' into `$v'\n"; my $size = $narFile->{size} || -1; print LOGFILE "$$ narfile $narFile->{url} $size $v\n"; # Download the archive. - print " downloading archive...\n"; + print STDERR " downloading archive...\n"; my $narFilePath = downloadFile "$narFile->{url}"; if ($curStep < $maxStep) { @@ -356,7 +353,7 @@ while (scalar @path > 0) { or die "cannot unpack `$narFilePath' into `$v'"; } else { # Unpack the archive into the target path. - print " unpacking archive...\n"; + print STDERR " unpacking archive...\n"; system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0 or die "cannot unpack `$narFilePath' into `$v'"; } @@ -376,20 +373,15 @@ if (defined $finalNarHash) { # The hash in the manifest can be either in base-16 or base-32. # Handle both. - my $extraFlag = - ($hashAlgo eq "sha256" && length($hash) != 64) - ? "--base32" : ""; + my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath); - my $hash2 = `$Nix::Config::binDir/nix-hash --type $hashAlgo $extraFlag $targetPath` - or die "cannot compute hash of path `$targetPath'"; - chomp $hash2; - - die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2" + die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n" if $hash ne $hash2; } else { - die "cannot check integrity of the downloaded path since its hash is not known"; + die "cannot check integrity of the downloaded path since its hash is not known\n"; } +print STDERR "\n"; print LOGFILE "$$ success\n"; close LOGFILE; From 4d0407ba087f8bc3a21865309291996084fd80eb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 29 Nov 2011 13:38:52 +0000 Subject: [PATCH 25/46] * Fix make check. --- perl/Makefile.am | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/perl/Makefile.am b/perl/Makefile.am index 93f5415c8..ccb08b447 100644 --- a/perl/Makefile.am +++ b/perl/Makefile.am @@ -4,8 +4,10 @@ perllibdir = $(libdir)/perl5/site_perl/$(perlversion)/$(perlarchname) PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in +# Hack required by "make check". 
all: $(PERL_MODULES:.in=) - ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/Store.so + mkdir -p lib/auto/Nix/Store + ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/auto/Nix/Store/Store.so install-exec-local: $(PERL_MODULES:.in=) $(INSTALL) -d $(DESTDIR)$(perllibdir)/Nix From f35c4351e5f99f1960a6ca7a3fd6dae76dcca163 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 29 Nov 2011 22:15:07 +0000 Subject: [PATCH 26/46] * Don't require a specific Perl version. --- perl/lib/Nix/Store.pm | 1 - 1 file changed, 1 deletion(-) diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm index d5fc6eec5..126831105 100644 --- a/perl/lib/Nix/Store.pm +++ b/perl/lib/Nix/Store.pm @@ -1,6 +1,5 @@ package Nix::Store; -use 5.010001; use strict; use warnings; From 3c7ec8fc1b1fe3c3bf48b957c2aafa6c3ea6d846 Mon Sep 17 00:00:00 2001 From: Peter Simons Date: Thu, 1 Dec 2011 08:02:37 +0000 Subject: [PATCH 27/46] doc/manual/nix-env.xml: stripped trailing whitespace --- doc/manual/nix-env.xml | 200 ++++++++++++++++++++--------------------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/doc/manual/nix-env.xml b/doc/manual/nix-env.xml index 403ab2678..a6b596408 100644 --- a/doc/manual/nix-env.xml +++ b/doc/manual/nix-env.xml @@ -2,7 +2,7 @@ xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xi="http://www.w3.org/2001/XInclude" xml:id="sec-nix-env"> - + nix-env 1 @@ -60,7 +60,7 @@ environments. nix-env takes exactly one operation flag which indicates the subcommand to be performed. These are documented below. - + @@ -78,19 +78,19 @@ linkend="sec-common-options" />. - + Specifies the Nix expression (designated below as the active Nix expression) used by the , , and operations to obtain derivations. The default is ~/.nix-defexpr. - + - + - + Specifies the profile to be used by those operations that operate on a profile (designated below as the active profile). A profile is sequence of @@ -98,11 +98,11 @@ linkend="sec-common-options" />. which is the current generation. The default profile is the target of the symbolic link ~/.nix-profile (see below). - + - + - + For the , , , and @@ -117,23 +117,23 @@ linkend="sec-common-options" />. substitute is available). - + system - + By default, operations such as only include derivations matching the current platform. This option allows you to use derivations for the specified platform system. The special value * causes derivations for any platform to be included. - + - + @@ -170,25 +170,25 @@ linkend="sec-common-options" />. this directory. - + ~/.nix-profile - + A symbolic link to the user's current profile. By default, this symlink points to prefix/var/nix/profiles/default. The PATH environment variable should include ~/.nix-profile/bin for the user environment to be visible to the user. - + - + - + @@ -214,7 +214,7 @@ linkend="sec-common-options" />. Description - + The install operation creates a new user environment, based on the current generation of the active profile, to which a set of store paths described by args is added. The @@ -306,20 +306,20 @@ number of possible ways: - + Do not remove derivations with a name matching one of the derivations being installed. Usually, trying to have two versions of the same package installed in the same generation of a profile will lead to an error in building the generation, due to file name clashes between the two versions. However, this is not the case for all packages. 
- + - + Examples @@ -327,7 +327,7 @@ number of possible ways: active Nix expression: -$ nix-env --install gcc-3.3.2 +$ nix-env --install gcc-3.3.2 installing `gcc-3.3.2' uninstalling `gcc-3.1' @@ -408,15 +408,15 @@ the following paths will be substituted: /nix/store/8zbipvm4gp9jfqh9nnk1n3bary1a37gs-perl-XML-Parser-2.34 /nix/store/b8a2bg7gnyvvvjjibp4axg9x1hzkw36c-mono-1.1.4 ... - + - + - + Operation <option>--upgrade</option> @@ -442,7 +442,7 @@ the following paths will be substituted: Description - + The upgrade operation creates a new user environment, based on the current generation of the active profile, in which all store paths are replaced for which there are newer versions in the set of paths @@ -459,47 +459,47 @@ the same symbolic name, only the one with the highest version is installed. - + Flags - + Only upgrade a derivation to newer versions. This is the default. - + - + In addition to upgrading to newer versions, also “upgrade” to derivations that have the same version. Version are not a unique identification of a derivation, so there may be many derivations that have the same version. This flag may be useful to force “synchronisation” between the installed and available derivations. - + - + Only “upgrade” to derivations that have the same version. This may not seem very useful, but it actually is, e.g., when there is a new release of Nixpkgs and you want to replace installed applications with the same versions built against newer dependencies (to reduce the number of dependencies floating around on your system). - + - + In addition to upgrading to newer versions, also “upgrade” to derivations that have the same or a lower version. I.e., derivations may actually be downgraded depending on what is available in the active Nix expression. - + @@ -523,10 +523,10 @@ $ nix-env --upgrade pan $ nix-env -u '*' (try to upgrade everything) upgrading `hello-2.1.2' to `hello-2.1.3' -upgrading `mozilla-1.2' to `mozilla-1.4' +upgrading `mozilla-1.2' to `mozilla-1.4' - + Versions The upgrade operation determines whether a derivation @@ -570,14 +570,14 @@ lexicographically (i.e., using case-sensitive string comparison). 2.3a < 2.3c 2.3pre1 < 2.3c 2.3pre1 < 2.3q - + - + - + @@ -596,14 +596,14 @@ lexicographically (i.e., using case-sensitive string comparison). Description - + The uninstall operation creates a new user environment, based on the current generation of the active profile, from which the store paths designated by the symbolic names names are removed. - + Examples @@ -611,11 +611,11 @@ $ nix-env --uninstall gcc $ nix-env -e '*' (remove everything) - + - + Operation <option>--set-flag</option> @@ -632,7 +632,7 @@ $ nix-env -e '*' (remove everything) Description - + The operation allows meta attributes of installed packages to be modified. There are several attributes that can be usefully modified, because they affect the behaviour of @@ -670,7 +670,7 @@ script: - + Examples To prevent the currently installed Firefox from being upgraded: @@ -716,13 +716,13 @@ $ nix-env --set-flag priority 10 gcc - + - + - + Operation <option>--query</option> Synopsis @@ -738,9 +738,9 @@ $ nix-env --set-flag priority 10 gcc - + - + @@ -785,7 +785,7 @@ $ nix-env --set-flag priority 10 gcc - + names @@ -793,7 +793,7 @@ $ nix-env --set-flag priority 10 gcc Description - + The query operation displays information about either the store paths that are installed in the current generation of the active profile (), or the derivations that are @@ -817,23 +817,23 @@ operates. 
- + The query operates on the store paths that are installed in the current generation of the active profile. This is the default. - + - + The query operates on the derivations that are available in the active Nix expression. - + - + @@ -874,7 +874,7 @@ user environment elements, etc. --> - + Print the status of the derivation. The status consists of three characters. The first is I or -, indicating @@ -888,7 +888,7 @@ user environment elements, etc. --> third is S or -, indicating whether a substitute is available for the derivation. - + @@ -901,17 +901,17 @@ user environment elements, etc. --> nix-env --install. - + - + Suppress printing of the name attribute of each derivation. - + / - + Compare installed versions to available versions, or vice versa (if is given). This is useful for quickly seeing whether upgrades for installed @@ -926,21 +926,21 @@ user environment elements, etc. --> or installed. - + = version At most the same version of the package is available or installed. - + > version Only older versions of the package are available or installed. - + - ? No version of the package is available or @@ -951,45 +951,45 @@ user environment elements, etc. --> - + - + Print the system attribute of the derivation. - + - + Print the path of the store derivation. - + - + Print the output path of the derivation. - + - + Print a short (one-line) description of the derivation, if available. The description is taken from the meta.description attribute of the derivation. - + - + Print all of the meta-attributes of the derivation. This option is only available with . - + @@ -1023,7 +1023,7 @@ IP- ORBit2-2.8.3 (installed and by definition present)(show available derivations in the Nix expression foo.nix) -$ nix-env -f ./foo.nix -qa '*' +$ nix-env -f ./foo.nix -qa '*' foo-1.2.3 $ nix-env -qc '*' (compare installed versions to what’s available) @@ -1034,7 +1034,7 @@ firefox-1.0.4 < 1.0.7 (a more recent version is availab ... (show info about a specific package, in XML) -$ nix-env -qa --xml --description firefox +$ nix-env -qa --xml --description firefox - - + + @@ -1067,25 +1067,25 @@ $ nix-env -qa --xml --description firefox Description - + This operation makes path the current profile for the user. That is, the symlink ~/.nix-profile is made to point to path. - + Examples $ nix-env -S ~/my-profile - + - + Operation <option>--list-generations</option> @@ -1101,7 +1101,7 @@ $ nix-env -S ~/my-profile Description - + This operation print a list of all the currently existing generations for the active profile. These may be switched to using the operation. It also prints @@ -1121,11 +1121,11 @@ $ nix-env --list-generations 98 2004-02-06 16:24:33 (current) - + - + Operation <option>--delete-generations</option> @@ -1142,7 +1142,7 @@ $ nix-env --list-generations Description - + This operation deletes the specified generations of the current profile. The generations can be a list of generation numbers, or the special value old to delete all non-current @@ -1150,7 +1150,7 @@ generations. Periodically deleting old generations is important to make garbage collection effective. - + Examples @@ -1159,11 +1159,11 @@ $ nix-env --delete-generations 3 4 8 $ nix-env -p other_profile --delete-generations old - + - + Operation <option>--switch-generation</option> @@ -1183,7 +1183,7 @@ $ nix-env -p other_profile --delete-generations old Description - + This operation makes generation number generation the current generation of the active profile. 
That is, if the @@ -1207,11 +1207,11 @@ $ nix-env -G 42 switching from generation 50 to 42 - + - + Operation <option>--rollback</option> @@ -1226,7 +1226,7 @@ switching from generation 50 to 42 Description - + This operation switches to the “previous” generation of the active profile, that is, the highest numbered generation lower than the current generation, if it exists. It is just a convenience @@ -1246,9 +1246,9 @@ $ nix-env --rolback error: no generation older than the current (91) exists - + - + From be9be4c1476a46e9d0996d89613ce44d9aaa6da4 Mon Sep 17 00:00:00 2001 From: Peter Simons Date: Thu, 1 Dec 2011 08:03:30 +0000 Subject: [PATCH 28/46] doc/manual/nix-env.xml: fixed "nix-env -qaA" typo --- doc/manual/nix-env.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manual/nix-env.xml b/doc/manual/nix-env.xml index a6b596408..a03afaf5c 100644 --- a/doc/manual/nix-env.xml +++ b/doc/manual/nix-env.xml @@ -256,7 +256,7 @@ number of possible ways: attribute paths that select attributes from the top-level Nix expression. This is faster than using derivation names and unambiguous. To find out the attribute paths of available - packages, use nix-env -qaA '*'. + packages, use nix-env -qaP '*'. If path is given, From 24f863d86b0316c736fe9e89998cd442b8a400dd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 1 Dec 2011 13:48:48 +0000 Subject: [PATCH 29/46] * When doing "nix-store --add-fixed" without "--recursive" via the Nix daemon (which is an error), print a nicer error message than "Connection reset by peer" or "broken pipe". * In the daemon, log errors that occur during request parameter processing. --- src/libstore/remote-store.cc | 2 +- src/libstore/store-api.hh | 4 +--- src/nix-worker/nix-worker.cc | 16 ++++++++++++++-- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 568a6aa58..0c6a1c37d 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -328,7 +328,7 @@ Path RemoteStore::addToStore(const Path & _srcPath, openConnection(); Path srcPath(absPath(_srcPath)); - + writeInt(wopAddToStore, to); writeString(baseNameOf(srcPath), to); /* backwards compatibility hack */ diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 8bfb09880..e3a2c0daa 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -151,9 +151,7 @@ public: /* Copy the contents of a path to the store and register the validity the resulting path. The resulting path is returned. - If `fixed' is true, then the output of a fixed-output - derivation is pre-loaded into the Nix store. The function - object `filter' can be used to exclude files (see + The function object `filter' can be used to exclude files (see libutil/archive.hh). */ virtual Path addToStore(const Path & srcPath, bool recursive = true, HashType hashAlgo = htSHA256, diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 0fa1b40ae..d74b82df4 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -241,11 +241,14 @@ struct TunnelSource : Source the contents of the file to `s'. Otherwise barf. 
*/ struct RetrieveRegularNARSink : ParseSink { + bool regular; string s; + RetrieveRegularNARSink() : regular(true) { } + void createDirectory(const Path & path) { - throw Error("regular file expected"); + regular = false; } void receiveContents(unsigned char * data, unsigned int len) @@ -255,7 +258,7 @@ struct RetrieveRegularNARSink : ParseSink void createSymlink(const Path & path, const string & target) { - throw Error("regular file expected"); + regular = false; } }; @@ -363,6 +366,7 @@ static void performOp(unsigned int clientVersion, parseDump(sink, savedNAR); } else { parseDump(savedRegular, from); + if (!savedRegular.regular) throw Error("regular file expected"); } startWork(); @@ -638,7 +642,15 @@ static void processConnection() try { performOp(clientVersion, from, to, op); } catch (Error & e) { + /* If we're not in a state were we can send replies, then + something went wrong processing the input of the + client. This can happen especially if I/O errors occur + during addTextToStore() / importPath(). If that + happens, just send the error message and exit. */ + bool errorAllowed = canSendStderr; + if (!errorAllowed) printMsg(lvlError, format("error processing client input: %1%") % e.msg()); stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0); + if (!errorAllowed) break; } assert(!canSendStderr); From 23c38a04ccba4469b9aa98167532c236beeee0a0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 1 Dec 2011 13:51:05 +0000 Subject: [PATCH 30/46] * Slight improvement. --- src/nix-worker/nix-worker.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index d74b82df4..8950f73ef 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -364,12 +364,11 @@ static void performOp(unsigned int clientVersion, addToStoreFromDump(). */ ParseSink sink; /* null sink; just parse the NAR */ parseDump(sink, savedNAR); - } else { + } else parseDump(savedRegular, from); - if (!savedRegular.regular) throw Error("regular file expected"); - } startWork(); + if (!savedRegular.regular) throw Error("regular file expected"); Path path = dynamic_cast(store.get()) ->addToStoreFromDump(recursive ? 
savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo); stopWork(); From b12b21825c949ef9b9327c6a0c9e2d5601aaf0b2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 1 Dec 2011 16:41:43 +0000 Subject: [PATCH 31/46] * Allow '' syntax to be used in nix-instantiate, nix-build and nix-env, e.g., $ nix-env -f '' -i patchelf or $ nix-build '' -A login.test --- src/libexpr/common-opts.cc | 11 +++++++++++ src/libexpr/common-opts.hh | 2 ++ src/nix-env/nix-env.cc | 2 +- src/nix-instantiate/nix-instantiate.cc | 3 +-- tests/user-envs.sh | 2 +- 5 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/libexpr/common-opts.cc b/src/libexpr/common-opts.cc index d029d2ec3..e0865d9fc 100644 --- a/src/libexpr/common-opts.cc +++ b/src/libexpr/common-opts.cc @@ -44,4 +44,15 @@ bool parseSearchPathArg(const string & arg, Strings::iterator & i, } +Path lookupFileArg(EvalState & state, string s) +{ + if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { + Path p = state.findFile(s.substr(1, s.size() - 2)); + if (p == "") throw Error(format("file `%1%' was not found in the Nix search path (add it using $NIX_PATH or -I)") % p); + return p; + } else + return absPath(s); +} + + } diff --git a/src/libexpr/common-opts.hh b/src/libexpr/common-opts.hh index 6b7247fc3..c28641e90 100644 --- a/src/libexpr/common-opts.hh +++ b/src/libexpr/common-opts.hh @@ -14,6 +14,8 @@ bool parseOptionArg(const string & arg, Strings::iterator & i, bool parseSearchPathArg(const string & arg, Strings::iterator & i, const Strings::iterator & argsEnd, EvalState & state); +Path lookupFileArg(EvalState & state, string s); + } diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 3dfecb2d7..a8d9076cf 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1270,7 +1270,7 @@ void run(Strings args) else if (arg == "--profile" || arg == "-p") globals.profile = absPath(needArg(i, args, arg)); else if (arg == "--file" || arg == "-f") - globals.instSource.nixExprPath = absPath(needArg(i, args, arg)); + globals.instSource.nixExprPath = lookupFileArg(globals.state, needArg(i, args, arg)); else if (arg == "--switch-profile" || arg == "-S") op = opSwitchProfile; else if (arg == "--switch-generation" || arg == "-G") diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 98eadbd69..93aa50943 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -138,8 +138,7 @@ void run(Strings args) } foreach (Strings::iterator, i, files) { - Path path = absPath(*i); - Expr * e = state.parseExprFromFile(path); + Expr * e = state.parseExprFromFile(lookupFileArg(state, *i)); processExpr(state, attrPaths, parseOnly, strict, autoArgs, evalOnly, xmlOutput, xmlOutputSourceLocation, e); } diff --git a/tests/user-envs.sh b/tests/user-envs.sh index 025a5ff81..5037e28b9 100644 --- a/tests/user-envs.sh +++ b/tests/user-envs.sh @@ -36,7 +36,7 @@ nix-env -p $profiles/test -q '*' | grep -q foo-2.0pre1 test "$($profiles/test/bin/foo)" = "foo-2.0pre1" # Upgrade "foo": should install foo-2.0. -nix-env -p $profiles/test -f ./user-envs.nix -u foo +NIX_PATH=nixpkgs=./user-envs.nix nix-env -p $profiles/test -f '' -u foo # Query installed: should contain foo-2.0 now. test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1 From 49f59dceca37636353cf2f5f60135d7705ea154e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 2 Dec 2011 11:47:06 +0000 Subject: [PATCH 32/46] * Move parseHash16or32 into libutil, and use in nix-hash. 
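
A note on the new helper: parseHash16or32 picks the parser purely from the string length. For SHA-256 the raw hash is 32 bytes, so the base-16 form is 64 characters and the base-32 form is 52 characters. A minimal, hypothetical caller, assuming the libutil headers as they stand after this patch (hash-conv.cc is an illustrative sketch only, not part of this patch series):

// hash-conv.cc -- illustrative sketch only, not part of this patch series.
// Accepts a SHA-256 hash in either base-16 or base-32 and prints both forms.
#include <iostream>
#include "hash.hh"

using namespace nix;

int main(int argc, char * * argv)
{
    if (argc != 2) {
        std::cerr << "usage: hash-conv HASH\n";
        return 1;
    }
    try {
        /* Dispatches on length: 64 chars -> parseHash, 52 chars -> parseHash32. */
        Hash h = parseHash16or32(htSHA256, argv[1]);
        std::cout << printHash(h) << "\n";   /* base-16 */
        std::cout << printHash32(h) << "\n"; /* base-32 */
    } catch (Error & e) {
        std::cerr << e.what() << "\n";
        return 1;
    }
    return 0;
}

The nix-hash change below relies on the same helper, so converting a hash to base-16 or base-32 no longer requires knowing which encoding the input already uses.
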
--- src/libexpr/primops.cc | 12 +----------- src/libutil/hash.cc | 16 ++++++++++++++++ src/libutil/hash.hh | 3 +++ src/nix-hash/nix-hash.cc | 6 +++--- src/nix-store/nix-store.cc | 8 -------- 5 files changed, 23 insertions(+), 22 deletions(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 0e81f7b72..66173cdaf 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -401,17 +401,7 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v) HashType ht = parseHashType(outputHashAlgo); if (ht == htUnknown) throw EvalError(format("unknown hash algorithm `%1%'") % outputHashAlgo); - Hash h(ht); - if (outputHash.size() == h.hashSize * 2) - /* hexadecimal representation */ - h = parseHash(ht, outputHash); - else if (outputHash.size() == hashLength32(h)) - /* base-32 representation */ - h = parseHash32(ht, outputHash); - else - throw Error(format("hash `%1%' has wrong length for hash type `%2%'") - % outputHash % outputHashAlgo); - string s = outputHash; + Hash h = parseHash16or32(ht, outputHash); outputHash = printHash(h); if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo; diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index b9e784699..533423441 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -204,6 +204,22 @@ Hash parseHash32(HashType ht, const string & s) } +Hash parseHash16or32(HashType ht, const string & s) +{ + Hash hash(ht); + if (s.size() == hash.hashSize * 2) + /* hexadecimal representation */ + hash = parseHash(ht, s); + else if (s.size() == hashLength32(hash)) + /* base-32 representation */ + hash = parseHash32(ht, s); + else + throw Error(format("hash `%1%' has wrong length for hash type `%2%'") + % s % printHashType(ht)); + return hash; +} + + bool isHash(const string & s) { if (s.length() != 32) return false; diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index 13740954d..cbdcf4c8d 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -58,6 +58,9 @@ string printHash32(const Hash & hash); /* Parse a base-32 representation of a hash code. */ Hash parseHash32(HashType ht, const string & s); +/* Parse a base-16 or base-32 representation of a hash code. */ +Hash parseHash16or32(HashType ht, const string & s); + /* Verify that the given string is a valid hash code. */ bool isHash(const string & s); diff --git a/src/nix-hash/nix-hash.cc b/src/nix-hash/nix-hash.cc index 4867234bf..5b35ccd9d 100644 --- a/src/nix-hash/nix-hash.cc +++ b/src/nix-hash/nix-hash.cc @@ -43,7 +43,7 @@ void run(Strings args) } if (op == opHash) { - for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) { + foreach (Strings::iterator, i, ss) { Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i).first; if (truncate && h.hashSize > 20) h = compressHash(h, 20); std::cout << format("%1%\n") % @@ -52,8 +52,8 @@ void run(Strings args) } else { - for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) { - Hash h = op == opTo16 ? parseHash32(ht, *i) : parseHash(ht, *i); + foreach (Strings::iterator, i, ss) { + Hash h = parseHash16or32(ht, *i); std::cout << format("%1%\n") % (op == opTo16 ? printHash(h) : printHash32(h)); } diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 84d3da032..740033b45 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -133,14 +133,6 @@ static void opAddFixed(Strings opFlags, Strings opArgs) } -static Hash parseHash16or32(HashType ht, const string & s) -{ - return s.size() == Hash(ht).hashSize * 2 - ? 
parseHash(ht, s) - : parseHash32(ht, s); -} - - /* Hack to support caching in `nix-prefetch-url'. */ static void opPrintFixedPath(Strings opFlags, Strings opArgs) { From 92d6a5ed73e043aebe5029c1ed75449873d327ac Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 2 Dec 2011 12:09:24 +0000 Subject: [PATCH 33/46] * Add some more functions to the Perl bindings. --- perl/lib/Nix/Store.pm | 3 ++- perl/lib/Nix/Store.xs | 46 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm index 126831105..4283e77a4 100644 --- a/perl/lib/Nix/Store.pm +++ b/perl/lib/Nix/Store.pm @@ -14,7 +14,8 @@ our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); our @EXPORT = qw( isValidPath queryReferences queryPathInfo queryDeriver queryPathHash topoSortPaths computeFSClosure followLinksToStorePath exportPaths - hashPath + hashPath hashFile hashString + addToStore makeFixedOutputPath ); our $VERSION = '0.15'; diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index 5256d1372..f8a577fce 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -170,3 +170,49 @@ SV * hashPath(char * algo, int base32, char * path) } catch (Error & e) { croak(e.what()); } + + +SV * hashFile(char * algo, int base32, char * path) + PPCODE: + try { + Hash h = hashFile(parseHashType(algo), path); + string s = base32 ? printHash32(h) : printHash(h); + XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); + } catch (Error & e) { + croak(e.what()); + } + + +SV * hashString(char * algo, int base32, char * s) + PPCODE: + try { + Hash h = hashString(parseHashType(algo), s); + string s = base32 ? printHash32(h) : printHash(h); + XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); + } catch (Error & e) { + croak(e.what()); + } + + +SV * addToStore(char * srcPath, int recursive, char * algo) + PPCODE: + try { + doInit(); + Path path = store->addToStore(srcPath, recursive, parseHashType(algo)); + XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0))); + } catch (Error & e) { + croak(e.what()); + } + + +SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name) + PPCODE: + try { + doInit(); + HashType ht = parseHashType(algo); + Path path = makeFixedOutputPath(recursive, ht, + parseHash16or32(ht, hash), name); + XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0))); + } catch (Error & e) { + croak(e.what()); + } From 3964d95abff8af2dc88effcebf6d935805ee6265 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 2 Dec 2011 12:09:50 +0000 Subject: [PATCH 34/46] * nix-prefetch-url: rewritten in Perl. --- scripts/nix-prefetch-url.in | 227 +++++++++++++++--------------------- 1 file changed, 95 insertions(+), 132 deletions(-) diff --git a/scripts/nix-prefetch-url.in b/scripts/nix-prefetch-url.in index 45bad75f3..eea2b814b 100755 --- a/scripts/nix-prefetch-url.in +++ b/scripts/nix-prefetch-url.in @@ -1,165 +1,128 @@ -#! @shell@ -e +#! 
@perl@ -w @perlFlags@ -url=$1 -expHash=$2 +use strict; +use File::Basename; +use File::Temp qw(tempdir); +use File::stat; +use Nix::Store; +use Nix::Config; -binDir=@bindir@ -if [ -n "$NIX_BIN_DIR" ]; then binDir="$NIX_BIN_DIR"; fi +my $url = shift; +my $expHash = shift; +my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256"; +my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'}; -# needed to make it work on NixOS -export PATH=$PATH:@coreutils@ +if (!defined $url || $url eq "") { + print STDERR <$fn" or die; + print TMP "$s" or die; + close TMP or die; +} -hashFormat= -if test "$hashType" != "md5"; then - hashFormat=--base32 -fi +sub readFile { + local $/ = undef; + my ($fn) = @_; + open TMP, "<$fn" or die; + my $s = ; + close TMP or die; + return $s; +} -if test -z "$url"; then - echo "syntax: nix-prefetch-url URL [EXPECTED-HASH]" >&2 - exit 1 -fi +my $tmpDir = tempdir("nix-prefetch-url.XXXXXX", CLEANUP => 1, TMPDIR => 1) + or die "cannot create a temporary directory"; + +# Hack to support the mirror:// scheme from Nixpkgs. +if ($url =~ /^mirror:\/\//) { + system("$Nix::Config::binDir/nix-build '' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0 + or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n"; + my @expanded = split ' ', readFile("$tmpDir/urls"); + die "$0: cannot resolve ‘$url’" unless scalar @expanded > 0; + print STDERR "$url expands to $expanded[0]\n"; + $url = $expanded[0]; +} # Handle escaped characters in the URI. `+', `=' and `?' are the only # characters that are valid in Nix store path names but have a special # meaning in URIs. -name=$(basename "$url" | @sed@ -e 's/%2b/+/g' -e 's/%3d/=/g' -e 's/%3f/\?/g') -if test -z "$name"; then echo "invalid url"; exit 1; fi +my $name = basename $url; +die "cannot figure out file name for ‘$url’\n" if $name eq ""; +$name =~ s/%2b/+/g; +$name =~ s/%3d/=/g; +$name =~ s/%3f/?/g; +my $finalPath; +my $hash; # If the hash was given, a file with that hash may already be in the # store. -if test -n "$expHash"; then - finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$expHash" "$name") - if ! $bindir/nix-store --check-validity "$finalPath" 2> /dev/null; then - finalPath= - fi - hash=$expHash -fi - - -mkTempDir() { - if test -n "$tmpPath"; then return; fi - local i=0 - while true; do - if test -z "$TMPDIR"; then TMPDIR=/tmp; fi - tmpPath=$TMPDIR/nix-prefetch-url-$$-$i - if mkdir "$tmpPath"; then break; fi - # !!! to bad we can't check for ENOENT in mkdir, so this check - # is slightly racy (it bombs out if somebody just removed - # $tmpPath...). - if ! test -e "$tmpPath"; then exit 1; fi - i=$((i + 1)) - done - trap removeTempDir EXIT SIGINT SIGQUIT +if (defined $expHash) { + $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name); + if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; } } -removeTempDir() { - if test -n "$tmpPath"; then - rm -rf "$tmpPath" || true - fi -} - - -doDownload() { - @curl@ $cacheFlags --fail --location --max-redirs 20 --disable-epsv \ - --cookie-jar $tmpPath/cookies "$url" -o $tmpFile -} - - -# Hack to support the mirror:// scheme from Nixpkgs. -if test "${url:0:9}" = "mirror://"; then - if test -z "$NIXPKGS_ALL"; then - echo "Resolving mirror:// URLs requires Nixpkgs. Please point \$NIXPKGS_ALL at a Nixpkgs tree." 
>&2 - exit 1 - fi - - mkTempDir - nix-build "$NIXPKGS_ALL" -A resolveMirrorURLs --argstr url "$url" -o $tmpPath/urls > /dev/null - - expanded=($(cat $tmpPath/urls)) - if test "${#expanded[*]}" = 0; then - echo "$0: cannot resolve $url." >&2 - exit 1 - fi - - echo "$url expands to ${expanded[*]} (using ${expanded[0]})" >&2 - url="${expanded[0]}" -fi - - # If we don't know the hash or a file with that hash doesn't exist, # download the file and add it to the store. -if test -z "$finalPath"; then - - mkTempDir - tmpFile=$tmpPath/$name +if (!defined $finalPath) { + my $tmpFile = "$tmpDir/$name"; + # Optionally do timestamp-based caching of the download. # Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is # the hash and the timestamp of the file at $url. The caching of # the file *contents* is done in Nix store, where it can be # garbage-collected independently. - if test -n "$NIX_DOWNLOAD_CACHE"; then - echo -n "$url" > $tmpPath/url - urlHash=$($binDir/nix-hash --type sha256 --base32 --flat $tmpPath/url) - echo "$url" > "$NIX_DOWNLOAD_CACHE/$urlHash.url" - cachedHashFN="$NIX_DOWNLOAD_CACHE/$urlHash.$hashType" - cachedTimestampFN="$NIX_DOWNLOAD_CACHE/$urlHash.stamp" - cacheFlags="--remote-time" - if test -e "$cachedTimestampFN" -a -e "$cachedHashFN"; then - # Only download the file if it is newer than the cached version. - cacheFlags="$cacheFlags --time-cond $cachedTimestampFN" - fi - fi - + my ($cachedTimestampFN, $cachedHashFN, @cacheFlags); + if (defined $cacheDir) { + my $urlHash = hashString("sha256", 1, $url); + writeFile "$cacheDir/$urlHash.url", $url; + $cachedHashFN = "$cacheDir/$urlHash.$hashType"; + $cachedTimestampFN = "$cacheDir/$urlHash.stamp"; + @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN; + } + # Perform the download. - doDownload + my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || ""))); + (system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of ‘$url’ failed\n"; - if test -n "$NIX_DOWNLOAD_CACHE" -a ! -e $tmpFile; then + if (defined $cacheDir && ! -e $tmpFile) { # Curl didn't create $tmpFile, so apparently there's no newer # file on the server. - hash=$(cat $cachedHashFN) - finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$hash" "$name") - if ! $binDir/nix-store --check-validity "$finalPath" 2> /dev/null; then - echo "cached contents of \`$url' disappeared, redownloading..." >&2 - finalPath= - cacheFlags="--remote-time" - doDownload - fi - fi + $hash = readFile $cachedHashFN or die; + $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name); + unless (isValidPath $finalPath) { + print STDERR "cached contents of ‘$url’ disappeared, redownloading...\n"; + $finalPath = undef; + (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of ‘$url’ failed\n"; + } + } - if test -z "$finalPath"; then - - # Compute the hash. - hash=$($binDir/nix-hash --type "$hashType" $hashFormat --flat $tmpFile) - if ! test -n "$QUIET"; then echo "hash is $hash" >&2; fi - - if test -n "$NIX_DOWNLOAD_CACHE"; then - echo $hash > $cachedHashFN - touch -r $tmpFile $cachedTimestampFN - fi - - # Add the downloaded file to the Nix store. 
- finalPath=$($binDir/nix-store --add-fixed "$hashType" $tmpFile) - - if test -n "$expHash" -a "$expHash" != "$hash"; then - echo "hash mismatch for URL \`$url'" >&2 - exit 1 - fi + if (!defined $finalPath) { - fi -fi + # Compute the hash. + $hash = hashFile($hashType, $hashType ne "md5", $tmpFile); + if (defined $cacheDir) { + writeFile $cachedHashFN, $hash; + my $st = stat($tmpFile) or die; + open STAMP, ">$cachedTimestampFN" or die; close STAMP; + utime($st->atime, $st->mtime, $cachedTimestampFN) or die; + } + + # Add the downloaded file to the Nix store. + $finalPath = addToStore($tmpFile, 0, $hashType); + } -if ! test -n "$QUIET"; then echo "path is $finalPath" >&2; fi + die "$0: hash mismatch for ‘$url’\n" if defined $expHash && $expHash ne $hash; +} -echo $hash - -if test -n "$PRINT_PATH"; then - echo $finalPath -fi +print STDERR "path is ‘$finalPath’\n" unless $ENV{'QUIET'}; +print "$hash\n"; +print "$finalPath\n" if $ENV{'PRINT_PATH'}; From 000160f5b915ce784e740c139f81e0cbeda751c4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 2 Dec 2011 17:52:18 +0000 Subject: [PATCH 35/46] =?UTF-8?q?*=20In=20=E2=80=98nix-store=20--verify=20?= =?UTF-8?q?--check-contents=E2=80=99,=20repair=20missing=20hashes=20=20=20?= =?UTF-8?q?rather=20than=20complain=20about=20them.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/libstore/local-store.cc | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 3c1f2ecac..a353168ff 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -366,7 +366,7 @@ void LocalStore::openDB(bool create) stmtRegisterValidPath.create(db, "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);"); stmtUpdatePathInfo.create(db, - "update ValidPaths set narSize = ? where path = ?;"); + "update ValidPaths set narSize = ?, hash = ? where path = ?;"); stmtAddReference.create(db, "insert or replace into Refs (referrer, reference) values (?, ?);"); stmtQueryPathInfo.create(db, @@ -683,7 +683,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path) } -/* Update path info in the database. Currently only updated the +/* Update path info in the database. Currently only updates the narSize field. */ void LocalStore::updatePathInfo(const ValidPathInfo & info) { @@ -692,6 +692,7 @@ void LocalStore::updatePathInfo(const ValidPathInfo & info) stmtUpdatePathInfo.bind64(info.narSize); else stmtUpdatePathInfo.bind(); // null + stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash)); stmtUpdatePathInfo.bind(info.path); if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE) throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path); @@ -1386,6 +1387,8 @@ void LocalStore::verifyStore(bool checkContents) if (checkContents) { printMsg(lvlInfo, "checking hashes..."); + Hash nullHash(htSHA256); + foreach (PathSet::iterator, i, validPaths) { try { ValidPathInfo info = queryPathInfo(*i); @@ -1394,17 +1397,30 @@ void LocalStore::verifyStore(bool checkContents) printMsg(lvlTalkative, format("checking contents of `%1%'") % *i); HashResult current = hashPath(info.hash.type, *i); - if (current.first != info.hash) { + if (info.hash != nullHash && info.hash != current.first) { printMsg(lvlError, format("path `%1%' was modified! 
" "expected hash `%2%', got `%3%'") % *i % printHash(info.hash) % printHash(current.first)); } else { + + bool update = false; + + /* Fill in missing hashes. */ + if (info.hash == nullHash) { + printMsg(lvlError, format("fixing missing hash on `%1%'") % *i); + info.hash = current.first; + update = true; + } + /* Fill in missing narSize fields (from old stores). */ if (info.narSize == 0) { printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second); info.narSize = current.second; - updatePathInfo(info); + update = true; } + + if (update) updatePathInfo(info); + } } catch (Error & e) { From c8c0380744afd107611bba17127a182ecebd4e0b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 5 Dec 2011 21:04:20 +0000 Subject: [PATCH 36/46] * Remove unnecessary quotes. showPaths() already adds quotes. --- src/libstore/local-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index a353168ff..5107a93de 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1347,7 +1347,7 @@ void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFr PathSet referrers; queryReferrers(path, referrers); referrers.erase(path); /* ignore self-references */ if (!referrers.empty()) - throw PathInUse(format("cannot delete path `%1%' because it is in use by `%2%'") + throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%") % path % showPaths(referrers)); invalidatePath(path); } From 23bf700196b4fa05a2da55798644f8adc2e1d7b1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 14 Dec 2011 22:31:31 +0000 Subject: [PATCH 37/46] * Oops, the daemon test wasn't actually using the daemon. --- tests/Makefile.am | 2 +- tests/common.sh.in | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/Makefile.am b/tests/Makefile.am index 676a9c387..38bfa139d 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,4 +1,4 @@ -TESTS_ENVIRONMENT = $(bash) -e +TESTS_ENVIRONMENT = NIX_REMOTE= $(bash) -e extra1 = $(shell pwd)/test-tmp/shared diff --git a/tests/common.sh.in b/tests/common.sh.in index 62ac669df..4ab490a62 100644 --- a/tests/common.sh.in +++ b/tests/common.sh.in @@ -23,8 +23,6 @@ export SHARED=$TEST_ROOT/shared export PATH=$NIX_BIN_DIR:$TOP/scripts:$PATH -export NIX_REMOTE= - export REAL_BIN_DIR=@bindir@ export REAL_LIBEXEC_DIR=@libexecdir@ export REAL_LOCALSTATE_DIR=@localstatedir@ From 893cac140232478e3ce9640ccf31dbfbfc2434c0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 14 Dec 2011 22:41:10 +0000 Subject: [PATCH 38/46] * Remove the terminate handler, which was only really needed because of Berkeley DB (see r8632). --- src/libmain/shared.cc | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 9076e9994..d3b73f8fd 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -90,23 +90,6 @@ static void setLogType(string lt) } -static void closeStore() -{ - try { - throw; - } catch (std::exception & e) { - printMsg(lvlError, - format("FATAL: unexpected exception (closing store and aborting): %1%") % e.what()); - } - try { - store.reset((StoreAPI *) 0); - } catch (...) { - ignoreException(); - } - abort(); -} - - RemoveTempRoots::~RemoveTempRoots() { removeTempRoots(); @@ -238,12 +221,6 @@ static void initAndRun(int argc, char * * argv) exit. 
*/ RemoveTempRoots removeTempRoots __attribute__((unused)); - /* Make sure that the database gets closed properly, even if - terminate() is called (which happens sometimes due to bugs in - destructor/exceptions interaction, but that needn't preclude a - clean shutdown of the database). */ - std::set_terminate(closeStore); - run(remaining); /* Close the Nix database. */ From 3a48282b0681d68147e18f7464eaddf1d188c3be Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 14 Dec 2011 23:30:06 +0000 Subject: [PATCH 39/46] * Buffer writes in FdSink. This significantly reduces the number of system calls / context switches when dumping a NAR and in the worker protocol. --- src/libstore/remote-store.cc | 4 ++++ src/libutil/serialise.cc | 25 ++++++++++++++++++++++++- src/libutil/serialise.hh | 21 ++++++++++++++------- src/nix-worker/nix-worker.cc | 16 +++++++++------- 4 files changed, 51 insertions(+), 15 deletions(-) diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 0c6a1c37d..8269b6a83 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -65,6 +65,7 @@ void RemoteStore::openConnection() /* Send the magic greeting, check for the reply. */ try { writeInt(WORKER_MAGIC_1, to); + to.flush(); unsigned int magic = readInt(from); if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch"); @@ -166,6 +167,7 @@ void RemoteStore::connectToDaemon() RemoteStore::~RemoteStore() { try { + to.flush(); fdSocket.close(); if (child != -1) child.wait(true); @@ -488,6 +490,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths) void RemoteStore::processStderr(Sink * sink, Source * source) { + to.flush(); unsigned int msg; while ((msg = readInt(from)) == STDERR_NEXT || msg == STDERR_READ || msg == STDERR_WRITE) { @@ -503,6 +506,7 @@ void RemoteStore::processStderr(Sink * sink, Source * source) AutoDeleteArray d(buf); (*source)(buf, len); writeString(string((const char *) buf, len), to); + to.flush(); } else { string s = readString(from); diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 9b4222713..66a64a6be 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -9,7 +9,30 @@ namespace nix { void FdSink::operator () (const unsigned char * data, unsigned int len) { - writeFull(fd, data, len); + if (!buffer) buffer = new unsigned char[bufSize]; + + while (len) { + /* Optimisation: bypass the buffer if the data exceeds the + buffer size and there is no unflushed data. */ + if (bufPos == 0 && len >= bufSize) { + writeFull(fd, data, len); + break; + } + /* Otherwise, copy the bytes to the buffer. Flush the buffer + when it's full. */ + size_t n = bufPos + len > bufSize ? bufSize - bufPos : len; + memcpy(buffer + bufPos, data, n); + data += n; bufPos += n; len -= n; + if (bufPos == bufSize) flush(); + } +} + + +void FdSink::flush() +{ + if (fd == -1 || bufPos == 0) return; + writeFull(fd, buffer, bufPos); + bufPos = 0; } diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 0e797d63b..711bd5e6c 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -28,22 +28,29 @@ struct Source }; -/* A sink that writes data to a file descriptor. */ +/* A sink that writes data to a file descriptor (using a buffer). 
*/ struct FdSink : Sink { int fd; + unsigned int bufSize, bufPos; + unsigned char * buffer; - FdSink() - { - fd = -1; - } + FdSink() : fd(-1), bufSize(32 * 1024), bufPos(0), buffer(0) { } - FdSink(int fd) + FdSink(int fd, unsigned int bufSize = 32 * 1024) + : fd(fd), bufSize(bufSize), bufPos(0), buffer(0) { - this->fd = fd; + } + + ~FdSink() + { + flush(); + if (buffer) delete[] buffer; } void operator () (const unsigned char * data, unsigned int len); + + void flush(); }; diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 8950f73ef..6c222420e 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -57,6 +57,7 @@ static void tunnelStderr(const unsigned char * buf, size_t count) try { writeInt(STDERR_NEXT, to); writeString(string((char *) buf, count), to); + to.flush(); } catch (...) { /* Write failed; that means that the other side is gone. */ @@ -200,9 +201,7 @@ static void stopWork(bool success = true, const string & msg = "", unsigned int struct TunnelSink : Sink { Sink & to; - TunnelSink(Sink & to) : to(to) - { - } + TunnelSink(Sink & to) : to(to) { } virtual void operator () (const unsigned char * data, unsigned int len) { @@ -215,9 +214,7 @@ struct TunnelSink : Sink struct TunnelSource : Source { Source & from; - TunnelSource(Source & from) : from(from) - { - } + TunnelSource(Source & from) : from(from) { } virtual void operator () (unsigned char * data, unsigned int len) { @@ -228,6 +225,7 @@ struct TunnelSource : Source writeInt(STDERR_READ, to); writeInt(len, to); + to.flush(); string s = readString(from); if (s.size() != len) throw Error("not enough data"); memcpy(data, (const unsigned char *) s.c_str(), len); @@ -596,8 +594,8 @@ static void processConnection() unsigned int magic = readInt(from); if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch"); writeInt(WORKER_MAGIC_2, to); - writeInt(PROTOCOL_VERSION, to); + to.flush(); unsigned int clientVersion = readInt(from); /* Send startup error messages to the client. */ @@ -619,9 +617,11 @@ static void processConnection() store = boost::shared_ptr(new LocalStore()); stopWork(); + to.flush(); } catch (Error & e) { stopWork(false, e.msg()); + to.flush(); return; } @@ -652,6 +652,8 @@ static void processConnection() if (!errorAllowed) break; } + to.flush(); + assert(!canSendStderr); }; From a3e0656cbbfadba28518e0a29c324edaabb9874a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 15 Dec 2011 12:32:08 +0000 Subject: [PATCH 40/46] * Buffer reads in FdSource. Together with write buffering, this significantly cuts down the number of syscalls (e.g., for "nix-store -qR /var/run/current-system" via the daemon, it reduced the number of syscalls in the client from 29134 to 4766 and in the daemon from 44266 to 20666). --- src/libutil/serialise.cc | 24 +++++++++++++++++++++++- src/libutil/serialise.hh | 18 +++++++++--------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 66a64a6be..e3a53c0d0 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -2,6 +2,7 @@ #include "util.hh" #include +#include namespace nix { @@ -38,7 +39,28 @@ void FdSink::flush() void FdSource::operator () (unsigned char * data, unsigned int len) { - readFull(fd, data, len); + if (!buffer) buffer = new unsigned char[bufSize]; + + while (len) { + if (!bufPosIn) { + /* Read as much data as is available (up to the buffer + size). 
*/ + checkInterrupt(); + ssize_t n = read(fd, (char *) buffer, bufSize); + if (n == -1) { + if (errno == EINTR) continue; + throw SysError("reading from file"); + } + if (n == 0) throw EndOfFile("unexpected end-of-file"); + bufPosIn = n; + } + + /* Copy out the data in the buffer. */ + size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len; + memcpy(data, buffer + bufPosOut, n); + data += n; bufPosOut += n; len -= n; + if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0; + } } diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 711bd5e6c..b8d4d7a84 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -38,9 +38,7 @@ struct FdSink : Sink FdSink() : fd(-1), bufSize(32 * 1024), bufPos(0), buffer(0) { } FdSink(int fd, unsigned int bufSize = 32 * 1024) - : fd(fd), bufSize(bufSize), bufPos(0), buffer(0) - { - } + : fd(fd), bufSize(bufSize), bufPos(0), buffer(0) { } ~FdSink() { @@ -58,15 +56,17 @@ struct FdSink : Sink struct FdSource : Source { int fd; + unsigned int bufSize, bufPosIn, bufPosOut; + unsigned char * buffer; - FdSource() - { - fd = -1; - } + FdSource() : fd(-1), bufSize(32 * 1024), bufPosIn(0), bufPosOut(0), buffer(0) { } - FdSource(int fd) + FdSource(int fd, unsigned int bufSize = 32 * 1024) + : fd(fd), bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { } + + ~FdSource() { - this->fd = fd; + if (buffer) delete[] buffer; } void operator () (unsigned char * data, unsigned int len); From a67b8ae22450a0fe10698042b452f5f2f322e008 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 15 Dec 2011 14:04:35 +0000 Subject: [PATCH 41/46] * Typo. --- scripts/nix-copy-closure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in index 9191de59e..8bb60e920 100755 --- a/scripts/nix-copy-closure.in +++ b/scripts/nix-copy-closure.in @@ -96,7 +96,7 @@ else { # Copy FROM the remote machine. close READ or die "nix-store on remote machine `$sshHost' failed: $?"; - # Export the store paths on the remote machine and import them on locally. + # Export the store paths on the remote machine and import them locally. if (scalar @missing > 0) { print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n"; #print STDERR " $_\n" foreach @missing; From 5a1b9ed0aa3a0c240b667dbe504b61b2b68e4d16 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 15 Dec 2011 16:19:53 +0000 Subject: [PATCH 42/46] * Refactoring: move sink/source buffering into separate classes. * Buffer the HashSink. This speeds up hashing a bit because it prevents lots of calls to the hash update functions (e.g. nix-hash went from 9.3s to 8.7s of user time on the closure of my /var/run/current-system). 
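
A note on the shape of the refactoring: after this change a sink subclass only implements write(), and the BufferedSink base class coalesces small writes before calling it; likewise HashSink::currentHash() copies the hash context internally, so the old trick of cloning the whole sink is no longer needed. A minimal, hypothetical subclass, assuming the serialise.hh interface as introduced below (CountingSink is a made-up example, not part of Nix):

// counting-sink.cc -- illustrative sketch only, not part of this patch series.
#include <iostream>
#include "serialise.hh"

using namespace nix;

/* A toy sink that just counts bytes.  Only write() has to be provided;
   the buffering comes from BufferedSink. */
struct CountingSink : BufferedSink
{
    unsigned long long bytes;
    CountingSink() : bytes(0) { }
    ~CountingSink() { flush(); } /* ~BufferedSink() asserts the buffer is empty */
    void write(const unsigned char * data, size_t len)
    {
        bytes += len; /* called with coalesced chunks, not once per tiny write */
    }
};

int main()
{
    CountingSink sink;
    for (unsigned int i = 0; i < 1000; ++i)
        writeInt(i, sink);      /* 1000 x 8 bytes, all absorbed by the buffer */
    sink.flush();
    std::cout << sink.bytes << " bytes\n"; /* prints "8000 bytes" */
    return 0;
}

Flushing has to happen before the base destructor runs, because a base-class destructor cannot call the derived write(); that is why FdSink's destructor flushes explicitly in the diff below.
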
--- src/libstore/local-store.cc | 9 ++-- src/libstore/references.cc | 4 +- src/libstore/remote-store.cc | 2 +- src/libutil/hash.cc | 22 +++++---- src/libutil/hash.hh | 5 +- src/libutil/serialise.cc | 69 +++++++++++++++++--------- src/libutil/serialise.hh | 96 +++++++++++++++++++++--------------- src/nix-worker/nix-worker.cc | 8 ++- 8 files changed, 125 insertions(+), 90 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 5107a93de..525e5fc7b 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1103,16 +1103,14 @@ struct HashAndWriteSink : Sink HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256) { } - virtual void operator () - (const unsigned char * data, unsigned int len) + virtual void operator () (const unsigned char * data, size_t len) { writeSink(data, len); hashSink(data, len); } Hash currentHash() { - HashSink hashSinkClone(hashSink); - return hashSinkClone.finish().first; + return hashSink.currentHash().first; } }; @@ -1201,8 +1199,7 @@ struct HashAndReadSource : Source { hashing = true; } - virtual void operator () - (unsigned char * data, unsigned int len) + virtual void operator () (unsigned char * data, size_t len) { readSource(data, len); if (hashing) hashSink(data, len); diff --git a/src/libstore/references.cc b/src/libstore/references.cc index ade9c9aa2..c1f9e3ba7 100644 --- a/src/libstore/references.cc +++ b/src/libstore/references.cc @@ -57,11 +57,11 @@ struct RefScanSink : Sink RefScanSink() : hashSink(htSHA256) { } - void operator () (const unsigned char * data, unsigned int len); + void operator () (const unsigned char * data, size_t len); }; -void RefScanSink::operator () (const unsigned char * data, unsigned int len) +void RefScanSink::operator () (const unsigned char * data, size_t len) { hashSink(data, len); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 8269b6a83..7bf0ad7bd 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -374,7 +374,7 @@ Path RemoteStore::importPath(bool requireSignature, Source & source) openConnection(); writeInt(wopImportPath, to); /* We ignore requireSignature, since the worker forces it to true - anyway. */ + anyway. 
*/ processStderr(0, &source); return readStorePath(from); } diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index 533423441..bbfe7847f 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -306,21 +306,13 @@ HashSink::HashSink(HashType ht) : ht(ht) start(ht, *ctx); } -HashSink::HashSink(const HashSink & h) -{ - ht = h.ht; - bytes = h.bytes; - ctx = new Ctx; - *ctx = *h.ctx; -} - HashSink::~HashSink() { + bufPos = 0; delete ctx; } -void HashSink::operator () - (const unsigned char * data, unsigned int len) +void HashSink::write(const unsigned char * data, size_t len) { bytes += len; update(ht, *ctx, data, len); @@ -328,11 +320,21 @@ void HashSink::operator () HashResult HashSink::finish() { + flush(); Hash hash(ht); nix::finish(ht, *ctx, hash.hash); return HashResult(hash, bytes); } +HashResult HashSink::currentHash() +{ + flush(); + Ctx ctx2 = *ctx; + Hash hash(ht); + nix::finish(ht, ctx2, hash.hash); + return HashResult(hash, bytes); +} + HashResult hashPath( HashType ht, const Path & path, PathFilter & filter) diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index cbdcf4c8d..e0b6478cc 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -91,7 +91,7 @@ string printHashType(HashType ht); union Ctx; -class HashSink : public Sink +class HashSink : public BufferedSink { private: HashType ht; @@ -102,8 +102,9 @@ public: HashSink(HashType ht); HashSink(const HashSink & h); ~HashSink(); - virtual void operator () (const unsigned char * data, unsigned int len); + void write(const unsigned char * data, size_t len); HashResult finish(); + HashResult currentHash(); }; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index e3a53c0d0..a82262704 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -8,7 +8,16 @@ namespace nix { -void FdSink::operator () (const unsigned char * data, unsigned int len) +BufferedSink::~BufferedSink() +{ + /* We can't call flush() here, because C++ for some insane reason + doesn't allow you to call virtual methods from a destructor. */ + assert(!bufPos); + if (buffer) delete[] buffer; +} + + +void BufferedSink::operator () (const unsigned char * data, size_t len) { if (!buffer) buffer = new unsigned char[bufSize]; @@ -16,7 +25,7 @@ void FdSink::operator () (const unsigned char * data, unsigned int len) /* Optimisation: bypass the buffer if the data exceeds the buffer size and there is no unflushed data. */ if (bufPos == 0 && len >= bufSize) { - writeFull(fd, data, len); + write(data, len); break; } /* Otherwise, copy the bytes to the buffer. Flush the buffer @@ -29,31 +38,32 @@ void FdSink::operator () (const unsigned char * data, unsigned int len) } -void FdSink::flush() +void BufferedSink::flush() { - if (fd == -1 || bufPos == 0) return; - writeFull(fd, buffer, bufPos); + if (bufPos == 0) return; + write(buffer, bufPos); bufPos = 0; } -void FdSource::operator () (unsigned char * data, unsigned int len) +void FdSink::write(const unsigned char * data, size_t len) +{ + writeFull(fd, data, len); +} + + +BufferedSource::~BufferedSource() +{ + if (buffer) delete[] buffer; +} + + +void BufferedSource::operator () (unsigned char * data, size_t len) { if (!buffer) buffer = new unsigned char[bufSize]; while (len) { - if (!bufPosIn) { - /* Read as much data as is available (up to the buffer - size). 
*/ - checkInterrupt(); - ssize_t n = read(fd, (char *) buffer, bufSize); - if (n == -1) { - if (errno == EINTR) continue; - throw SysError("reading from file"); - } - if (n == 0) throw EndOfFile("unexpected end-of-file"); - bufPosIn = n; - } + if (!bufPosIn) bufPosIn = read(buffer, bufSize); /* Copy out the data in the buffer. */ size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len; @@ -64,7 +74,20 @@ void FdSource::operator () (unsigned char * data, unsigned int len) } -void writePadding(unsigned int len, Sink & sink) +size_t FdSource::read(unsigned char * data, size_t len) +{ + ssize_t n; + do { + checkInterrupt(); + n = ::read(fd, (char *) data, bufSize); + } while (n == -1 && errno == EINTR); + if (n == -1) throw SysError("reading from file"); + if (n == 0) throw EndOfFile("unexpected end-of-file"); + return n; +} + + +void writePadding(size_t len, Sink & sink) { if (len % 8) { unsigned char zero[8]; @@ -103,7 +126,7 @@ void writeLongLong(unsigned long long n, Sink & sink) void writeString(const string & s, Sink & sink) { - unsigned int len = s.length(); + size_t len = s.length(); writeInt(len, sink); sink((const unsigned char *) s.c_str(), len); writePadding(len, sink); @@ -118,11 +141,11 @@ void writeStringSet(const StringSet & ss, Sink & sink) } -void readPadding(unsigned int len, Source & source) +void readPadding(size_t len, Source & source) { if (len % 8) { unsigned char zero[8]; - unsigned int n = 8 - (len % 8); + size_t n = 8 - (len % 8); source(zero, n); for (unsigned int i = 0; i < n; i++) if (zero[i]) throw SerialisationError("non-zero padding"); @@ -162,7 +185,7 @@ unsigned long long readLongLong(Source & source) string readString(Source & source) { - unsigned int len = readInt(source); + size_t len = readInt(source); unsigned char * buf = new unsigned char[len]; AutoDeleteArray d(buf); source(buf, len); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index b8d4d7a84..a0588668f 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -11,7 +11,25 @@ namespace nix { struct Sink { virtual ~Sink() { } - virtual void operator () (const unsigned char * data, unsigned int len) = 0; + virtual void operator () (const unsigned char * data, size_t len) = 0; +}; + + +/* A buffered abstract sink. */ +struct BufferedSink : Sink +{ + size_t bufSize, bufPos; + unsigned char * buffer; + + BufferedSink(size_t bufSize = 32 * 1024) + : bufSize(bufSize), bufPos(0), buffer(0) { } + ~BufferedSink(); + + void operator () (const unsigned char * data, size_t len); + + void flush(); + + virtual void write(const unsigned char * data, size_t len) = 0; }; @@ -20,56 +38,52 @@ struct Source { virtual ~Source() { } - /* The callee should store exactly *len bytes in the buffer - pointed to by data. It should block if that much data is not - yet available, or throw an error if it is not going to be - available. */ - virtual void operator () (unsigned char * data, unsigned int len) = 0; + /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’. + It blocks if that much data is not yet available, or throws an + error if it is not going to be available. */ + virtual void operator () (unsigned char * data, size_t len) = 0; }; -/* A sink that writes data to a file descriptor (using a buffer). */ -struct FdSink : Sink +/* A buffered abstract source. 
*/ +struct BufferedSource : Source { - int fd; - unsigned int bufSize, bufPos; + size_t bufSize, bufPosIn, bufPosOut; unsigned char * buffer; - FdSink() : fd(-1), bufSize(32 * 1024), bufPos(0), buffer(0) { } + BufferedSource(size_t bufSize = 32 * 1024) + : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { } + ~BufferedSource(); - FdSink(int fd, unsigned int bufSize = 32 * 1024) - : fd(fd), bufSize(bufSize), bufPos(0), buffer(0) { } - - ~FdSink() - { - flush(); - if (buffer) delete[] buffer; - } + void operator () (unsigned char * data, size_t len); - void operator () (const unsigned char * data, unsigned int len); + /* Store up to ‘len’ in the buffer pointed to by ‘data’, and + return the number of bytes stored. If should block until at + least one byte is available. */ + virtual size_t read(unsigned char * data, size_t len) = 0; +}; - void flush(); + +/* A sink that writes data to a file descriptor. */ +struct FdSink : BufferedSink +{ + int fd; + + FdSink() : fd(-1) { } + FdSink(int fd) : fd(fd) { } + ~FdSink() { flush(); } + + void write(const unsigned char * data, size_t len); }; /* A source that reads data from a file descriptor. */ -struct FdSource : Source +struct FdSource : BufferedSource { int fd; - unsigned int bufSize, bufPosIn, bufPosOut; - unsigned char * buffer; - - FdSource() : fd(-1), bufSize(32 * 1024), bufPosIn(0), bufPosOut(0), buffer(0) { } - - FdSource(int fd, unsigned int bufSize = 32 * 1024) - : fd(fd), bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { } - - ~FdSource() - { - if (buffer) delete[] buffer; - } - - void operator () (unsigned char * data, unsigned int len); + FdSource() : fd(-1) { } + FdSource(int fd) : fd(fd) { } + size_t read(unsigned char * data, size_t len); }; @@ -77,7 +91,7 @@ struct FdSource : Source struct StringSink : Sink { string s; - virtual void operator () (const unsigned char * data, unsigned int len) + void operator () (const unsigned char * data, size_t len) { s.append((const char *) data, len); } @@ -88,9 +102,9 @@ struct StringSink : Sink struct StringSource : Source { const string & s; - unsigned int pos; + size_t pos; StringSource(const string & _s) : s(_s), pos(0) { } - virtual void operator () (unsigned char * data, unsigned int len) + virtual void operator () (unsigned char * data, size_t len) { s.copy((char *) data, len, pos); pos += len; @@ -100,13 +114,13 @@ struct StringSource : Source }; -void writePadding(unsigned int len, Sink & sink); +void writePadding(size_t len, Sink & sink); void writeInt(unsigned int n, Sink & sink); void writeLongLong(unsigned long long n, Sink & sink); void writeString(const string & s, Sink & sink); void writeStringSet(const StringSet & ss, Sink & sink); -void readPadding(unsigned int len, Source & source); +void readPadding(size_t len, Source & source); unsigned int readInt(Source & source); unsigned long long readLongLong(Source & source); string readString(Source & source); diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 6c222420e..a89852638 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -202,8 +202,7 @@ struct TunnelSink : Sink { Sink & to; TunnelSink(Sink & to) : to(to) { } - virtual void operator () - (const unsigned char * data, unsigned int len) + virtual void operator () (const unsigned char * data, size_t len) { writeInt(STDERR_WRITE, to); writeString(string((const char *) data, len), to); @@ -215,8 +214,7 @@ struct TunnelSource : Source { Source & from; TunnelSource(Source & from) : from(from) { } - virtual void 
operator () - (unsigned char * data, unsigned int len) + virtual void operator () (unsigned char * data, size_t len) { /* Careful: we're going to receive data from the client now, so we have to disable the SIGPOLL handler. */ @@ -267,7 +265,7 @@ struct SavingSourceAdapter : Source Source & orig; string s; SavingSourceAdapter(Source & orig) : orig(orig) { } - void operator () (unsigned char * data, unsigned int len) + void operator () (unsigned char * data, size_t len) { orig(data, len); s.append((const char *) data, len); From 78598d06f0240a15b74720d8f987daeb702318d7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 16 Dec 2011 15:45:42 +0000 Subject: [PATCH 43/46] * Clean up exception handling. --- src/libutil/serialise.cc | 11 +++++++++-- src/libutil/serialise.hh | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index a82262704..76f2e721a 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -41,8 +41,15 @@ void BufferedSink::operator () (const unsigned char * data, size_t len) void BufferedSink::flush() { if (bufPos == 0) return; - write(buffer, bufPos); - bufPos = 0; + size_t n = bufPos; + bufPos = 0; // don't trigger the assert() in ~BufferedSink() + write(buffer, n); +} + + +FdSink::~FdSink() +{ + try { flush(); } catch (...) { ignoreException(); } } diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index a0588668f..a155f6681 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -71,7 +71,7 @@ struct FdSink : BufferedSink FdSink() : fd(-1) { } FdSink(int fd) : fd(fd) { } - ~FdSink() { flush(); } + ~FdSink(); void write(const unsigned char * data, size_t len); }; From e0bd307802d13476055f8ba99ab7808de0fd71e5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 16 Dec 2011 19:44:13 +0000 Subject: [PATCH 44/46] * Make the import operation through the daemon much more efficient (way fewer roundtrips) by allowing the client to send data in bigger chunks. * Some refactoring. --- src/libstore/local-store.cc | 7 +++--- src/libstore/remote-store.cc | 6 ++--- src/libstore/worker-protocol.hh | 2 +- src/libutil/serialise.cc | 42 +++++++++++++++++++++++---------- src/libutil/serialise.hh | 31 +++++++++++------------- src/nix-worker/nix-worker.cc | 24 ++++++++++++------- 6 files changed, 68 insertions(+), 44 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 525e5fc7b..65b1cdbc8 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1199,10 +1199,11 @@ struct HashAndReadSource : Source { hashing = true; } - virtual void operator () (unsigned char * data, size_t len) + size_t read(unsigned char * data, size_t len) { - readSource(data, len); - if (hashing) hashSink(data, len); + size_t n = readSource.read(data, len); + if (hashing) hashSink(data, n); + return n; } }; diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 7bf0ad7bd..e976e8fa5 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -501,11 +501,11 @@ void RemoteStore::processStderr(Sink * sink, Source * source) } else if (msg == STDERR_READ) { if (!source) throw Error("no source"); - unsigned int len = readInt(from); + size_t len = readInt(from); unsigned char * buf = new unsigned char[len]; AutoDeleteArray d(buf); - (*source)(buf, len); - writeString(string((const char *) buf, len), to); + size_t n = source->read(buf, len); + writeString(string((const char *) buf, n), to); // !!! 
inefficient to.flush(); } else { diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index acb8bc8b2..acabd6ca3 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -8,7 +8,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x108 +#define PROTOCOL_VERSION 0x109 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 76f2e721a..640267a13 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -23,8 +23,9 @@ void BufferedSink::operator () (const unsigned char * data, size_t len) while (len) { /* Optimisation: bypass the buffer if the data exceeds the - buffer size and there is no unflushed data. */ - if (bufPos == 0 && len >= bufSize) { + buffer size. */ + if (bufPos + len >= bufSize) { + flush(); write(data, len); break; } @@ -59,29 +60,37 @@ void FdSink::write(const unsigned char * data, size_t len) } +void Source::operator () (unsigned char * data, size_t len) +{ + while (len) { + size_t n = read(data, len); + data += n; len -= n; + } +} + + BufferedSource::~BufferedSource() { if (buffer) delete[] buffer; } -void BufferedSource::operator () (unsigned char * data, size_t len) +size_t BufferedSource::read(unsigned char * data, size_t len) { if (!buffer) buffer = new unsigned char[bufSize]; - while (len) { - if (!bufPosIn) bufPosIn = read(buffer, bufSize); + if (!bufPosIn) bufPosIn = readUnbuffered(buffer, bufSize); - /* Copy out the data in the buffer. */ - size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len; - memcpy(data, buffer + bufPosOut, n); - data += n; bufPosOut += n; len -= n; - if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0; - } + /* Copy out the data in the buffer. */ + size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len; + memcpy(data, buffer + bufPosOut, n); + bufPosOut += n; + if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0; + return n; } -size_t FdSource::read(unsigned char * data, size_t len) +size_t FdSource::readUnbuffered(unsigned char * data, size_t len) { ssize_t n; do { @@ -94,6 +103,15 @@ size_t FdSource::read(unsigned char * data, size_t len) } +size_t StringSource::read(unsigned char * data, size_t len) +{ + if (pos == s.size()) throw EndOfFile("end of string reached"); + size_t n = s.copy((char *) data, len, pos); + pos += n; + return n; +} + + void writePadding(size_t len, Sink & sink) { if (len % 8) { diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index a155f6681..25398b09d 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -24,7 +24,7 @@ struct BufferedSink : Sink BufferedSink(size_t bufSize = 32 * 1024) : bufSize(bufSize), bufPos(0), buffer(0) { } ~BufferedSink(); - + void operator () (const unsigned char * data, size_t len); void flush(); @@ -39,9 +39,14 @@ struct Source virtual ~Source() { } /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’. - It blocks if that much data is not yet available, or throws an - error if it is not going to be available. */ - virtual void operator () (unsigned char * data, size_t len) = 0; + It blocks until all the requested data is available, or throws + an error if it is not going to be available. */ + void operator () (unsigned char * data, size_t len); + + /* Store up to ‘len’ in the buffer pointed to by ‘data’, and + return the number of bytes stored. 
It blocks until at least + one byte is available. */ + virtual size_t read(unsigned char * data, size_t len) = 0; }; @@ -55,12 +60,10 @@ struct BufferedSource : Source : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { } ~BufferedSource(); - void operator () (unsigned char * data, size_t len); + size_t read(unsigned char * data, size_t len); - /* Store up to ‘len’ in the buffer pointed to by ‘data’, and - return the number of bytes stored. If should block until at - least one byte is available. */ - virtual size_t read(unsigned char * data, size_t len) = 0; + /* Underlying read call, to be overridden. */ + virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0; }; @@ -83,7 +86,7 @@ struct FdSource : BufferedSource int fd; FdSource() : fd(-1) { } FdSource(int fd) : fd(fd) { } - size_t read(unsigned char * data, size_t len); + size_t readUnbuffered(unsigned char * data, size_t len); }; @@ -104,13 +107,7 @@ struct StringSource : Source const string & s; size_t pos; StringSource(const string & _s) : s(_s), pos(0) { } - virtual void operator () (unsigned char * data, size_t len) - { - s.copy((char *) data, len, pos); - pos += len; - if (pos > s.size()) - throw Error("end of string reached"); - } + size_t read(unsigned char * data, size_t len); }; diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index a89852638..695e4c38d 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -210,11 +210,11 @@ struct TunnelSink : Sink }; -struct TunnelSource : Source +struct TunnelSource : BufferedSource { Source & from; TunnelSource(Source & from) : from(from) { } - virtual void operator () (unsigned char * data, size_t len) + size_t readUnbuffered(unsigned char * data, size_t len) { /* Careful: we're going to receive data from the client now, so we have to disable the SIGPOLL handler. */ @@ -224,11 +224,16 @@ struct TunnelSource : Source writeInt(STDERR_READ, to); writeInt(len, to); to.flush(); - string s = readString(from); - if (s.size() != len) throw Error("not enough data"); - memcpy(data, (const unsigned char *) s.c_str(), len); + string s = readString(from); // !!! inefficient startWork(); + + if (s.empty()) throw EndOfFile("unexpected end-of-file"); + if (s.size() > len) throw Error("client sent too much data"); + + memcpy(data, (const unsigned char *) s.c_str(), s.size()); + + return s.size(); } }; @@ -265,10 +270,11 @@ struct SavingSourceAdapter : Source Source & orig; string s; SavingSourceAdapter(Source & orig) : orig(orig) { } - void operator () (unsigned char * data, size_t len) + size_t read(unsigned char * data, size_t len) { - orig(data, len); - s.append((const char *) data, len); + size_t n = orig.read(data, len); + s.append((const char *) data, n); + return n; } }; @@ -397,6 +403,8 @@ static void performOp(unsigned int clientVersion, case wopImportPath: { startWork(); + if (GET_PROTOCOL_MINOR(clientVersion) < 9) + throw Error("import not supported; upgrade your client"); TunnelSource source(from); Path path = store->importPath(true, source); stopWork(); From 8d3dfa2c1782e955d2b7796d19dc0d0381596b98 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 16 Dec 2011 21:29:46 +0000 Subject: [PATCH 45/46] * Avoid expensive conversions from char arrays to STL strings.
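  For illustration, the change at a call site amounts to roughly the following sketch (it simply restates the tunnelStderr/TunnelSink and TunnelSource hunks below; ‘data’, ‘len’, ‘to’ and ‘from’ are the names used there):

      // old: copy the chunk into a temporary std::string just to serialise it
      writeString(string((const char *) data, len), to);

      // new: the raw-buffer overloads take the pointer and length directly,
      // and reads go straight into the caller's buffer
      writeString(data, len, to);
      size_t n = readString(data, len, from);

  This saves a heap allocation and an extra copy per chunk on both sides of the tunnel.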
--- src/libutil/serialise.cc | 23 +++++++++++++++++++---- src/libutil/serialise.hh | 2 ++ src/nix-worker/nix-worker.cc | 15 +++++---------- 3 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 640267a13..ba549c214 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -149,12 +149,17 @@ void writeLongLong(unsigned long long n, Sink & sink) } +void writeString(const unsigned char * buf, size_t len, Sink & sink) +{ + writeInt(len, sink); + sink(buf, len); + writePadding(len, sink); +} + + void writeString(const string & s, Sink & sink) { - size_t len = s.length(); - writeInt(len, sink); - sink((const unsigned char *) s.c_str(), len); - writePadding(len, sink); + writeString((const unsigned char *) s.c_str(), s.size(), sink); } @@ -208,6 +213,16 @@ unsigned long long readLongLong(Source & source) } +size_t readString(unsigned char * buf, size_t max, Source & source) +{ + size_t len = readInt(source); + if (len > max) throw Error("string is too long"); + source(buf, len); + readPadding(len, source); + return len; +} + + string readString(Source & source) { size_t len = readInt(source); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 25398b09d..efd8e2a06 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -114,12 +114,14 @@ struct StringSource : Source void writePadding(size_t len, Sink & sink); void writeInt(unsigned int n, Sink & sink); void writeLongLong(unsigned long long n, Sink & sink); +void writeString(const unsigned char * buf, size_t len, Sink & sink); void writeString(const string & s, Sink & sink); void writeStringSet(const StringSet & ss, Sink & sink); void readPadding(size_t len, Source & source); unsigned int readInt(Source & source); unsigned long long readLongLong(Source & source); +size_t readString(unsigned char * buf, size_t max, Source & source); string readString(Source & source); StringSet readStringSet(Source & source); diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 695e4c38d..85e2105b2 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -56,7 +56,7 @@ static void tunnelStderr(const unsigned char * buf, size_t count) if (canSendStderr && myPid == getpid()) { try { writeInt(STDERR_NEXT, to); - writeString(string((char *) buf, count), to); + writeString(buf, count, to); to.flush(); } catch (...) { /* Write failed; that means that the other side is @@ -205,7 +205,7 @@ struct TunnelSink : Sink virtual void operator () (const unsigned char * data, size_t len) { writeInt(STDERR_WRITE, to); - writeString(string((const char *) data, len), to); + writeString(data, len, to); } }; @@ -224,16 +224,11 @@ struct TunnelSource : BufferedSource writeInt(STDERR_READ, to); writeInt(len, to); to.flush(); - string s = readString(from); // !!! inefficient + size_t n = readString(data, len, from); startWork(); - - if (s.empty()) throw EndOfFile("unexpected end-of-file"); - if (s.size() > len) throw Error("client sent too much data"); - - memcpy(data, (const unsigned char *) s.c_str(), s.size()); - - return s.size(); + if (n == 0) throw EndOfFile("unexpected end-of-file"); + return n; } }; From 273b288a7e862ac1918064537ff130cc751fa9fd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 16 Dec 2011 22:31:25 +0000 Subject: [PATCH 46/46] * importPath() -> importPaths(). 
Because of buffering of the input stream it's now necessary for the daemon to process the entire sequence of exported paths, rather than letting the client do it. --- src/libstore/local-store.cc | 17 ++++++++++++-- src/libstore/local-store.hh | 4 +++- src/libstore/remote-store.cc | 41 +++++++++++++++++---------------- src/libstore/remote-store.hh | 2 +- src/libstore/store-api.hh | 6 ++--- src/libstore/worker-protocol.hh | 4 ++-- src/libutil/serialise.cc | 16 +++++++++---- src/libutil/serialise.hh | 4 ++-- src/nix-store/nix-store.cc | 10 ++++---- src/nix-worker/nix-worker.cc | 28 +++++++++++----------- 10 files changed, 75 insertions(+), 57 deletions(-) diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 65b1cdbc8..cf0e2ad1b 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1156,7 +1156,7 @@ void LocalStore::exportPath(const Path & path, bool sign, PathSet references; queryReferences(path, references); - writeStringSet(references, hashAndWriteSink); + writeStrings(references, hashAndWriteSink); Path deriver = queryDeriver(path); writeString(deriver, hashAndWriteSink); @@ -1243,7 +1243,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source) Path dstPath = readStorePath(hashAndReadSource); - PathSet references = readStorePaths(hashAndReadSource); + PathSet references = readStorePaths<PathSet>(hashAndReadSource); Path deriver = readString(hashAndReadSource); if (deriver != "") assertStorePath(deriver); @@ -1330,6 +1330,19 @@ Path LocalStore::importPath(bool requireSignature, Source & source) } +Paths LocalStore::importPaths(bool requireSignature, Source & source) +{ + Paths res; + while (true) { + unsigned long long n = readLongLong(source); + if (n == 0) break; + if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'"); + res.push_back(importPath(requireSignature, source)); + } + return res; +} + + void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed, unsigned long long & blocksFreed) { diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 7ef01b264..4cb905f67 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -146,7 +146,7 @@ public: void exportPath(const Path & path, bool sign, Sink & sink); - Path importPath(bool requireSignature, Source & source); + Paths importPaths(bool requireSignature, Source & source); void buildDerivations(const PathSet & drvPaths); @@ -259,6 +259,8 @@ private: Path createTempDirInStore(); + Path importPath(bool requireSignature, Source & source); + void checkDerivationOutputs(const Path & drvPath, const Derivation & drv); }; diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index e976e8fa5..942c5bcf1 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -27,13 +27,15 @@ Path readStorePath(Source & from) } -PathSet readStorePaths(Source & from) +template<class T> T readStorePaths(Source & from) { - PathSet paths = readStringSet(from); - foreach (PathSet::iterator, i, paths) assertStorePath(*i); + T paths = readStrings<T>(from); + foreach (typename T::iterator, i, paths) assertStorePath(*i); return paths; } +template PathSet readStorePaths(Source & from); + RemoteStore::RemoteStore() { @@ -215,7 +217,7 @@ PathSet RemoteStore::queryValidPaths() openConnection(); writeInt(wopQueryValidPaths, to); processStderr(); - return readStorePaths(from); + return readStorePaths<PathSet>(from); } @@ -242,7 +244,7 @@ bool RemoteStore::querySubstitutablePathInfo(const Path &
path, if (reply == 0) return false; info.deriver = readString(from); if (info.deriver != "") assertStorePath(info.deriver); - info.references = readStorePaths(from); + info.references = readStorePaths<PathSet>(from); info.downloadSize = readLongLong(from); info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0; return true; @@ -260,7 +262,7 @@ ValidPathInfo RemoteStore::queryPathInfo(const Path & path) info.deriver = readString(from); if (info.deriver != "") assertStorePath(info.deriver); info.hash = parseHash(htSHA256, readString(from)); - info.references = readStorePaths(from); + info.references = readStorePaths<PathSet>(from); info.registrationTime = readInt(from); info.narSize = readLongLong(from); return info; @@ -285,7 +287,7 @@ void RemoteStore::queryReferences(const Path & path, writeInt(wopQueryReferences, to); writeString(path, to); processStderr(); - PathSet references2 = readStorePaths(from); + PathSet references2 = readStorePaths<PathSet>(from); references.insert(references2.begin(), references2.end()); } @@ -297,7 +299,7 @@ void RemoteStore::queryReferrers(const Path & path, writeInt(wopQueryReferrers, to); writeString(path, to); processStderr(); - PathSet referrers2 = readStorePaths(from); + PathSet referrers2 = readStorePaths<PathSet>(from); referrers.insert(referrers2.begin(), referrers2.end()); } @@ -320,7 +322,7 @@ PathSet RemoteStore::queryDerivationOutputs(const Path & path) writeInt(wopQueryDerivationOutputs, to); writeString(path, to); processStderr(); - return readStorePaths(from); + return readStorePaths<PathSet>(from); } @@ -350,7 +352,7 @@ Path RemoteStore::addTextToStore(const string & name, const string & s, writeInt(wopAddTextToStore, to); writeString(name, to); writeString(s, to); - writeStringSet(references, to); + writeStrings(references, to); processStderr(); return readStorePath(from); @@ -369,14 +371,14 @@ void RemoteStore::exportPath(const Path & path, bool sign, } -Path RemoteStore::importPath(bool requireSignature, Source & source) +Paths RemoteStore::importPaths(bool requireSignature, Source & source) { openConnection(); - writeInt(wopImportPath, to); + writeInt(wopImportPaths, to); /* We ignore requireSignature, since the worker forces it to true anyway.
*/ processStderr(0, &source); - return readStorePath(from); + return readStorePaths<Paths>(from); } @@ -384,7 +386,7 @@ void RemoteStore::buildDerivations(const PathSet & drvPaths) { openConnection(); writeInt(wopBuildDerivations, to); - writeStringSet(drvPaths, to); + writeStrings(drvPaths, to); processStderr(); readInt(from); } @@ -451,7 +453,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) writeInt(wopCollectGarbage, to); writeInt(options.action, to); - writeStringSet(options.pathsToDelete, to); + writeStrings(options.pathsToDelete, to); writeInt(options.ignoreLiveness, to); writeLongLong(options.maxFreed, to); writeInt(options.maxLinks, to); @@ -463,7 +465,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) processStderr(); - results.paths = readStringSet(from); + results.paths = readStrings<PathSet>(from); results.bytesFreed = readLongLong(from); results.blocksFreed = readLongLong(from); } @@ -474,7 +476,7 @@ PathSet RemoteStore::queryFailedPaths() openConnection(); writeInt(wopQueryFailedPaths, to); processStderr(); - return readStorePaths(from); + return readStorePaths<PathSet>(from); } @@ -482,7 +484,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths) { openConnection(); writeInt(wopClearFailedPaths, to); - writeStringSet(paths, to); + writeStrings(paths, to); processStderr(); readInt(from); } @@ -504,8 +506,7 @@ void RemoteStore::processStderr(Sink * sink, Source * source) size_t len = readInt(from); unsigned char * buf = new unsigned char[len]; AutoDeleteArray d(buf); - size_t n = source->read(buf, len); - writeString(string((const char *) buf, n), to); // !!! inefficient + writeString(buf, source->read(buf, len), to); to.flush(); } else { diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 519f46fd1..34a2d91df 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -56,7 +56,7 @@ public: void exportPath(const Path & path, bool sign, Sink & sink); - Path importPath(bool requireSignature, Source & source); + Paths importPaths(bool requireSignature, Source & source); void buildDerivations(const PathSet & drvPaths); diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index e3a2c0daa..d4997c886 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -169,9 +169,9 @@ public: virtual void exportPath(const Path & path, bool sign, Sink & sink) = 0; - /* Import a NAR dump created by exportPath() into the Nix - store. */ - virtual Path importPath(bool requireSignature, Source & source) = 0; + /* Import a sequence of NAR dumps created by exportPaths() into + the Nix store. */ + virtual Paths importPaths(bool requireSignature, Source & source) = 0; /* Ensure that the output paths of the derivation are valid. If they are already valid, this is a no-op.
Otherwise, validity diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index acabd6ca3..760d08a74 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -29,7 +29,6 @@ typedef enum { wopSyncWithGC = 13, wopFindRoots = 14, wopExportPath = 16, - wopImportPath = 17, wopQueryDeriver = 18, wopSetOptions = 19, wopCollectGarbage = 20, @@ -39,6 +38,7 @@ typedef enum { wopQueryFailedPaths = 24, wopClearFailedPaths = 25, wopQueryPathInfo = 26, + wopImportPaths = 27, } WorkerOp; @@ -58,7 +58,7 @@ typedef enum { Path readStorePath(Source & from); -PathSet readStorePaths(Source & from); +template<class T> T readStorePaths(Source & from); } diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index ba549c214..c4563ffd1 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -163,13 +163,16 @@ void writeString(const string & s, Sink & sink) } -void writeStringSet(const StringSet & ss, Sink & sink) +template<class T> void writeStrings(const T & ss, Sink & sink) { writeInt(ss.size(), sink); - for (StringSet::iterator i = ss.begin(); i != ss.end(); ++i) + foreach (typename T::const_iterator, i, ss) writeString(*i, sink); } +template void writeStrings(const Paths & ss, Sink & sink); +template void writeStrings(const PathSet & ss, Sink & sink); + void readPadding(size_t len, Source & source) { @@ -234,14 +237,17 @@ string readString(Source & source) } -StringSet readStringSet(Source & source) +template<class T> T readStrings(Source & source) { unsigned int count = readInt(source); - StringSet ss; + T ss; while (count--) - ss.insert(readString(source)); + ss.insert(ss.end(), readString(source)); return ss; } +template Paths readStrings(Source & source); +template PathSet readStrings(Source & source); + } diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index efd8e2a06..ded4b12a0 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -116,14 +116,14 @@ void writeInt(unsigned int n, Sink & sink); void writeLongLong(unsigned long long n, Sink & sink); void writeString(const unsigned char * buf, size_t len, Sink & sink); void writeString(const string & s, Sink & sink); -void writeStringSet(const StringSet & ss, Sink & sink); +template<class T> void writeStrings(const T & ss, Sink & sink); void readPadding(size_t len, Source & source); unsigned int readInt(Source & source); unsigned long long readLongLong(Source & source); size_t readString(unsigned char * buf, size_t max, Source & source); string readString(Source & source); -StringSet readStringSet(Source & source); +template<class T> T readStrings(Source & source); MakeError(SerialisationError, Error) diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 740033b45..e92ccb153 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -600,12 +600,10 @@ static void opImport(Strings opFlags, Strings opArgs) if (!opArgs.empty()) throw UsageError("no arguments expected"); FdSource source(STDIN_FILENO); - while (true) { - unsigned long long n = readLongLong(source); - if (n == 0) break; - if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'"); - cout << format("%1%\n") % store->importPath(requireSignature, source) << std::flush; - } + Paths paths = store->importPaths(requireSignature, source); + + foreach (Paths::iterator, i, paths) + cout << format("%1%\n") % *i << std::flush; } diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index 85e2105b2..5f57b2981 100644 --- a/src/nix-worker/nix-worker.cc
+++ b/src/nix-worker/nix-worker.cc @@ -327,7 +327,7 @@ static void performOp(unsigned int clientVersion, store->queryReferrers(path, paths); else paths = store->queryDerivationOutputs(path); stopWork(); - writeStringSet(paths, to); + writeStrings(paths, to); break; } @@ -377,7 +377,7 @@ static void performOp(unsigned int clientVersion, case wopAddTextToStore: { string suffix = readString(from); string s = readString(from); - PathSet refs = readStorePaths(from); + PathSet refs = readStorePaths<PathSet>(from); startWork(); Path path = store->addTextToStore(suffix, s, refs); stopWork(); @@ -396,19 +396,17 @@ static void performOp(unsigned int clientVersion, break; } - case wopImportPath: { + case wopImportPaths: { startWork(); - if (GET_PROTOCOL_MINOR(clientVersion) < 9) - throw Error("import not supported; upgrade your client"); TunnelSource source(from); - Path path = store->importPath(true, source); + Paths paths = store->importPaths(true, source); stopWork(); - writeString(path, to); + writeStrings(paths, to); break; } case wopBuildDerivations: { - PathSet drvs = readStorePaths(from); + PathSet drvs = readStorePaths<PathSet>(from); startWork(); store->buildDerivations(drvs); stopWork(); @@ -466,7 +464,7 @@ static void performOp(unsigned int clientVersion, case wopCollectGarbage: { GCOptions options; options.action = (GCOptions::GCAction) readInt(from); - options.pathsToDelete = readStorePaths(from); + options.pathsToDelete = readStorePaths<PathSet>(from); options.ignoreLiveness = readInt(from); options.maxFreed = readLongLong(from); options.maxLinks = readInt(from); @@ -484,7 +482,7 @@ static void performOp(unsigned int clientVersion, store->collectGarbage(options, results); stopWork(); - writeStringSet(results.paths, to); + writeStrings(results.paths, to); writeLongLong(results.bytesFreed, to); writeLongLong(results.blocksFreed, to); @@ -522,7 +520,7 @@ static void performOp(unsigned int clientVersion, writeInt(res ? 1 : 0, to); if (res) { writeString(info.deriver, to); - writeStringSet(info.references, to); + writeStrings(info.references, to); writeLongLong(info.downloadSize, to); if (GET_PROTOCOL_MINOR(clientVersion) >= 7) writeLongLong(info.narSize, to); @@ -534,7 +532,7 @@ static void performOp(unsigned int clientVersion, startWork(); PathSet paths = store->queryValidPaths(); stopWork(); - writeStringSet(paths, to); + writeStrings(paths, to); break; } @@ -542,12 +540,12 @@ static void performOp(unsigned int clientVersion, startWork(); PathSet paths = store->queryFailedPaths(); stopWork(); - writeStringSet(paths, to); + writeStrings(paths, to); break; } case wopClearFailedPaths: { - PathSet paths = readStringSet(from); + PathSet paths = readStrings<PathSet>(from); startWork(); store->clearFailedPaths(paths); stopWork(); @@ -562,7 +560,7 @@ static void performOp(unsigned int clientVersion, stopWork(); writeString(info.deriver, to); writeString(printHash(info.hash), to); - writeStringSet(info.references, to); + writeStrings(info.references, to); writeInt(info.registrationTime, to); writeLongLong(info.narSize, to); break;