diff --git a/release.nix b/release.nix
index 4f3e5fac0..91f3752d5 100644
--- a/release.nix
+++ b/release.nix
@@ -1,5 +1,5 @@
 { nixpkgs ? <nixpkgs>
 , nixos ? <nixos>
-, nix ? { outPath = ../nix-export; rev = 1234; }
+, nix ? { outPath = ./.; rev = 1234; }
 , officialRelease ? false
 }:
@@ -98,7 +98,7 @@ let
       ];
 
       configureFlags = ''
-        --disable-init-state --disable-shared
+        --disable-init-state
        --with-bzip2=${bzip2} --with-sqlite=${sqlite}
        --with-dbi=${perlPackages.DBI}/lib/perl5/site_perl
        --with-dbd-sqlite=${perlPackages.DBDSQLite}/lib/perl5/site_perl
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in
index c805d6740..8bf77df04 100755
--- a/scripts/build-remote.pl.in
+++ b/scripts/build-remote.pl.in
@@ -1,6 +1,6 @@
 #! @perl@ -w @perlFlags@
 
-use Fcntl ':flock';
+use Fcntl qw(:DEFAULT :flock);
 use English '-no_match_vars';
 use IO::Handle;
 use Nix::Config;
@@ -56,7 +56,7 @@ sub openSlotLock {
     my ($machine, $slot) = @_;
     my $slotLockFn = "$currentLoad/" . (join '+', @{$machine->{systemTypes}}) . "-" . $machine->{hostName} . "-$slot";
     my $slotLock = new IO::Handle;
-    open $slotLock, ">>$slotLockFn" or die;
+    sysopen $slotLock, "$slotLockFn", O_RDWR|O_CREAT, 0600 or die;
     return $slotLock;
 }
@@ -64,7 +64,7 @@ sub openSlotLock {
 # Read the list of machines.
 my @machines;
 if (defined $conf && -e $conf) {
-    open CONF, "< $conf" or die;
+    open CONF, "<$conf" or die;
     while (<CONF>) {
         chomp;
         s/\#.*$//g;
@@ -104,7 +104,7 @@ REQ: while (1) {
     # Acquire the exclusive lock on $currentLoad/main-lock.
     mkdir $currentLoad, 0777 or die unless -d $currentLoad;
     my $mainLock = "$currentLoad/main-lock";
-    open MAINLOCK, ">>$mainLock" or die;
+    sysopen MAINLOCK, "$mainLock", O_RDWR|O_CREAT, 0600 or die;
     flock(MAINLOCK, LOCK_EX) or die;
 
@@ -225,8 +225,17 @@ sub removeRoots {
 }
 
 
-# Copy the derivation and its dependencies to the build machine.
+# Copy the derivation and its dependencies to the build machine.  This
+# is guarded by an exclusive lock per machine to prevent multiple
+# build-remote instances from copying to a machine simultaneously.
+# That's undesirable because we may end up with N instances uploading
+# the same missing path simultaneously, causing the effective network
+# bandwidth and target disk speed to be divided by N.
+my $uploadLock = "$currentLoad/$hostName.upload-lock";
+sysopen MAINLOCK, "$uploadLock", O_RDWR|O_CREAT, 0600 or die;
+flock(MAINLOCK, LOCK_EX) or die;
 Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne "");
+close MAINLOCK;
 
 
 # Perform the build.
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index a8ef9b23e..149cd8b09 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -1300,6 +1300,13 @@ void DerivationGoal::buildDone()
            being valid. */
         computeClosure();
 
+        /* It is now safe to delete the lock files, since all future
+           lockers will see that the output paths are valid; they will
+           not create new lock files with the same names as the old
+           (unlinked) lock files. */
+        outputLocks.setDeletion(true);
+        outputLocks.unlock();
+
     } catch (BuildError & e) {
         printMsg(lvlError, e.msg());
         outputLocks.unlock();
@@ -1987,13 +1994,6 @@ void DerivationGoal::computeClosure()
         infos.push_back(info);
     }
     worker.store.registerValidPaths(infos);
-
-    /* It is now safe to delete the lock files, since all future
-       lockers will see that the output paths are valid; they will not
-       create new lock files with the same names as the old (unlinked)
-       lock files. */
-    outputLocks.setDeletion(true);
-    outputLocks.unlock();
 }
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index e79d93723..feaab573e 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -617,27 +617,51 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
     }
 
     else {
-        printMsg(lvlError, format("reading the Nix store..."));
-        Paths entries = readDirectory(nixStore);
-
-        /* Randomise the order in which we delete entries to make the
-           collector less biased towards deleting paths that come
-           alphabetically first (e.g. /nix/store/000...).  This
-           matters when using --max-freed etc. */
-        vector<Path> entries_(entries.begin(), entries.end());
-        random_shuffle(entries_.begin(), entries_.end());
-
         if (shouldDelete(state.options.action))
             printMsg(lvlError, format("deleting garbage..."));
         else
             printMsg(lvlError, format("determining live/dead paths..."));
 
         try {
+
+            AutoCloseDir dir = opendir(nixStore.c_str());
+            if (!dir) throw SysError(format("opening directory `%1%'") % nixStore);
+
+            /* Read the store and immediately delete all paths that
+               aren't valid.  When using --max-freed etc., deleting
+               invalid paths is preferred over deleting unreachable
+               paths, since unreachable paths could become reachable
+               again.  We don't use readDirectory() here so that GCing
+               can start faster. */
+            Paths entries;
+            struct dirent * dirent;
+            while (errno = 0, dirent = readdir(dir)) {
+                checkInterrupt();
+                string name = dirent->d_name;
+                if (name == "." || name == "..") continue;
+                Path path = nixStore + "/" + name;
+                if (isValidPath(path))
+                    entries.push_back(path);
+                else
+                    tryToDelete(state, path);
+            }
+
+            dir.close();
+
+            /* Now delete the unreachable valid paths.  Randomise the
+               order in which we delete entries to make the collector
+               less biased towards deleting paths that come
+               alphabetically first (e.g. /nix/store/000...).  This
+               matters when using --max-freed etc. */
+            vector<Path> entries_(entries.begin(), entries.end());
+            random_shuffle(entries_.begin(), entries_.end());
+
             foreach (vector<Path>::iterator, i, entries_)
-                tryToDelete(state, canonPath(nixStore + "/" + *i));
+                tryToDelete(state, *i);
+
         } catch (GCLimitReached & e) {
         }
-    }
+    }
 }
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
index d8290815c..645f4cd67 100644
--- a/src/libstore/pathlocks.cc
+++ b/src/libstore/pathlocks.cc
@@ -16,7 +16,7 @@ int openLockFile(const Path & path, bool create)
 {
     AutoCloseFD fd;
 
-    fd = open(path.c_str(), O_RDWR | (create ? O_CREAT : 0), 0666);
+    fd = open(path.c_str(), O_RDWR | (create ? O_CREAT : 0), 0600);
 
     if (fd == -1 && (create || errno != ENOENT))
         throw SysError(format("opening lock file `%1%'") % path);
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 9adaac40d..0352754f5 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -701,7 +701,7 @@ AutoCloseDir::AutoCloseDir(DIR * dir)
 
 AutoCloseDir::~AutoCloseDir()
 {
-    if (dir) closedir(dir);
+    close();
 }
 
 
@@ -717,6 +717,14 @@ AutoCloseDir::operator DIR *()
 }
 
 
+void AutoCloseDir::close()
+{
+    if (dir) {
+        closedir(dir);
+        dir = 0;
+    }
+}
+
 
 
 //////////////////////////////////////////////////////////////////////
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index f86290f31..a1cf68e69 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -223,6 +223,7 @@ public:
     ~AutoCloseDir();
     void operator =(DIR * dir);
     operator DIR *();
+    void close();
 };
diff --git a/tests/lang/eval-okay-search-path.nix~ b/tests/lang/eval-okay-search-path.nix~
deleted file mode 100644
index da52a6d39..000000000
--- a/tests/lang/eval-okay-search-path.nix~
+++ /dev/null
@@ -1 +0,0 @@
-(import )
\ No newline at end of file
diff --git a/tests/lang/eval-okay-search-path.out b/tests/lang/eval-okay-search-path.out
deleted file mode 100644
index d0bc8c5e8..000000000
--- a/tests/lang/eval-okay-search-path.out
+++ /dev/null
@@ -1 +0,0 @@
-"abcc"
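
Aside: for readers unfamiliar with the locking idiom that the build-remote.pl.in hunk above relies on, here is a minimal standalone Perl sketch of the same sysopen/flock pattern.  The lock path, handle name, and error messages are illustrative only; the script itself derives the path from $currentLoad and the target host name.

    use strict;
    use Fcntl qw(:DEFAULT :flock);

    # Hypothetical lock file, standing in for "$currentLoad/$hostName.upload-lock".
    my $uploadLock = "/tmp/example-host.upload-lock";

    # Open without truncating (creating it 0600 if needed), then take an
    # exclusive lock.  A second process doing the same blocks here until
    # the first releases the lock, so only one upload runs at a time.
    sysopen(my $lock, $uploadLock, O_RDWR | O_CREAT, 0600)
        or die "cannot open $uploadLock: $!";
    flock($lock, LOCK_EX) or die "cannot lock $uploadLock: $!";

    # ... copy the closure to the remote machine here ...

    close $lock;  # closing the handle releases the lock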