#include "local-store.hh"
#include "globals.hh"
#include "archive.hh"
#include "pathlocks.hh"
#include "worker-protocol.hh"
#include "derivations.hh"
#include "nar-info.hh"

#include <iostream>
#include <algorithm>
#include <cstring>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/time.h>
#include <unistd.h>
#include <utime.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <grp.h>

#if __linux__
#include <sched.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/ioctl.h>
#endif

#include <sqlite3.h>


namespace nix {


LocalStore::LocalStore(const Params & params)
    : Store(params)
    , LocalFSStore(params)
    , realStoreDir(get(params, "real", rootDir != "" ? rootDir + "/nix/store" : storeDir))
    , dbDir(stateDir + "/db")
    , linksDir(realStoreDir + "/.links")
    , reservedPath(dbDir + "/reserved")
    , schemaPath(dbDir + "/schema")
    , trashDir(realStoreDir + "/trash")
    , requireSigs(trim(settings.signedBinaryCaches) != "") // FIXME: rename option
    , publicKeys(getDefaultPublicKeys())
{
    auto state(_state.lock());

    /* Create the state directories if they don't already exist. */
    createDirs(realStoreDir);
    makeStoreWritable();
    createDirs(linksDir);
    Path profilesDir = stateDir + "/profiles";
    createDirs(profilesDir);
    createDirs(stateDir + "/temproots");
    createDirs(dbDir);
    Path gcRootsDir = stateDir + "/gcroots";
    if (!pathExists(gcRootsDir)) {
        createDirs(gcRootsDir);
        createSymlink(profilesDir, gcRootsDir + "/profiles");
    }

    /* Optionally, create directories and set permissions for a
       multi-user install. */
    if (getuid() == 0 && settings.buildUsersGroup != "") {

        Path perUserDir = profilesDir + "/per-user";
        createDirs(perUserDir);
        if (chmod(perUserDir.c_str(), 01777) == -1)
            throw SysError(format("could not set permissions on ‘%1%’ to 1777") % perUserDir);

        mode_t perm = 01775;
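
        /* Both 01777 (on the per-user profiles directory above) and
           01775 (on the store itself below) set the sticky bit: users
           can create entries in these directories, but can only
           rename or delete entries they own. */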

        struct group * gr = getgrnam(settings.buildUsersGroup.c_str());
        if (!gr)
            printError(format("warning: the group ‘%1%’ specified in ‘build-users-group’ does not exist")
                % settings.buildUsersGroup);
        else {
            struct stat st;
            if (stat(realStoreDir.c_str(), &st))
                throw SysError(format("getting attributes of path ‘%1%’") % realStoreDir);

            if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
                if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1)
                    throw SysError(format("changing ownership of path ‘%1%’") % realStoreDir);
                if (chmod(realStoreDir.c_str(), perm) == -1)
                    throw SysError(format("changing permissions on path ‘%1%’") % realStoreDir);
            }
        }
    }

    /* Ensure that the store and its parents are not symlinks. */
    if (getEnv("NIX_IGNORE_SYMLINK_STORE") != "1") {
        Path path = realStoreDir;
        struct stat st;
        while (path != "/") {
            if (lstat(path.c_str(), &st))
                throw SysError(format("getting status of ‘%1%’") % path);
            if (S_ISLNK(st.st_mode))
                throw Error(format(
                        "the path ‘%1%’ is a symlink; "
                        "this is not allowed for the Nix store and its parent directories")
                    % path);
            path = dirOf(path);
        }
    }

    /* We can't open a SQLite database if the disk is full.  Since
       this prevents the garbage collector from running when it's most
       needed, we reserve some dummy space that we can free just
       before doing a garbage collection. */
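    /* (The reserved file is recreated below whenever it is missing or
       has the wrong size, e.g. after the garbage collector freed it
       to make room.) */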
    try {
        struct stat st;
        if (stat(reservedPath.c_str(), &st) == -1 ||
            st.st_size != settings.reservedSize)
        {
            AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600);
            int res = -1;
#if HAVE_POSIX_FALLOCATE
            res = posix_fallocate(fd.get(), 0, settings.reservedSize);
#endif
            if (res == -1) {
                writeFull(fd.get(), string(settings.reservedSize, 'X'));
                [[gnu::unused]] auto res2 = ftruncate(fd.get(), settings.reservedSize);
            }
        }
    } catch (SysError & e) { /* don't care about errors */
    }

    /* Acquire the big fat lock in shared mode to make sure that no
       schema upgrade is in progress. */
    Path globalLockPath = dbDir + "/big-lock";
    globalLock = openLockFile(globalLockPath.c_str(), true);

    if (!lockFile(globalLock.get(), ltRead, false)) {
        printError("waiting for the big Nix store lock...");
        lockFile(globalLock.get(), ltRead, true);
    }
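
    /* The lock is normally held in shared (read) mode; the schema
       upgrade code below briefly takes it in exclusive (write) mode
       so that no other process uses the database while it is being
       migrated. */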

    /* Check the current database schema and if necessary do an
       upgrade. */
    int curSchema = getSchema();
    if (curSchema > nixSchemaVersion)
        throw Error(format("current Nix store schema is version %1%, but I only support %2%")
            % curSchema % nixSchemaVersion);

    else if (curSchema == 0) { /* new store */
        curSchema = nixSchemaVersion;
        openDB(*state, true);
        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
    }

    else if (curSchema < nixSchemaVersion) {
        if (curSchema < 5)
            throw Error(
                "Your Nix store has a database in Berkeley DB format,\n"
                "which is no longer supported. To convert to the new format,\n"
                "please upgrade Nix to version 0.12 first.");

        if (curSchema < 6)
            throw Error(
                "Your Nix store has a database in flat file format,\n"
                "which is no longer supported. To convert to the new format,\n"
                "please upgrade Nix to version 1.11 first.");

        if (!lockFile(globalLock.get(), ltWrite, false)) {
            printError("waiting for exclusive access to the Nix store...");
            lockFile(globalLock.get(), ltWrite, true);
        }

        /* Get the schema version again, because another process may
           have performed the upgrade already. */
        curSchema = getSchema();

        if (curSchema < 7) { upgradeStore7(); }

        openDB(*state, false);

        if (curSchema < 8) {
            SQLiteTxn txn(state->db);
            state->db.exec("alter table ValidPaths add column ultimate integer");
            state->db.exec("alter table ValidPaths add column sigs text");
            txn.commit();
        }

        if (curSchema < 9) {
            SQLiteTxn txn(state->db);
            state->db.exec("drop table FailedPaths");
            txn.commit();
        }

        if (curSchema < 10) {
            SQLiteTxn txn(state->db);
            state->db.exec("alter table ValidPaths add column ca text");
            txn.commit();
        }

        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());

        lockFile(globalLock.get(), ltRead, true);
    }

    else openDB(*state, false);

    /* Prepare SQL statements. */
    state->stmtRegisterValidPath.create(state->db,
        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);");
    state->stmtUpdatePathInfo.create(state->db,
        "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;");
    state->stmtAddReference.create(state->db,
        "insert or replace into Refs (referrer, reference) values (?, ?);");
    state->stmtQueryPathInfo.create(state->db,
        "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;");
    state->stmtQueryReferences.create(state->db,
        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
    state->stmtQueryReferrers.create(state->db,
        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
    state->stmtInvalidatePath.create(state->db,
        "delete from ValidPaths where path = ?;");
    state->stmtAddDerivationOutput.create(state->db,
        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
    state->stmtQueryValidDerivers.create(state->db,
        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
    state->stmtQueryDerivationOutputs.create(state->db,
        "select id, path from DerivationOutputs where drv = ?;");
    // Use "path >= ?" with limit 1 rather than "path like '?%'" to
    // ensure efficient lookup.
    state->stmtQueryPathFromHashPart.create(state->db,
        "select path from ValidPaths where path >= ? limit 1;");
    state->stmtQueryValidPaths.create(state->db, "select path from ValidPaths");
}


LocalStore::~LocalStore()
{
    auto state(_state.lock());

    try {
        if (state->fdTempRoots) {
            state->fdTempRoots = -1;
            unlink(state->fnTempRoots.c_str());
        }
    } catch (...) {
        ignoreException();
    }
}


std::string LocalStore::getUri()
{
    return "local";
}


int LocalStore::getSchema()
{
    int curSchema = 0;
    if (pathExists(schemaPath)) {
        string s = readFile(schemaPath);
        if (!string2Int(s, curSchema))
            throw Error(format("‘%1%’ is corrupt") % schemaPath);
    }
    return curSchema;
}


void LocalStore::openDB(State & state, bool create)
{
    if (access(dbDir.c_str(), R_OK | W_OK))
        throw SysError(format("Nix database directory ‘%1%’ is not writable") % dbDir);

    /* Open the Nix database. */
    string dbPath = dbDir + "/db.sqlite";
    auto & db(state.db);
    if (sqlite3_open_v2(dbPath.c_str(), &db.db,
            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
        throw Error(format("cannot open Nix database ‘%1%’") % dbPath);

    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
        throwSQLiteError(db, "setting timeout");

    db.exec("pragma foreign_keys = 1");

    /* !!! check whether sqlite has been built with foreign key
       support */

    /* Whether SQLite should fsync().  "Normal" synchronous mode
       should be safe enough.  If the user asks for it, don't sync at
       all.  This can cause database corruption if the system
       crashes. */
    string syncMode = settings.fsyncMetadata ? "normal" : "off";
    db.exec("pragma synchronous = " + syncMode);

    /* Set the SQLite journal mode.  WAL mode is fastest, so it's the
       default. */
    string mode = settings.useSQLiteWAL ? "wal" : "truncate";
    string prevMode;
    {
        SQLiteStmt stmt;
        stmt.create(db, "pragma main.journal_mode;");
        if (sqlite3_step(stmt) != SQLITE_ROW)
            throwSQLiteError(db, "querying journal mode");
        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
    }
    if (prevMode != mode &&
        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting journal mode");
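
    /* (The journal mode is only switched when it differs from the
       current one; changing it, in particular leaving WAL mode,
       requires that no other connection is using the database.) */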

    /* Increase the auto-checkpoint interval to 40000 pages.  This
       seems enough to ensure that instantiating the NixOS system
       derivation is done in a single fsync(). */
    if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting autocheckpoint interval");

    /* Initialise the database schema, if necessary. */
    if (create) {
        const char * schema =
#include "schema.sql.hh"
            ;
        db.exec(schema);
    }
}


/* To improve purity, users may want to make the Nix store a read-only
   bind mount.  So make the Nix store writable for this process. */
void LocalStore::makeStoreWritable()
{
#if __linux__
    if (getuid() != 0) return;
    /* Check if /nix/store is on a read-only mount. */
    struct statvfs stat;
    if (statvfs(realStoreDir.c_str(), &stat) != 0)
        throw SysError("getting info about the Nix store mount point");
    if (stat.f_flag & ST_RDONLY) {
        if (unshare(CLONE_NEWNS) == -1)
            throw SysError("setting up a private mount namespace");

        if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
            throw SysError(format("remounting %1% writable") % realStoreDir);
    }
#endif
}


const time_t mtimeStore = 1; /* 1 second into the epoch */
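
/* (A value of 1 rather than 0 is presumably used because some tools
   treat a zero mtime as "unknown" or otherwise mishandle it.) */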


static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
{
    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                 | 0444
                 | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError(format("changing mode of ‘%1%’ to %2$o") % path % mode);
        }

    }

    if (st.st_mtime != mtimeStore) {
        struct timeval times[2];
        times[0].tv_sec = st.st_atime;
        times[0].tv_usec = 0;
        times[1].tv_sec = mtimeStore;
        times[1].tv_usec = 0;
#if HAVE_LUTIMES
        if (lutimes(path.c_str(), times) == -1)
            if (errno != ENOSYS ||
                (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
#else
        if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
#endif
            throw SysError(format("changing modification time of ‘%1%’") % path);
    }
}


void canonicaliseTimestampAndPermissions(const Path & path)
{
    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path ‘%1%’") % path);
    canonicaliseTimestampAndPermissions(path, st);
}
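

/* Canonicalisation makes store path metadata independent of who and
   what built it: permissions are reduced to 0444/0555, the
   modification time is set to mtimeStore, and files are chowned to
   the user running this process. */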
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path ‘%1%’") % path);

    /* Really make sure that the path is of a supported type. */
    if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
        throw Error(format("file ‘%1%’ has an unsupported type") % path);

    /* Fail if the file is not owned by the build user.  This prevents
       us from messing up the ownership/permissions of files
       hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
       However, ignore files that we chown'ed ourselves previously to
       ensure that we don't fail on hard links within the same build
       (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
    if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
        assert(!S_ISDIR(st.st_mode));
        if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end())
            throw BuildError(format("invalid ownership on file ‘%1%’") % path);
        mode_t mode = st.st_mode & ~S_IFMT;
        assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
        return;
    }

    inodesSeen.insert(Inode(st.st_dev, st.st_ino));

    canonicaliseTimestampAndPermissions(path, st);

    /* Change ownership to the current uid.  If it's a symlink, use
       lchown if available, otherwise don't bother.  Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable.  The only exception is top-level paths in the Nix
       store (since that directory is group-writable for the Nix build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), getegid()) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), getegid()) == -1)
#endif
            throw SysError(format("changing owner of ‘%1%’ to %2%")
                % path % geteuid());
    }

    if (S_ISDIR(st.st_mode)) {
        DirEntries entries = readDirectory(path);
        for (auto & i : entries)
            canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
    }
}


void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
{
    canonicalisePathMetaData_(path, fromUid, inodesSeen);

    /* On platforms that don't have lchown(), the top-level path can't
       be a symlink, since we can't change its ownership. */
    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path ‘%1%’") % path);

    if (st.st_uid != geteuid()) {
        assert(S_ISLNK(st.st_mode));
        throw Error(format("wrong ownership of top-level store path ‘%1%’") % path);
    }
}


void canonicalisePathMetaData(const Path & path, uid_t fromUid)
{
    InodesSeen inodesSeen;
    canonicalisePathMetaData(path, fromUid, inodesSeen);
}


void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
{
    string drvName = storePathToName(drvPath);
    assert(isDerivation(drvName));
    drvName = string(drvName, 0, drvName.size() - drvExtension.size());

    if (drv.isFixedOutput()) {
        DerivationOutputs::const_iterator out = drv.outputs.find("out");
        if (out == drv.outputs.end())
            throw Error(format("derivation ‘%1%’ does not have an output named ‘out’") % drvPath);

        bool recursive; Hash h;
        out->second.parseHashInfo(recursive, h);
        Path outPath = makeFixedOutputPath(recursive, h, drvName);

        StringPairs::const_iterator j = drv.env.find("out");
        if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
            throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
                % drvPath % out->second.path % outPath);
    }

    else {
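        /* For non-fixed-output derivations, recompute the expected
           output paths by hashing the derivation with its output
           fields cleared (hashDerivationModulo) and compare them with
           the paths the derivation claims. */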
        Derivation drvCopy(drv);
        for (auto & i : drvCopy.outputs) {
            i.second.path = "";
            drvCopy.env[i.first] = "";
        }

        Hash h = hashDerivationModulo(*this, drvCopy);

        for (auto & i : drv.outputs) {
            Path outPath = makeOutputPath(i.first, h, drvName);
            StringPairs::const_iterator j = drv.env.find(i.first);
            if (i.second.path != outPath || j == drv.env.end() || j->second != outPath)
                throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
                    % drvPath % i.second.path % outPath);
        }
    }
}


uint64_t LocalStore::addValidPath(State & state,
    const ValidPathInfo & info, bool checkOutputs)
{
    state.stmtRegisterValidPath.use()
        (info.path)
        ("sha256:" + printHash(info.narHash))
        (info.registrationTime == 0 ? time(0) : info.registrationTime)
        (info.deriver, info.deriver != "")
        (info.narSize, info.narSize != 0)
        (info.ultimate ? 1 : 0, info.ultimate)
        (concatStringsSep(" ", info.sigs), !info.sigs.empty())
        (info.ca, !info.ca.empty())
        .exec();
    uint64_t id = sqlite3_last_insert_rowid(state.db);

    /* If this is a derivation, then store the derivation outputs in
       the database.  This is useful for the garbage collector: it can
       efficiently query whether a path is an output of some
       derivation. */
    if (isDerivation(info.path)) {
        Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(info.path));

        /* Verify that the output paths in the derivation are correct
           (i.e., follow the scheme for computing output paths from
           derivations).  Note that if this throws an error, then the
           DB transaction is rolled back, so the path validity
           registration above is undone. */
        if (checkOutputs) checkDerivationOutputs(info.path, drv);

        for (auto & i : drv.outputs) {
            state.stmtAddDerivationOutput.use()
                (id)
                (i.first)
                (i.second.path)
                .exec();
        }
    }
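
    /* Also prime the in-memory path info cache, so that an
       immediately following queryPathInfo() for this path does not
       have to go back to SQLite. */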
    {
        auto state_(Store::state.lock());
        state_->pathInfoCache.upsert(storePathToHash(info.path), std::make_shared<ValidPathInfo>(info));
    }

    return id;
}


Hash parseHashField(const Path & path, const string & s)
{
    string::size_type colon = s.find(':');
    if (colon == string::npos)
        throw Error(format("corrupt hash ‘%1%’ in valid-path entry for ‘%2%’")
            % s % path);
    HashType ht = parseHashType(string(s, 0, colon));
    if (ht == htUnknown)
        throw Error(format("unknown hash type ‘%1%’ in valid-path entry for ‘%2%’")
            % string(s, 0, colon) % path);
    return parseHash(ht, string(s, colon + 1));
}


void LocalStore::queryPathInfoUncached(const Path & path,
    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
    std::function<void(std::exception_ptr exc)> failure)
{
    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {

        auto info = std::make_shared<ValidPathInfo>();
        info->path = path;

        assertStorePath(path);

        return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
            auto state(_state.lock());

            /* Get the path info. */
            auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path));

            if (!useQueryPathInfo.next())
                return std::shared_ptr<ValidPathInfo>();

            info->id = useQueryPathInfo.getInt(0);

            info->narHash = parseHashField(path, useQueryPathInfo.getStr(1));

            info->registrationTime = useQueryPathInfo.getInt(2);

            auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);
            if (s) info->deriver = s;

            /* Note that narSize = NULL yields 0. */
            info->narSize = useQueryPathInfo.getInt(4);

            info->ultimate = useQueryPathInfo.getInt(5) == 1;

            s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6);
            if (s) info->sigs = tokenizeString<StringSet>(s, " ");

            s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 7);
            if (s) info->ca = s;

            /* Get the references. */
            auto useQueryReferences(state->stmtQueryReferences.use()(info->id));

            while (useQueryReferences.next())
                info->references.insert(useQueryReferences.getStr(0));

            return info;
        });
    });
}


/* Update path info in the database. */
void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)
{
    state.stmtUpdatePathInfo.use()
        (info.narSize, info.narSize != 0)
        ("sha256:" + printHash(info.narHash))
        (info.ultimate ? 1 : 0, info.ultimate)
        (concatStringsSep(" ", info.sigs), !info.sigs.empty())
        (info.ca, !info.ca.empty())
        (info.path)
        .exec();
}


uint64_t LocalStore::queryValidPathId(State & state, const Path & path)
{
    auto use(state.stmtQueryPathInfo.use()(path));
    if (!use.next())
        throw Error(format("path ‘%1%’ is not valid") % path);
    return use.getInt(0);
}


bool LocalStore::isValidPath_(State & state, const Path & path)
{
    return state.stmtQueryPathInfo.use()(path).next();
}


bool LocalStore::isValidPathUncached(const Path & path)
{
    return retrySQLite<bool>([&]() {
        auto state(_state.lock());
        return isValidPath_(*state, path);
    });
}


PathSet LocalStore::queryValidPaths(const PathSet & paths)
{
    PathSet res;
    for (auto & i : paths)
        if (isValidPath(i)) res.insert(i);
    return res;
}


PathSet LocalStore::queryAllValidPaths()
{
    return retrySQLite<PathSet>([&]() {
        auto state(_state.lock());
        auto use(state->stmtQueryValidPaths.use());
        PathSet res;
        while (use.next()) res.insert(use.getStr(0));
        return res;
    });
}


void LocalStore::queryReferrers(State & state, const Path & path, PathSet & referrers)
{
    auto useQueryReferrers(state.stmtQueryReferrers.use()(path));

    while (useQueryReferrers.next())
        referrers.insert(useQueryReferrers.getStr(0));
}


void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
{
    assertStorePath(path);
    return retrySQLite<void>([&]() {
        auto state(_state.lock());
        queryReferrers(*state, path, referrers);
    });
}


PathSet LocalStore::queryValidDerivers(const Path & path)
{
    assertStorePath(path);

    return retrySQLite<PathSet>([&]() {
        auto state(_state.lock());

        auto useQueryValidDerivers(state->stmtQueryValidDerivers.use()(path));

        PathSet derivers;
        while (useQueryValidDerivers.next())
            derivers.insert(useQueryValidDerivers.getStr(1));

        return derivers;
    });
}


PathSet LocalStore::queryDerivationOutputs(const Path & path)
{
    return retrySQLite<PathSet>([&]() {
        auto state(_state.lock());

        auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
            (queryValidPathId(*state, path)));

        PathSet outputs;
        while (useQueryDerivationOutputs.next())
            outputs.insert(useQueryDerivationOutputs.getStr(1));

        return outputs;
    });
}


StringSet LocalStore::queryDerivationOutputNames(const Path & path)
{
    return retrySQLite<StringSet>([&]() {
        auto state(_state.lock());

        auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
            (queryValidPathId(*state, path)));

        StringSet outputNames;
        while (useQueryDerivationOutputs.next())
            outputNames.insert(useQueryDerivationOutputs.getStr(0));

        return outputNames;
    });
}


Path LocalStore::queryPathFromHashPart(const string & hashPart)
{
    if (hashPart.size() != storePathHashLen) throw Error("invalid hash part");

    Path prefix = storeDir + "/" + hashPart;

    return retrySQLite<Path>([&]() -> std::string {
        auto state(_state.lock());

        auto useQueryPathFromHashPart(state->stmtQueryPathFromHashPart.use()(prefix));

        if (!useQueryPathFromHashPart.next()) return "";
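
        /* The statement returns the first path lexicographically >=
           the prefix; it need not actually start with the prefix, so
           check that before returning it. */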
        const char * s = (const char *) sqlite3_column_text(state->stmtQueryPathFromHashPart, 0);
        return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : "";
    });
}


PathSet LocalStore::querySubstitutablePaths(const PathSet & paths)
{
    if (!settings.useSubstitutes) return PathSet();

    auto remaining = paths;
    PathSet res;
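
    /* Ask each substituter only about the paths that no earlier
       substituter has already claimed to have. */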
    for (auto & sub : getDefaultSubstituters()) {
        if (remaining.empty()) break;
        if (sub->storeDir != storeDir) continue;
        if (!sub->wantMassQuery()) continue;

        auto valid = sub->queryValidPaths(remaining);

        PathSet remaining2;
        for (auto & path : remaining)
            if (valid.count(path))
                res.insert(path);
            else
                remaining2.insert(path);

        std::swap(remaining, remaining2);
    }

    return res;
}


void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
    SubstitutablePathInfos & infos)
{
    if (!settings.useSubstitutes) return;
    for (auto & sub : getDefaultSubstituters()) {
        if (sub->storeDir != storeDir) continue;
        for (auto & path : paths) {
            if (infos.count(path)) continue;
            debug(format("checking substituter ‘%s’ for path ‘%s’")
                % sub->getUri() % path);
            try {
                auto info = sub->queryPathInfo(path);
                auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
                    std::shared_ptr<const ValidPathInfo>(info));
                infos[path] = SubstitutablePathInfo{
                    info->deriver,
                    info->references,
                    narInfo ? narInfo->fileSize : 0,
                    info->narSize};
            } catch (InvalidPath &) {
            }
        }
    }
}


void LocalStore::registerValidPath(const ValidPathInfo & info)
{
    ValidPathInfos infos;
    infos.push_back(info);
    registerValidPaths(infos);
}


void LocalStore::registerValidPaths(const ValidPathInfos & infos)
{
    /* SQLite will fsync by default, but the new valid paths may not
       be fsync-ed.  So some may want to fsync them before registering
       the validity, at the expense of some speed of the path
       registering operation. */
    if (settings.syncBeforeRegistering) sync();

    return retrySQLite<void>([&]() {
        auto state(_state.lock());

        SQLiteTxn txn(state->db);
        PathSet paths;

        for (auto & i : infos) {
            assert(i.narHash.type == htSHA256);
            if (isValidPath_(*state, i.path))
                updatePathInfo(*state, i);
            else
                addValidPath(*state, i, false);
            paths.insert(i.path);
        }
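
        /* Add the references in a second pass, once every path in the
           batch has a row in ValidPaths; a reference may point to
           another path registered in the same transaction. */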
        for (auto & i : infos) {
            auto referrer = queryValidPathId(*state, i.path);
            for (auto & j : i.references)
                state->stmtAddReference.use()(referrer)(queryValidPathId(*state, j)).exec();
        }

        /* Check that the derivation outputs are correct.  We can't do
           this in addValidPath() above, because the references might
           not be valid yet. */
        for (auto & i : infos)
            if (isDerivation(i.path)) {
                // FIXME: inefficient; we already loaded the
                // derivation in addValidPath().
                Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(i.path));
                checkDerivationOutputs(i.path, drv);
            }

        /* Do a topological sort of the paths.  This will throw an
           error if a cycle is detected and roll back the
           transaction.  Cycles can only occur when a derivation
           has multiple outputs. */
        topoSortPaths(paths);

        txn.commit();
    });
}


/* Invalidate a path.  The caller is responsible for checking that
   there are no referrers. */
void LocalStore::invalidatePath(State & state, const Path & path)
{
    debug(format("invalidating path ‘%1%’") % path);

    state.stmtInvalidatePath.use()(path).exec();

    /* Note that the foreign key constraints on the Refs table take
       care of deleting the references entries for `path'. */
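
    /* Also drop any cached path info, so that later lookups don't
       return stale data for the invalidated path. */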
    {
        auto state_(Store::state.lock());
        state_->pathInfoCache.erase(storePathToHash(path));
    }
}


void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
    bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
{
    Hash h = hashString(htSHA256, *nar);
    if (h != info.narHash)
        throw Error(format("hash mismatch importing path ‘%s’; expected hash ‘%s’, got ‘%s’") %
            info.path % info.narHash.to_string() % h.to_string());

    if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys))
        throw Error(format("cannot import path ‘%s’ because it lacks a valid signature") % info.path);

    addTempRoot(info.path);

    if (repair || !isValidPath(info.path)) {

        PathLocks outputLock;

        Path realPath = realStoreDir + "/" + baseNameOf(info.path);

        /* Lock the output path.  But don't lock if we're being called
           from a build hook (whose parent process already acquired a
           lock on this path). */
        Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS"));
        if (find(locksHeld.begin(), locksHeld.end(), info.path) == locksHeld.end())
            outputLock.lockPaths({realPath});
|
2016-05-04 11:36:54 +00:00
|
|
|
|
|
|
|
|
|
if (repair || !isValidPath(info.path)) {
|
|
|
|
|
|
2016-06-02 13:08:18 +00:00
|
|
|
|
deletePath(realPath);
|
2016-05-04 11:36:54 +00:00
|
|
|
|
|
2016-10-21 14:50:28 +00:00
|
|
|
|
StringSource source(*nar);
|
2016-06-02 13:08:18 +00:00
|
|
|
|
restorePath(realPath, source);
|
2016-05-04 11:36:54 +00:00
|
|
|
|
|
2016-06-02 13:08:18 +00:00
|
|
|
|
canonicalisePathMetaData(realPath, -1);
|
2016-05-04 11:36:54 +00:00
|
|
|
|
|
2016-06-02 13:08:18 +00:00
|
|
|
|
optimisePath(realPath); // FIXME: combine with hashPath()
|
2016-05-04 11:36:54 +00:00
|
|
|
|
|
|
|
|
|
registerValidPath(info);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
outputLock.setDeletion(true);
|
|
|
|
|
}
|
|
|
|
|
}
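

/* Add a path to the store from an in-memory dump: a NAR serialisation
   if `recursive' is set, otherwise the flat contents of a single file.
   The destination is the fixed-output path derived from `name' and the
   `hashAlgo' hash of the dump. */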
Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
    bool recursive, HashType hashAlgo, bool repair)
{
    Hash h = hashString(hashAlgo, dump);

    Path dstPath = makeFixedOutputPath(recursive, h, name);

    addTempRoot(dstPath);

    if (repair || !isValidPath(dstPath)) {

        /* The first check above is an optimisation to prevent
           unnecessary lock acquisition. */

        Path realPath = realStoreDir + "/" + baseNameOf(dstPath);

        PathLocks outputLock({realPath});

        if (repair || !isValidPath(dstPath)) {

            deletePath(realPath);

            if (recursive) {
                StringSource source(dump);
                restorePath(realPath, source);
            } else
                writeFile(realPath, dump);

            canonicalisePathMetaData(realPath, -1);

            /* Register the SHA-256 hash of the NAR serialisation of
               the path in the database.  We may just have computed it
               above (if called with recursive == true and hashAlgo ==
               sha256); otherwise, compute it here. */
            HashResult hash;
            if (recursive) {
                hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
                hash.second = dump.size();
            } else
                hash = hashPath(htSHA256, realPath);

            optimisePath(realPath); // FIXME: combine with hashPath()

            ValidPathInfo info;
            info.path = dstPath;
            info.narHash = hash.first;
            info.narSize = hash.second;
            info.ultimate = true;
            info.ca = "fixed:" + (recursive ? (std::string) "r:" : "") + h.to_string();
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
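

/* Add a filesystem path to the store by serialising it into memory
   (as a NAR if `recursive', as a plain file otherwise) and handing
   the result to addToStoreFromDump(). */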
Path LocalStore::addToStore(const string & name, const Path & _srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
{
    Path srcPath(absPath(_srcPath));

    /* Read the whole path into memory. This is not a very scalable
       method for very large paths, but `copyPath' is mainly used for
       small files. */
    StringSink sink;
    if (recursive)
        dumpPath(srcPath, sink, filter);
    else
        sink.s = make_ref<std::string>(readFile(srcPath));

    return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair);
}
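

/* Store the string `s' as a "text" path whose references are known in
   advance.  (This is typically how derivation (.drv) files end up in
   the store.) */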
Path LocalStore::addTextToStore(const string & name, const string & s,
    const PathSet & references, bool repair)
{
    auto hash = hashString(htSHA256, s);
    auto dstPath = makeTextPath(name, hash, references);

    addTempRoot(dstPath);

    if (repair || !isValidPath(dstPath)) {

        Path realPath = realStoreDir + "/" + baseNameOf(dstPath);

        PathLocks outputLock({realPath});

        if (repair || !isValidPath(dstPath)) {

            deletePath(realPath);

            writeFile(realPath, s);

            canonicalisePathMetaData(realPath, -1);

            StringSink sink;
            dumpString(s, sink);
            auto narHash = hashString(htSHA256, *sink.s);

            optimisePath(realPath);

            ValidPathInfo info;
            info.path = dstPath;
            info.narHash = narHash;
            info.narSize = sink.s->size();
            info.references = references;
            info.ultimate = true;
            info.ca = "text:" + hash.to_string();
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}


/* Create a temporary directory in the store that won't be
   garbage-collected. */
Path LocalStore::createTempDirInStore()
{
    Path tmpDir;
    do {
        /* There is a slight possibility that `tmpDir' gets deleted by
           the GC between createTempDir() and addTempRoot(), so repeat
           until `tmpDir' exists. */
        tmpDir = createTempDir(realStoreDir);
        addTempRoot(tmpDir);
    } while (!pathExists(tmpDir));
    return tmpDir;
}
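

/* Invalidate a path after checking that it is safe to do so: throws
   PathInUse if the path still has referrers other than itself. */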
void LocalStore::invalidatePathChecked(const Path & path)
{
    assertStorePath(path);

    retrySQLite<void>([&]() {
        auto state(_state.lock());

        SQLiteTxn txn(state->db);

        if (isValidPath_(*state, path)) {
            PathSet referrers; queryReferrers(*state, path, referrers);
            referrers.erase(path); /* ignore self-references */
            if (!referrers.empty())
                throw PathInUse(format("cannot delete path ‘%1%’ because it is in use by %2%")
                    % path % showPaths(referrers));
            invalidatePath(*state, path);
        }

        txn.commit();
    });
}
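

/* Check the store and the SQLite database for consistency, optionally
   verifying the content hash of every valid path (typically invoked
   via `nix-store --verify [--check-contents] [--repair]').  The return
   value is the accumulated `errors' flag. */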
bool LocalStore::verifyStore(bool checkContents, bool repair)
{
    printError(format("reading the Nix store..."));

    bool errors = false;

    /* Acquire the global GC lock to prevent a garbage collection. */
    AutoCloseFD fdGCLock = openGCLock(ltWrite);

    PathSet store;
    for (auto & i : readDirectory(realStoreDir)) store.insert(i.name);

    /* Check whether all valid paths actually exist. */
    printInfo("checking path existence...");

    PathSet validPaths2 = queryAllValidPaths(), validPaths, done;

    for (auto & i : validPaths2)
        verifyPath(i, store, done, validPaths, repair, errors);

    /* Release the GC lock so that checking content hashes (which can
       take ages) doesn't block the GC or builds. */
    fdGCLock = -1;

    /* Optionally, check the content hashes (slow). */
    if (checkContents) {
        printInfo("checking hashes...");

        Hash nullHash(htSHA256);

        for (auto & i : validPaths) {
            try {
                auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(i)));

                /* Check the content hash (optionally - slow). */
                printMsg(lvlTalkative, format("checking contents of ‘%1%’") % i);
                HashResult current = hashPath(info->narHash.type, i);

                if (info->narHash != nullHash && info->narHash != current.first) {
                    printError(format("path ‘%1%’ was modified! "
                            "expected hash ‘%2%’, got ‘%3%’")
                        % i % printHash(info->narHash) % printHash(current.first));
                    if (repair) repairPath(i); else errors = true;
                } else {

                    bool update = false;

                    /* Fill in missing hashes. */
                    if (info->narHash == nullHash) {
                        printError(format("fixing missing hash on ‘%1%’") % i);
                        info->narHash = current.first;
                        update = true;
                    }

                    /* Fill in missing narSize fields (from old stores). */
                    if (info->narSize == 0) {
                        printError(format("updating size field on ‘%1%’ to %2%") % i % current.second);
                        info->narSize = current.second;
                        update = true;
                    }

                    if (update) {
                        auto state(_state.lock());
                        updatePathInfo(*state, *info);
                    }

                }

            } catch (Error & e) {
                /* It's possible that the path got GC'ed, so ignore
                   errors on invalid paths. */
                if (isValidPath(i))
                    printError(format("error: %1%") % e.msg());
                else
                    printError(format("warning: %1%") % e.msg());
                errors = true;
            }
        }
    }

    return errors;
}
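

/* Helper for verifyStore(): check a single valid path and, first, its
   referrers.  A path that has disappeared from disk is removed from
   the database if no valid referrer remains, and otherwise repaired
   or reported. */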
void LocalStore::verifyPath(const Path & path, const PathSet & store,
    PathSet & done, PathSet & validPaths, bool repair, bool & errors)
{
    checkInterrupt();

    if (done.find(path) != done.end()) return;
    done.insert(path);

    if (!isStorePath(path)) {
        printError(format("path ‘%1%’ is not in the Nix store") % path);
        auto state(_state.lock());
        invalidatePath(*state, path);
        return;
    }

    if (store.find(baseNameOf(path)) == store.end()) {
        /* Check any referrers first.  If we can invalidate them
           first, then we can invalidate this path as well. */
        bool canInvalidate = true;
        PathSet referrers; queryReferrers(path, referrers);
        for (auto & i : referrers)
            if (i != path) {
                verifyPath(i, store, done, validPaths, repair, errors);
                if (validPaths.find(i) != validPaths.end())
                    canInvalidate = false;
            }

        if (canInvalidate) {
            printError(format("path ‘%1%’ disappeared, removing from database...") % path);
            auto state(_state.lock());
            invalidatePath(*state, path);
        } else {
            printError(format("path ‘%1%’ disappeared, but it still has valid referrers!") % path);
            if (repair)
                try {
                    repairPath(path);
                } catch (Error & e) {
                    printError(format("warning: %1%") % e.msg());
                    errors = true;
                }
            else errors = true;
        }

        return;
    }

    validPaths.insert(path);
}


#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
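
/* Recursively clear the immutable bit on `path' (a regular file or
   directory tree).  Errors getting or setting the flag are silently
   ignored so that filesystems without support for it keep working. */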
static void makeMutable(const Path & path)
{
    checkInterrupt();

    struct stat st = lstat(path);

    if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;

    if (S_ISDIR(st.st_mode)) {
        for (auto & i : readDirectory(path))
            makeMutable(path + "/" + i.name);
    }

    /* The O_NOFOLLOW is important to prevent us from changing the
       mutable bit on the target of a symlink (which would be a
       security hole). */
    AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
    if (fd == -1) {
        if (errno == ELOOP) return; // it's a symlink
        throw SysError(format("opening file ‘%1%’") % path);
    }

    unsigned int flags = 0, old;

    /* Silently ignore errors getting/setting the immutable flag so
       that we work correctly on filesystems that don't support it. */
    if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) return;
    old = flags;
    flags &= ~FS_IMMUTABLE_FL;
    if (old == flags) return;
    if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) return;
}

/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */
void LocalStore::upgradeStore7()
{
    if (getuid() != 0) return;
    printError("removing immutable bits from the Nix store (this may take a while)...");
    makeMutable(realStoreDir);
}

#else

void LocalStore::upgradeStore7()
{
}

#endif
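

/* Compact the SQLite database by running a VACUUM. */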
void LocalStore::vacuumDB()
{
    auto state(_state.lock());
    state->db.exec("vacuum");
}
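

/* Add the given signatures to the metadata of an already valid path,
   inside a single SQLite transaction. */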
void LocalStore::addSignatures(const Path & storePath, const StringSet & sigs)
{
    retrySQLite<void>([&]() {
        auto state(_state.lock());

        SQLiteTxn txn(state->db);

        auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(storePath)));

        info->sigs.insert(sigs.begin(), sigs.end());

        updatePathInfo(*state, *info);

        txn.commit();
    });
}
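

/* Sign the path metadata with each secret key configured in
   settings.secretKeyFiles. */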
void LocalStore::signPathInfo(ValidPathInfo & info)
{
    // FIXME: keep secret keys in memory.

    auto secretKeyFiles = settings.secretKeyFiles;

    for (auto & secretKeyFile : secretKeyFiles) {
        SecretKey secretKey(readFile(secretKeyFile));
        info.sign(secretKey);
    }
}


}