2007-03-30 13:24:35 +00:00
|
|
|
#include "config.h"
|
2006-11-30 17:43:04 +00:00
|
|
|
#include "local-store.hh"
|
2006-09-04 21:06:23 +00:00
|
|
|
#include "globals.hh"
|
|
|
|
#include "archive.hh"
|
|
|
|
#include "pathlocks.hh"
|
2007-02-21 15:45:32 +00:00
|
|
|
#include "worker-protocol.hh"
|
2010-02-22 11:15:50 +00:00
|
|
|
#include "derivations.hh"
|
2012-02-15 00:31:56 +00:00
|
|
|
#include "immutable.hh"
|
2006-09-04 21:06:23 +00:00
|
|
|
|
2003-06-23 13:27:59 +00:00
|
|
|
#include <iostream>
|
2003-12-22 16:40:46 +00:00
|
|
|
#include <algorithm>
|
2003-06-23 13:27:59 +00:00
|
|
|
|
2005-01-19 16:39:47 +00:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/stat.h>
|
2003-10-15 12:42:39 +00:00
|
|
|
#include <unistd.h>
|
2005-01-19 16:39:47 +00:00
|
|
|
#include <utime.h>
|
2008-06-09 13:52:45 +00:00
|
|
|
#include <fcntl.h>
|
2008-07-18 15:34:46 +00:00
|
|
|
#include <errno.h>
|
2009-09-24 07:39:55 +00:00
|
|
|
#include <stdio.h>
|
2010-12-17 17:23:15 +00:00
|
|
|
#include <time.h>
|
2003-06-23 13:27:59 +00:00
|
|
|
|
2010-02-18 14:30:42 +00:00
|
|
|
#include <sqlite3.h>
|
|
|
|
|
2006-11-30 18:35:36 +00:00
|
|
|
|
2006-09-04 21:06:23 +00:00
|
|
|
namespace nix {
|
2003-06-23 13:27:59 +00:00
|
|
|
|
2010-12-05 18:23:19 +00:00
|
|
|
|
|
|
|
/* Generic SQLite failure, carrying the sqlite3 error message. */
MakeError(SQLiteError, Error);

/* Specialisation thrown when SQLite reports SQLITE_BUSY, i.e. the
   database is locked by another process; callers may retry. */
MakeError(SQLiteBusy, SQLiteError);
|
|
|
|
|
|
|
|
|
|
|
|
static void throwSQLiteError(sqlite3 * db, const format & f)
|
|
|
|
__attribute__ ((noreturn));
|
|
|
|
|
|
|
|
static void throwSQLiteError(sqlite3 * db, const format & f)
|
2010-02-18 13:16:59 +00:00
|
|
|
{
|
2010-12-05 18:23:19 +00:00
|
|
|
int err = sqlite3_errcode(db);
|
|
|
|
if (err == SQLITE_BUSY) {
|
|
|
|
printMsg(lvlError, "warning: SQLite database is busy");
|
2010-12-17 17:23:15 +00:00
|
|
|
/* Sleep for a while since retrying the transaction right away
|
|
|
|
is likely to fail again. */
|
|
|
|
#if HAVE_NANOSLEEP
|
|
|
|
struct timespec t;
|
|
|
|
t.tv_sec = 0;
|
|
|
|
t.tv_nsec = 100 * 1000 * 1000; /* 0.1s */
|
|
|
|
nanosleep(&t, 0);
|
|
|
|
#else
|
|
|
|
sleep(1);
|
|
|
|
#endif
|
2010-12-05 18:23:19 +00:00
|
|
|
throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
|
2010-02-18 13:16:59 +00:00
|
|
|
}
|
2010-12-05 18:23:19 +00:00
|
|
|
else
|
|
|
|
throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
|
|
|
|
}
|
2010-02-18 13:16:59 +00:00
|
|
|
|
|
|
|
|
2010-02-18 14:30:42 +00:00
|
|
|
/* Close the database handle when the wrapper is destroyed.  Failures
   are logged via ignoreException() rather than thrown, since
   destructors must not propagate exceptions. */
SQLite::~SQLite()
{
    try {
        if (db) {
            if (sqlite3_close(db) != SQLITE_OK)
                throwSQLiteError(db, "closing database");
        }
    } catch (...) {
        ignoreException();
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Compile the SQL text `s' into a prepared statement against `db'.
   Must be called at most once per SQLiteStmt instance (asserted). */
void SQLiteStmt::create(sqlite3 * db, const string & s)
{
    checkInterrupt();
    assert(stmt == 0);
    int rc = sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0);
    if (rc != SQLITE_OK)
        throwSQLiteError(db, "creating statement");
    this->db = db;
}
|
|
|
|
|
|
|
|
|
2010-02-18 15:11:08 +00:00
|
|
|
void SQLiteStmt::reset()
|
|
|
|
{
|
|
|
|
assert(stmt);
|
2010-12-08 18:19:15 +00:00
|
|
|
/* Note: sqlite3_reset() returns the error code for the most
|
|
|
|
recent call to sqlite3_step(). So ignore it. */
|
|
|
|
sqlite3_reset(stmt);
|
2010-02-19 16:43:25 +00:00
|
|
|
curArg = 1;
|
2010-02-18 15:11:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-18 14:30:42 +00:00
|
|
|
/* Finalize the prepared statement, if one was ever created.  Errors
   are logged, not thrown, because this is a destructor. */
SQLiteStmt::~SQLiteStmt()
{
    try {
        if (stmt) {
            if (sqlite3_finalize(stmt) != SQLITE_OK)
                throwSQLiteError(db, "finalizing statement");
        }
    } catch (...) {
        ignoreException();
    }
}
|
|
|
|
|
|
|
|
|
2010-02-19 16:43:25 +00:00
|
|
|
void SQLiteStmt::bind(const string & value)
|
|
|
|
{
|
|
|
|
if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "binding argument");
|
2010-02-19 16:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void SQLiteStmt::bind(int value)
|
|
|
|
{
|
|
|
|
if (sqlite3_bind_int(stmt, curArg++, value) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "binding argument");
|
2010-02-19 16:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-17 12:40:52 +00:00
|
|
|
void SQLiteStmt::bind64(long long value)
|
|
|
|
{
|
|
|
|
if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "binding argument");
|
2010-11-17 12:40:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-19 16:43:25 +00:00
|
|
|
void SQLiteStmt::bind()
|
|
|
|
{
|
|
|
|
if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "binding argument");
|
2010-02-19 16:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-19 16:04:51 +00:00
|
|
|
/* Helper class to ensure that prepared statements are reset when
|
|
|
|
leaving the scope that uses them. Unfinished prepared statements
|
|
|
|
prevent transactions from being aborted, and can cause locks to be
|
|
|
|
kept when they should be released. */
|
|
|
|
struct SQLiteStmtUse
|
|
|
|
{
|
|
|
|
SQLiteStmt & stmt;
|
|
|
|
SQLiteStmtUse(SQLiteStmt & stmt) : stmt(stmt)
|
|
|
|
{
|
|
|
|
stmt.reset();
|
|
|
|
}
|
|
|
|
~SQLiteStmtUse()
|
|
|
|
{
|
2010-02-22 14:18:55 +00:00
|
|
|
try {
|
|
|
|
stmt.reset();
|
|
|
|
} catch (...) {
|
|
|
|
ignoreException();
|
|
|
|
}
|
2010-02-19 16:04:51 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-02-18 14:40:07 +00:00
|
|
|
struct SQLiteTxn
|
|
|
|
{
|
|
|
|
bool active;
|
|
|
|
sqlite3 * db;
|
|
|
|
|
|
|
|
SQLiteTxn(sqlite3 * db) : active(false) {
|
|
|
|
this->db = db;
|
|
|
|
if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "starting transaction");
|
2010-02-18 14:40:07 +00:00
|
|
|
active = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void commit()
|
|
|
|
{
|
|
|
|
if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "committing transaction");
|
2010-02-18 14:40:07 +00:00
|
|
|
active = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
~SQLiteTxn()
|
|
|
|
{
|
|
|
|
try {
|
|
|
|
if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "aborting transaction");
|
2010-02-18 14:40:07 +00:00
|
|
|
} catch (...) {
|
|
|
|
ignoreException();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2006-03-10 22:27:26 +00:00
|
|
|
void checkStoreNotSymlink()
|
|
|
|
{
|
|
|
|
if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return;
|
|
|
|
Path path = nixStore;
|
|
|
|
struct stat st;
|
|
|
|
while (path != "/") {
|
|
|
|
if (lstat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting status of `%1%'") % path);
|
|
|
|
if (S_ISLNK(st.st_mode))
|
|
|
|
throw Error(format(
|
|
|
|
"the path `%1%' is a symlink; "
|
|
|
|
"this is not allowed for the Nix store and its parent directories")
|
|
|
|
% path);
|
|
|
|
path = dirOf(path);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
/* Open (and if necessary initialise or upgrade) the local Nix store.
   In read-only mode the database is simply opened; otherwise the
   state directories are created, the "big" store lock is acquired in
   shared mode, and the on-disk schema version is checked/upgraded.
   Fix: reuse `profilesDir' instead of rebuilding the same string for
   createDirs(). */
LocalStore::LocalStore()
{
    substitutablePathsLoaded = false;

    schemaPath = nixDBPath + "/schema";

    if (readOnlyMode) {
        openDB(false);
        return;
    }

    /* Create missing state directories if they don't already exist. */
    createDirs(nixStore);
    Path profilesDir = nixStateDir + "/profiles";
    createDirs(profilesDir);
    createDirs(nixStateDir + "/temproots");
    createDirs(nixDBPath);
    Path gcRootsDir = nixStateDir + "/gcroots";
    if (!pathExists(gcRootsDir)) {
        createDirs(gcRootsDir);
        if (symlink(profilesDir.c_str(), (gcRootsDir + "/profiles").c_str()) == -1)
            throw SysError(format("creating symlink to `%1%'") % profilesDir);
    }

    checkStoreNotSymlink();

    /* Acquire the big fat lock in shared mode to make sure that no
       schema upgrade is in progress. */
    try {
        Path globalLockPath = nixDBPath + "/big-lock";
        globalLock = openLockFile(globalLockPath.c_str(), true);
    } catch (SysError & e) {
        /* EACCES means we cannot write to the store; fall back to
           read-only access.  Any other error is fatal. */
        if (e.errNo != EACCES) throw;
        readOnlyMode = true;
        openDB(false);
        return;
    }

    if (!lockFile(globalLock, ltRead, false)) {
        printMsg(lvlError, "waiting for the big Nix store lock...");
        lockFile(globalLock, ltRead, true);
    }

    /* Check the current database schema and if necessary do an
       upgrade. */
    int curSchema = getSchema();
    if (curSchema > nixSchemaVersion)
        throw Error(format("current Nix store schema is version %1%, but I only support %2%")
            % curSchema % nixSchemaVersion);

    else if (curSchema == 0) { /* new store */
        curSchema = nixSchemaVersion;
        openDB(true);
        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
    }

    else if (curSchema < nixSchemaVersion) {
        if (curSchema < 5)
            throw Error(
                "Your Nix store has a database in Berkeley DB format,\n"
                "which is no longer supported. To convert to the new format,\n"
                "please upgrade Nix to version 0.12 first.");

        /* An upgrade needs exclusive access to the store. */
        if (!lockFile(globalLock, ltWrite, false)) {
            printMsg(lvlError, "waiting for exclusive access to the Nix store...");
            lockFile(globalLock, ltWrite, true);
        }

        /* Get the schema version again, because another process may
           have performed the upgrade already. */
        curSchema = getSchema();

        if (curSchema < 6) upgradeStore6();

        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());

        /* Downgrade back to a shared lock. */
        lockFile(globalLock, ltRead, true);
    }

    else openDB(false);
}
|
|
|
|
|
|
|
|
|
2006-11-30 17:43:04 +00:00
|
|
|
/* Shut down the store: close the pipes of all running substituter
   processes and wait for each to exit.  Errors are logged, not
   thrown. */
LocalStore::~LocalStore()
{
    try {
        for (RunningSubstituters::iterator i = runningSubstituters.begin();
             i != runningSubstituters.end(); ++i)
        {
            i->second.to.close();
            i->second.from.close();
            i->second.pid.wait(true);
        }
    } catch (...) {
        ignoreException();
    }
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
int LocalStore::getSchema()
|
2003-10-15 12:42:39 +00:00
|
|
|
{
|
2008-06-09 13:52:45 +00:00
|
|
|
int curSchema = 0;
|
|
|
|
if (pathExists(schemaPath)) {
|
|
|
|
string s = readFile(schemaPath);
|
|
|
|
if (!string2Int(s, curSchema))
|
|
|
|
throw Error(format("`%1%' is corrupt") % schemaPath);
|
|
|
|
}
|
|
|
|
return curSchema;
|
2003-10-15 12:42:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-24 16:30:20 +00:00
|
|
|
/* Open the SQLite database `db.sqlite' in the Nix database directory
   and configure it (busy timeout, foreign keys, synchronous and
   journal modes).  If `create' is true, also create the file and
   initialise the schema.  Finally, prepare all SQL statements used by
   the other LocalStore methods — the bind() call order in those
   methods must match the parameter order of the SQL below. */
void LocalStore::openDB(bool create)
{
    /* Open the Nix database. */
    if (sqlite3_open_v2((nixDBPath + "/db.sqlite").c_str(), &db.db,
            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
        throw Error("cannot open SQLite database");

    /* Wait up to an hour on a locked database rather than failing. */
    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
        throwSQLiteError(db, "setting timeout");

    if (sqlite3_exec(db, "pragma foreign_keys = 1;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "enabling foreign keys");

    /* !!! check whether sqlite has been built with foreign key
       support */

    /* Whether SQLite should fsync().  "Normal" synchronous mode
       should be safe enough.  If the user asks for it, don't sync at
       all.  This can cause database corruption if the system
       crashes. */
    string syncMode = queryBoolSetting("fsync-metadata", true) ? "normal" : "off";
    if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting synchronous mode");

    /* Set the SQLite journal mode.  WAL mode is fastest, so it's the
       default. */
    string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate";
    string prevMode;
    {
        /* Query the current journal mode first: setting it is only
           attempted when it actually differs. */
        SQLiteStmt stmt;
        stmt.create(db, "pragma main.journal_mode;");
        if (sqlite3_step(stmt) != SQLITE_ROW)
            throwSQLiteError(db, "querying journal mode");
        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
    }
    if (prevMode != mode &&
        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting journal mode");

    /* Increase the auto-checkpoint interval to 8192 pages.  This
       seems enough to ensure that instantiating the NixOS system
       derivation is done in a single fsync(). */
    if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 8192;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting autocheckpoint interval");

    /* Initialise the database schema, if necessary. */
    if (create) {
        /* `schema' is a generated char array containing schema.sql. */
#include "schema.sql.hh"
        if (sqlite3_exec(db, (const char *) schema, 0, 0, 0) != SQLITE_OK)
            throwSQLiteError(db, "initialising database schema");
    }

    /* Prepare SQL statements. */
    stmtRegisterValidPath.create(db,
        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
    stmtUpdatePathInfo.create(db,
        "update ValidPaths set narSize = ?, hash = ? where path = ?;");
    stmtAddReference.create(db,
        "insert or replace into Refs (referrer, reference) values (?, ?);");
    stmtQueryPathInfo.create(db,
        "select id, hash, registrationTime, deriver, narSize from ValidPaths where path = ?;");
    stmtQueryReferences.create(db,
        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
    stmtQueryReferrers.create(db,
        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
    stmtInvalidatePath.create(db,
        "delete from ValidPaths where path = ?;");
    stmtRegisterFailedPath.create(db,
        "insert into FailedPaths (path, time) values (?, ?);");
    stmtHasPathFailed.create(db,
        "select time from FailedPaths where path = ?;");
    stmtQueryFailedPaths.create(db,
        "select path from FailedPaths;");
    // If the path is a derivation, then clear its outputs.
    stmtClearFailedPath.create(db,
        "delete from FailedPaths where ?1 = '*' or path = ?1 "
        "or path in (select d.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where v.path = ?1);");
    stmtAddDerivationOutput.create(db,
        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
    stmtQueryValidDerivers.create(db,
        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
    stmtQueryDerivationOutputs.create(db,
        "select id, path from DerivationOutputs where drv = ?;");
}
|
|
|
|
|
|
|
|
|
2012-02-15 00:00:30 +00:00
|
|
|
const time_t mtimeStore = 1; /* 1 second into the epoch */
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
/* "Canonicalise" the metadata of `path': take ownership, strip
   write/setuid/setgid bits (keeping the execute bit), set the mtime
   to the fixed value mtimeStore, optionally recurse into
   directories, and finally mark the path immutable.  The operation
   order matters: permissions and timestamps are fixed before
   makeImmutable(), which would prevent further changes. */
void canonicalisePathMetaData(const Path & path, bool recurse)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path `%1%'") % path);

    /* Really make sure that the path is of a supported type.  This
       has already been checked in dumpPath(). */
    assert(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode));

    /* Change ownership to the current uid.  If it's a symlink, use
       lchown if available, otherwise don't bother.  Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable.  The only exception is top-level paths in the Nix
       store (since that directory is group-writable for the Nix build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), (gid_t) -1) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), (gid_t) -1) == -1)
#endif
            throw SysError(format("changing owner of `%1%' to %2%")
                % path % geteuid());
    }

    /* Symlink permissions/times cannot be changed portably, so the
       rest applies only to regular files and directories. */
    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        /* Normalise to r--r--r-- or r-xr-xr-x, preserving only the
           owner's execute bit. */
        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                | 0444
                | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError(format("changing mode of `%1%' to %2$o") % path % mode);
        }

        /* Stamp the canonical mtime, preserving atime. */
        if (st.st_mtime != mtimeStore) {
            struct utimbuf utimbuf;
            utimbuf.actime = st.st_atime;
            utimbuf.modtime = mtimeStore;
            if (utime(path.c_str(), &utimbuf) == -1)
                throw SysError(format("changing modification time of `%1%'") % path);
        }

    }

    if (recurse && S_ISDIR(st.st_mode)) {
        Strings names = readDirectory(path);
        foreach (Strings::iterator, i, names)
            canonicalisePathMetaData(path + "/" + *i, true);
    }

    makeImmutable(path);
}
|
|
|
|
|
|
|
|
|
|
|
|
void canonicalisePathMetaData(const Path & path)
|
|
|
|
{
|
2008-06-09 13:52:45 +00:00
|
|
|
canonicalisePathMetaData(path, true);
|
2006-12-09 20:02:27 +00:00
|
|
|
|
|
|
|
/* On platforms that don't have lchown(), the top-level path can't
|
|
|
|
be a symlink, since we can't change its ownership. */
|
|
|
|
struct stat st;
|
|
|
|
if (lstat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting attributes of path `%1%'") % path);
|
|
|
|
|
|
|
|
if (st.st_uid != geteuid()) {
|
|
|
|
assert(S_ISLNK(st.st_mode));
|
|
|
|
throw Error(format("wrong ownership of top-level store path `%1%'") % path);
|
2005-01-19 16:39:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-07-20 18:10:47 +00:00
|
|
|
/* Verify that the output paths recorded in the derivation `drv'
   (both in its outputs list and in its environment) match the paths
   that the output-path computation scheme would produce for it.
   Throws Error on any mismatch. */
void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
{
    string drvName = storePathToName(drvPath);
    assert(isDerivation(drvName));
    /* Strip the ".drv" extension to get the derivation's name. */
    drvName = string(drvName, 0, drvName.size() - drvExtension.size());

    if (isFixedOutputDrv(drv)) {
        /* Fixed-output derivations have exactly one output, `out',
           whose path is derived from its declared content hash. */
        DerivationOutputs::const_iterator out = drv.outputs.find("out");
        if (out == drv.outputs.end())
            throw Error(format("derivation `%1%' does not have an output named `out'") % drvPath);

        bool recursive; HashType ht; Hash h;
        out->second.parseHashInfo(recursive, ht, h);
        Path outPath = makeFixedOutputPath(recursive, ht, h, drvName);

        /* The `out' environment variable must agree as well. */
        StringPairs::const_iterator j = drv.env.find("out");
        if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
            throw Error(format("derivation `%1%' has incorrect output `%2%', should be `%3%'")
                % drvPath % out->second.path % outPath);
    }

    else {
        /* For ordinary derivations, output paths are computed from a
           hash of the derivation with its own output paths blanked
           out (the "hash modulo" construction, to break the
           circularity). */
        Derivation drvCopy(drv);
        foreach (DerivationOutputs::iterator, i, drvCopy.outputs) {
            i->second.path = "";
            drvCopy.env[i->first] = "";
        }

        Hash h = hashDerivationModulo(*this, drvCopy);

        foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
            Path outPath = makeOutputPath(i->first, h, drvName);
            StringPairs::const_iterator j = drv.env.find(i->first);
            if (i->second.path != outPath || j == drv.env.end() || j->second != outPath)
                throw Error(format("derivation `%1%' has incorrect output `%2%', should be `%3%'")
                    % drvPath % i->second.path % outPath);
        }
    }
}
|
|
|
|
|
|
|
|
|
2011-09-12 09:07:43 +00:00
|
|
|
/* Register `info' as a valid path in the database and return its
   SQLite row id.  For derivations, also record their outputs (after
   optionally validating them with checkDerivationOutputs when
   `checkOutputs' is set).  The bind() order below must match the
   column order of the prepared `stmtRegisterValidPath' SQL. */
unsigned long long LocalStore::addValidPath(const ValidPathInfo & info, bool checkOutputs)
{
    SQLiteStmtUse use(stmtRegisterValidPath);
    stmtRegisterValidPath.bind(info.path);
    stmtRegisterValidPath.bind("sha256:" + printHash(info.hash));
    /* A zero registration time means "now". */
    stmtRegisterValidPath.bind(info.registrationTime == 0 ? time(0) : info.registrationTime);
    if (info.deriver != "")
        stmtRegisterValidPath.bind(info.deriver);
    else
        stmtRegisterValidPath.bind(); // null
    if (info.narSize != 0)
        stmtRegisterValidPath.bind64(info.narSize);
    else
        stmtRegisterValidPath.bind(); // null
    if (sqlite3_step(stmtRegisterValidPath) != SQLITE_DONE)
        throwSQLiteError(db, format("registering valid path `%1%' in database") % info.path);
    unsigned long long id = sqlite3_last_insert_rowid(db);

    /* If this is a derivation, then store the derivation outputs in
       the database.  This is useful for the garbage collector: it can
       efficiently query whether a path is an output of some
       derivation. */
    if (isDerivation(info.path)) {
        Derivation drv = parseDerivation(readFile(info.path));

        /* Verify that the output paths in the derivation are correct
           (i.e., follow the scheme for computing output paths from
           derivations).  Note that if this throws an error, then the
           DB transaction is rolled back, so the path validity
           registration above is undone. */
        if (checkOutputs) checkDerivationOutputs(info.path, drv);

        foreach (DerivationOutputs::iterator, i, drv.outputs) {
            SQLiteStmtUse use(stmtAddDerivationOutput);
            stmtAddDerivationOutput.bind(id);
            stmtAddDerivationOutput.bind(i->first);
            stmtAddDerivationOutput.bind(i->second.path);
            if (sqlite3_step(stmtAddDerivationOutput) != SQLITE_DONE)
                throwSQLiteError(db, format("adding derivation output for `%1%' in database") % info.path);
        }
    }

    return id;
}
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
|
2010-02-19 16:04:51 +00:00
|
|
|
void LocalStore::addReference(unsigned long long referrer, unsigned long long reference)
|
|
|
|
{
|
|
|
|
SQLiteStmtUse use(stmtAddReference);
|
2010-02-19 16:43:25 +00:00
|
|
|
stmtAddReference.bind(referrer);
|
|
|
|
stmtAddReference.bind(reference);
|
2010-02-19 16:04:51 +00:00
|
|
|
if (sqlite3_step(stmtAddReference) != SQLITE_DONE)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "adding reference to database");
|
2010-02-19 16:04:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-03-25 21:05:42 +00:00
|
|
|
void LocalStore::registerFailedPath(const Path & path)
|
|
|
|
{
|
2010-02-19 17:15:22 +00:00
|
|
|
if (hasPathFailed(path)) return;
|
|
|
|
SQLiteStmtUse use(stmtRegisterFailedPath);
|
|
|
|
stmtRegisterFailedPath.bind(path);
|
|
|
|
stmtRegisterFailedPath.bind(time(0));
|
|
|
|
if (sqlite3_step(stmtRegisterFailedPath) != SQLITE_DONE)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, format("registering failed path `%1%'") % path);
|
2009-03-25 21:05:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool LocalStore::hasPathFailed(const Path & path)
|
|
|
|
{
|
2010-02-19 17:15:22 +00:00
|
|
|
SQLiteStmtUse use(stmtHasPathFailed);
|
|
|
|
stmtHasPathFailed.bind(path);
|
|
|
|
int res = sqlite3_step(stmtHasPathFailed);
|
|
|
|
if (res != SQLITE_DONE && res != SQLITE_ROW)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "querying whether path failed");
|
2010-02-19 17:15:22 +00:00
|
|
|
return res == SQLITE_ROW;
|
2009-03-25 21:05:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-04-26 12:43:42 +00:00
|
|
|
/* Return the set of all paths recorded as failed builds. */
PathSet LocalStore::queryFailedPaths()
{
    SQLiteStmtUse use(stmtQueryFailedPaths);

    PathSet failed;
    for (;;) {
        int rc = sqlite3_step(stmtQueryFailedPaths);
        if (rc == SQLITE_DONE) break;
        if (rc != SQLITE_ROW)
            throwSQLiteError(db, "error querying failed paths");
        const char * s = (const char *) sqlite3_column_text(stmtQueryFailedPaths, 0);
        assert(s);
        failed.insert(s);
    }

    return failed;
}
|
|
|
|
|
|
|
|
|
2010-04-26 12:56:42 +00:00
|
|
|
void LocalStore::clearFailedPaths(const PathSet & paths)
|
|
|
|
{
|
|
|
|
SQLiteTxn txn(db);
|
|
|
|
|
|
|
|
foreach (PathSet::const_iterator, i, paths) {
|
|
|
|
SQLiteStmtUse use(stmtClearFailedPath);
|
|
|
|
stmtClearFailedPath.bind(*i);
|
|
|
|
if (sqlite3_step(stmtClearFailedPath) != SQLITE_DONE)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, format("clearing failed path `%1%' in database") % *i);
|
2010-04-26 12:56:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
txn.commit();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
/* Parse a "type:hash" field (e.g. "sha256:abc...") from a valid-path
   database entry.  `path' is used only for error messages. */
Hash parseHashField(const Path & path, const string & s)
{
    string::size_type colon = s.find(':');
    if (colon == string::npos)
        throw Error(format("corrupt hash `%1%' in valid-path entry for `%2%'")
            % s % path);

    string typeName(s, 0, colon);
    HashType ht = parseHashType(typeName);
    if (ht == htUnknown)
        throw Error(format("unknown hash type `%1%' in valid-path entry for `%2%'")
            % typeName % path);

    return parseHash(ht, string(s, colon + 1));
}
|
|
|
|
|
|
|
|
|
2010-02-18 15:52:57 +00:00
|
|
|
/* Look up `path' in the database and return its metadata (row id,
   hash, registration time, deriver, NAR size) together with its
   references.  Throws Error if the path is not valid.  Column
   indices below correspond to the `stmtQueryPathInfo' SQL prepared
   in openDB(). */
ValidPathInfo LocalStore::queryPathInfo(const Path & path)
{
    ValidPathInfo info;
    info.path = path;

    assertStorePath(path);

    /* Get the path info. */
    SQLiteStmtUse use1(stmtQueryPathInfo);

    stmtQueryPathInfo.bind(path);

    int r = sqlite3_step(stmtQueryPathInfo);
    if (r == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
    if (r != SQLITE_ROW) throwSQLiteError(db, "querying path in database");

    info.id = sqlite3_column_int(stmtQueryPathInfo, 0);

    /* Column 1: the "type:hash" field; must be non-NULL. */
    const char * s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 1);
    assert(s);
    info.hash = parseHashField(path, s);

    info.registrationTime = sqlite3_column_int(stmtQueryPathInfo, 2);

    /* Column 3: deriver is nullable. */
    s = (const char *) sqlite3_column_text(stmtQueryPathInfo, 3);
    if (s) info.deriver = s;

    /* Note that narSize = NULL yields 0. */
    info.narSize = sqlite3_column_int64(stmtQueryPathInfo, 4);

    /* Get the references. */
    SQLiteStmtUse use2(stmtQueryReferences);

    stmtQueryReferences.bind(info.id);

    while ((r = sqlite3_step(stmtQueryReferences)) == SQLITE_ROW) {
        s = (const char *) sqlite3_column_text(stmtQueryReferences, 0);
        assert(s);
        info.references.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting references of `%1%'") % path);

    return info;
}
|
|
|
|
|
|
|
|
|
2011-12-02 17:52:18 +00:00
|
|
|
/* Update path info in the database. Currently only updates the
|
2010-12-06 15:29:38 +00:00
|
|
|
narSize field. */
|
|
|
|
void LocalStore::updatePathInfo(const ValidPathInfo & info)
|
|
|
|
{
|
|
|
|
SQLiteStmtUse use(stmtUpdatePathInfo);
|
|
|
|
if (info.narSize != 0)
|
|
|
|
stmtUpdatePathInfo.bind64(info.narSize);
|
|
|
|
else
|
|
|
|
stmtUpdatePathInfo.bind(); // null
|
2011-12-02 17:52:18 +00:00
|
|
|
stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash));
|
2010-12-06 15:29:38 +00:00
|
|
|
stmtUpdatePathInfo.bind(info.path);
|
|
|
|
if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE)
|
|
|
|
throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-24 15:07:23 +00:00
|
|
|
unsigned long long LocalStore::queryValidPathId(const Path & path)
|
|
|
|
{
|
|
|
|
SQLiteStmtUse use(stmtQueryPathInfo);
|
|
|
|
stmtQueryPathInfo.bind(path);
|
|
|
|
int res = sqlite3_step(stmtQueryPathInfo);
|
|
|
|
if (res == SQLITE_ROW) return sqlite3_column_int(stmtQueryPathInfo, 0);
|
|
|
|
if (res == SQLITE_DONE) throw Error(format("path `%1%' is not valid") % path);
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "querying path in database");
|
2010-02-24 15:07:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
bool LocalStore::isValidPath(const Path & path)
|
2005-01-19 16:59:56 +00:00
|
|
|
{
|
2010-02-19 16:04:51 +00:00
|
|
|
SQLiteStmtUse use(stmtQueryPathInfo);
|
2010-02-19 16:43:25 +00:00
|
|
|
stmtQueryPathInfo.bind(path);
|
2010-02-18 15:52:57 +00:00
|
|
|
int res = sqlite3_step(stmtQueryPathInfo);
|
2010-02-18 15:11:08 +00:00
|
|
|
if (res != SQLITE_DONE && res != SQLITE_ROW)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, "querying path in database");
|
2010-02-18 15:11:08 +00:00
|
|
|
return res == SQLITE_ROW;
|
2005-01-19 16:59:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
/* Return the set of all currently valid store paths, straight from
   the ValidPaths table. */
PathSet LocalStore::queryValidPaths()
{
    /* This query is ad hoc (not one of the precompiled member
       statements), so prepare it locally. */
    SQLiteStmt stmt;
    stmt.create(db, "select path from ValidPaths");

    PathSet res;

    int r;
    while ((r = sqlite3_step(stmt)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmt, 0);
        assert(s); // the path column is never NULL
        res.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, "error getting valid paths");

    return res;
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
void LocalStore::queryReferences(const Path & path,
|
|
|
|
PathSet & references)
|
2005-02-07 13:40:40 +00:00
|
|
|
{
|
2008-06-09 13:52:45 +00:00
|
|
|
ValidPathInfo info = queryPathInfo(path);
|
|
|
|
references.insert(info.references.begin(), info.references.end());
|
2005-02-07 13:40:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-18 16:21:59 +00:00
|
|
|
void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
|
2005-02-07 13:40:40 +00:00
|
|
|
{
|
2010-02-18 16:21:59 +00:00
|
|
|
assertStorePath(path);
|
2008-06-09 13:52:45 +00:00
|
|
|
|
2010-02-19 16:04:51 +00:00
|
|
|
SQLiteStmtUse use(stmtQueryReferrers);
|
2008-06-09 13:52:45 +00:00
|
|
|
|
2010-02-19 16:43:25 +00:00
|
|
|
stmtQueryReferrers.bind(path);
|
2008-06-09 13:52:45 +00:00
|
|
|
|
2010-02-18 16:21:59 +00:00
|
|
|
int r;
|
|
|
|
while ((r = sqlite3_step(stmtQueryReferrers)) == SQLITE_ROW) {
|
|
|
|
const char * s = (const char *) sqlite3_column_text(stmtQueryReferrers, 0);
|
|
|
|
assert(s);
|
|
|
|
referrers.insert(s);
|
|
|
|
}
|
2008-06-09 13:52:45 +00:00
|
|
|
|
2010-02-18 16:21:59 +00:00
|
|
|
if (r != SQLITE_DONE)
|
2010-12-05 18:23:19 +00:00
|
|
|
throwSQLiteError(db, format("error getting references of `%1%'") % path);
|
2005-02-07 13:40:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-12 16:53:44 +00:00
|
|
|
/* Return the deriver of `path' (possibly the empty string), as
   stored in the path's validity info. */
Path LocalStore::queryDeriver(const Path & path)
{
    ValidPathInfo info = queryPathInfo(path);
    return info.deriver;
}
|
|
|
|
|
|
|
|
|
2010-02-22 11:44:17 +00:00
|
|
|
/* Return all valid derivations that have `path' as an output. */
PathSet LocalStore::queryValidDerivers(const Path & path)
{
    assertStorePath(path);

    SQLiteStmtUse use(stmtQueryValidDerivers);
    stmtQueryValidDerivers.bind(path);

    PathSet derivers;
    int r;
    while ((r = sqlite3_step(stmtQueryValidDerivers)) == SQLITE_ROW) {
        /* Column 1 holds the deriver path (column 0 is presumably the
           id — determined by stmtQueryValidDerivers' SQL, declared
           elsewhere). */
        const char * s = (const char *) sqlite3_column_text(stmtQueryValidDerivers, 1);
        assert(s);
        derivers.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting valid derivers of `%1%'") % path);

    return derivers;
}
|
|
|
|
|
|
|
|
|
2010-02-22 12:44:36 +00:00
|
|
|
/* Return the output paths of the derivation `path'. */
PathSet LocalStore::queryDerivationOutputs(const Path & path)
{
    /* Open a transaction so that queryValidPathId() and the outputs
       query see a consistent snapshot; it is never committed (a
       read-only rollback at scope exit is fine). */
    SQLiteTxn txn(db);

    SQLiteStmtUse use(stmtQueryDerivationOutputs);
    stmtQueryDerivationOutputs.bind(queryValidPathId(path));

    PathSet outputs;
    int r;
    while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
        /* Column 1 is the output path (column 0 is the output name,
           as used by queryDerivationOutputNames below). */
        const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 1);
        assert(s);
        outputs.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting outputs of `%1%'") % path);

    return outputs;
}
|
|
|
|
|
|
|
|
|
2011-11-06 06:28:20 +00:00
|
|
|
/* Return the output *names* (e.g. "out") of the derivation `path'.
   Mirrors queryDerivationOutputs() but reads column 0 instead of
   column 1. */
StringSet LocalStore::queryDerivationOutputNames(const Path & path)
{
    /* Read-only transaction for a consistent view; rolled back at
       scope exit. */
    SQLiteTxn txn(db);

    SQLiteStmtUse use(stmtQueryDerivationOutputs);
    stmtQueryDerivationOutputs.bind(queryValidPathId(path));

    StringSet outputNames;
    int r;
    while ((r = sqlite3_step(stmtQueryDerivationOutputs)) == SQLITE_ROW) {
        const char * s = (const char *) sqlite3_column_text(stmtQueryDerivationOutputs, 0);
        assert(s);
        outputNames.insert(s);
    }

    if (r != SQLITE_DONE)
        throwSQLiteError(db, format("error getting output names of `%1%'") % path);

    return outputNames;
}
|
|
|
|
|
|
|
|
|
2008-08-02 12:54:35 +00:00
|
|
|
/* Lazily start the external substituter program `substituter' in
   query mode ("--query"), wiring its stdin/stdout to pipes stored in
   `run'.  No-op if it is already running. */
void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run)
{
    if (run.pid != -1) return; // already started

    debug(format("starting substituter program `%1%'") % substituter);

    Pipe toPipe, fromPipe;

    toPipe.create();   // parent -> child (child's stdin)
    fromPipe.create(); // child -> parent (child's stdout)

    run.pid = fork();

    switch (run.pid) {

    case -1:
        throw SysError("unable to fork");

    case 0: /* child */
        try {
            /* Hack to let "make check" succeed on Darwin.  The
               libtool wrapper script sets DYLD_LIBRARY_PATH to our
               libutil (among others), but Perl also depends on a
               library named libutil.  As a result, substituters
               written in Perl (i.e. all of them) fail. */
            unsetenv("DYLD_LIBRARY_PATH");

            /* Close the ends this process doesn't use, then attach
               the pipes to stdin/stdout before exec. */
            fromPipe.readSide.close();
            toPipe.writeSide.close();
            if (dup2(toPipe.readSide, STDIN_FILENO) == -1)
                throw SysError("dupping stdin");
            if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1)
                throw SysError("dupping stdout");
            closeMostFDs(set<int>());
            execl(substituter.c_str(), substituter.c_str(), "--query", NULL);
            throw SysError(format("executing `%1%'") % substituter);
        } catch (std::exception & e) {
            std::cerr << "error: " << e.what() << std::endl;
        }
        /* Exit without running destructors/atexit handlers in the
           forked child. */
        quickExit(1);
    }

    /* Parent. */

    run.to = toPipe.writeSide.borrow();
    run.from = fromPipe.readSide.borrow();
}
|
|
|
|
|
|
|
|
|
2009-03-28 19:41:53 +00:00
|
|
|
template<class T> T getIntLine(int fd)
|
2008-08-05 10:57:53 +00:00
|
|
|
{
|
2009-03-28 19:41:53 +00:00
|
|
|
string s = readLine(fd);
|
2008-08-05 10:57:53 +00:00
|
|
|
T res;
|
2009-03-28 19:41:53 +00:00
|
|
|
if (!string2Int(s, res)) throw Error("integer expected from stream");
|
2008-08-05 10:57:53 +00:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-08-12 00:29:28 +00:00
|
|
|
/* Return whether any configured substituter claims to be able to
   supply `path'.  Uses the substituter query protocol: send
   "have\n<path>", read back an integer (non-zero = yes). */
bool LocalStore::hasSubstitutes(const Path & path)
{
    foreach (Paths::iterator, i, substituters) {
        RunningSubstituter & run(runningSubstituters[*i]);
        startSubstituter(*i, run); // no-op if already running
        writeLine(run.to, "have\n" + path);
        if (getIntLine<int>(run.from)) return true;
    }

    return false;
}
|
|
|
|
|
|
|
|
|
2008-08-04 13:15:35 +00:00
|
|
|
/* Ask the given substituter for info about `path'.  Returns false if
   the substituter doesn't know the path; otherwise fills in `info'.
   The reply format (read in this exact order) is: success flag,
   deriver, number of references, the references, download size, NAR
   size. */
bool LocalStore::querySubstitutablePathInfo(const Path & substituter,
    const Path & path, SubstitutablePathInfo & info)
{
    RunningSubstituter & run(runningSubstituters[substituter]);
    startSubstituter(substituter, run);

    writeLine(run.to, "info\n" + path);

    /* 0 = substituter doesn't have the path. */
    if (!getIntLine<int>(run.from)) return false;

    info.deriver = readLine(run.from);
    if (info.deriver != "") assertStorePath(info.deriver);
    int nrRefs = getIntLine<int>(run.from);
    while (nrRefs--) {
        Path p = readLine(run.from);
        assertStorePath(p);
        info.references.insert(p);
    }
    info.downloadSize = getIntLine<long long>(run.from);
    info.narSize = getIntLine<long long>(run.from);

    return true;
}
|
|
|
|
|
|
|
|
|
|
|
|
bool LocalStore::querySubstitutablePathInfo(const Path & path,
|
|
|
|
SubstitutablePathInfo & info)
|
|
|
|
{
|
|
|
|
foreach (Paths::iterator, i, substituters)
|
|
|
|
if (querySubstitutablePathInfo(*i, path, info)) return true;
|
2008-08-02 12:54:35 +00:00
|
|
|
return false;
|
2004-12-20 13:43:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
/* Return the stored content hash of `path', taken from its validity
   info. */
Hash LocalStore::queryPathHash(const Path & path)
{
    ValidPathInfo info = queryPathInfo(path);
    return info.hash;
}
|
|
|
|
|
|
|
|
|
2010-12-14 13:25:20 +00:00
|
|
|
void LocalStore::registerValidPath(const ValidPathInfo & info)
|
|
|
|
{
|
|
|
|
ValidPathInfos infos;
|
|
|
|
infos.push_back(info);
|
|
|
|
registerValidPaths(infos);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-24 12:48:00 +00:00
|
|
|
/* Register a batch of paths as valid, atomically.  Retries the whole
   transaction on SQLITE_BUSY. */
void LocalStore::registerValidPaths(const ValidPathInfos & infos)
{
    while (1) {
        try {
            SQLiteTxn txn(db);
            PathSet paths;

            /* First pass: insert the ValidPaths rows, so that all ids
               exist before references are added. */
            foreach (ValidPathInfos::const_iterator, i, infos) {
                assert(i->hash.type == htSHA256);
                /* !!! Maybe the registration info should be updated if the
                   path is already valid. */
                if (!isValidPath(i->path)) addValidPath(*i);
                paths.insert(i->path);
            }

            /* Second pass: record the reference edges. */
            foreach (ValidPathInfos::const_iterator, i, infos) {
                unsigned long long referrer = queryValidPathId(i->path);
                foreach (PathSet::iterator, j, i->references)
                    addReference(referrer, queryValidPathId(*j));
            }

            /* Do a topological sort of the paths.  This will throw an
               error if a cycle is detected and roll back the
               transaction.  Cycles can only occur when a derivation
               has multiple outputs. */
            topoSortPaths(*this, paths);

            txn.commit();
            break;
        } catch (SQLiteBusy & e) {
            /* Retry; the `txn' destructor will roll back the current
               transaction. */
        }
    }
}
|
2003-07-07 09:25:26 +00:00
|
|
|
|
2003-07-31 16:05:35 +00:00
|
|
|
|
2005-01-31 14:00:43 +00:00
|
|
|
/* Invalidate a path.  The caller is responsible for checking that
   there are no referrers. */
void LocalStore::invalidatePath(const Path & path)
{
    debug(format("invalidating path `%1%'") % path);

    /* Drop any cached derivation hash for this path. */
    drvHashes.erase(path);

    SQLiteStmtUse use(stmtInvalidatePath);

    stmtInvalidatePath.bind(path);

    if (sqlite3_step(stmtInvalidatePath) != SQLITE_DONE)
        throwSQLiteError(db, format("invalidating path `%1%' in database") % path);

    /* Note that the foreign key constraints on the Refs table take
       care of deleting the references entries for `path'. */
}
|
|
|
|
|
|
|
|
|
2008-12-03 18:05:14 +00:00
|
|
|
/* Add a path to the store from an in-memory serialisation: a NAR
   dump if `recursive', else the raw file contents.  Returns the
   (fixed-output) destination path; idempotent if the path is already
   valid. */
Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
    bool recursive, HashType hashAlgo)
{
    Hash h = hashString(hashAlgo, dump);

    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);

    /* Protect the path from the garbage collector while we work. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        /* The first check above is an optimisation to prevent
           unnecessary lock acquisition. */

        PathLocks outputLock(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: another process may have built the
           path while we were waiting. */
        if (!isValidPath(dstPath)) {

            /* Remove any stale (invalid) leftovers first. */
            if (pathExists(dstPath)) deletePathWrapped(dstPath);

            if (recursive) {
                StringSource source(dump);
                restorePath(dstPath, source);
            } else
                writeFile(dstPath, dump);

            canonicalisePathMetaData(dstPath);

            /* Register the SHA-256 hash of the NAR serialisation of
               the path in the database.  We may just have computed it
               above (if called with recursive == true and hashAlgo ==
               sha256); otherwise, compute it here. */
            HashResult hash;
            if (recursive) {
                hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
                hash.second = dump.size();
            } else
                hash = hashPath(htSHA256, dstPath);

            ValidPathInfo info;
            info.path = dstPath;
            info.hash = hash.first;
            info.narSize = hash.second;
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
|
|
|
|
|
|
|
|
|
2008-12-03 18:05:14 +00:00
|
|
|
/* Copy `_srcPath' into the store and return the resulting store
   path.  The whole input is serialised into memory first. */
Path LocalStore::addToStore(const Path & _srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter)
{
    const Path src = absPath(_srcPath);
    debug(format("adding `%1%' to the store") % src);

    /* Read the whole path into memory.  This is not a very scalable
       method for very large paths, but `copyPath' is mainly used for
       small files. */
    StringSink serialised;
    if (recursive)
        dumpPath(src, serialised, filter); // NAR dump
    else
        serialised.s = readFile(src);      // flat file contents

    return addToStoreFromDump(serialised.s, baseNameOf(src), recursive, hashAlgo);
}
|
|
|
|
|
|
|
|
|
2008-12-03 15:06:30 +00:00
|
|
|
/* Store the string `s' as a text file in the store, with the given
   references.  Returns the destination path; idempotent if the path
   is already valid. */
Path LocalStore::addTextToStore(const string & name, const string & s,
    const PathSet & references)
{
    Path dstPath = computeStorePathForText(name, s, references);

    /* Protect the path from the garbage collector while we work. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        PathLocks outputLock(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: someone else may have created it
           in the meantime. */
        if (!isValidPath(dstPath)) {

            /* Remove any stale (invalid) leftovers first. */
            if (pathExists(dstPath)) deletePathWrapped(dstPath);

            writeFile(dstPath, s);

            canonicalisePathMetaData(dstPath);

            /* Register the SHA-256 hash of the NAR serialisation. */
            HashResult hash = hashPath(htSHA256, dstPath);

            ValidPathInfo info;
            info.path = dstPath;
            info.hash = hash.first;
            info.narSize = hash.second;
            info.references = references;
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
|
|
|
|
|
|
|
|
|
2007-02-21 14:31:42 +00:00
|
|
|
/* A Sink that forwards all data to an underlying sink while also
   feeding it into a SHA-256 HashSink, so the hash of everything
   written so far can be queried at any point. */
struct HashAndWriteSink : Sink
{
    Sink & writeSink;   // the real destination
    HashSink hashSink;  // accumulates SHA-256 of all data seen
    HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
    {
    }
    virtual void operator () (const unsigned char * data, size_t len)
    {
        writeSink(data, len);
        hashSink(data, len);
    }
    /* Hash of the bytes written so far (does not finalise the
       sink). */
    Hash currentHash()
    {
        return hashSink.currentHash().first;
    }
};
|
|
|
|
|
|
|
|
|
|
|
|
/* Magic number marking the metadata section of an exported store
   path (the bytes spell "EXIN" in ASCII). */
#define EXPORT_MAGIC 0x4558494e
|
|
|
|
|
|
|
|
|
2007-02-21 17:51:10 +00:00
|
|
|
static void checkSecrecy(const Path & path)
|
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
if (stat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting status of `%1%'") % path);
|
|
|
|
if ((st.st_mode & (S_IRWXG | S_IRWXO)) != 0)
|
|
|
|
throw Error(format("file `%1%' should be secret (inaccessible to everybody else)!") % path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-20 23:17:20 +00:00
|
|
|
/* Serialise `path' to `sink' in the export format: NAR dump,
   EXPORT_MAGIC, the path, its references, its deriver, and
   optionally an RSA signature over the NAR hash.  The write order
   here must match what importPath() reads. */
void LocalStore::exportPath(const Path & path, bool sign,
    Sink & sink)
{
    assertStorePath(path);

    /* Keep the path alive (GC-wise) while exporting. */
    addTempRoot(path);
    if (!isValidPath(path))
        throw Error(format("path `%1%' is not valid") % path);

    /* Everything written below is also hashed, so we can compare
       against the stored hash and optionally sign it. */
    HashAndWriteSink hashAndWriteSink(sink);

    dumpPath(path, hashAndWriteSink);

    /* Refuse to export paths that have changed.  This prevents
       filesystem corruption from spreading to other machines.
       Don't complain if the stored hash is zero (unknown). */
    Hash hash = hashAndWriteSink.currentHash();
    Hash storedHash = queryPathHash(path);
    if (hash != storedHash && storedHash != Hash(storedHash.type))
        throw Error(format("hash of path `%1%' has changed from `%2%' to `%3%'!") % path
            % printHash(storedHash) % printHash(hash));

    writeInt(EXPORT_MAGIC, hashAndWriteSink);

    writeString(path, hashAndWriteSink);

    PathSet references;
    queryReferences(path, references);
    writeStrings(references, hashAndWriteSink);

    Path deriver = queryDeriver(path);
    writeString(deriver, hashAndWriteSink);

    if (sign) {
        /* Hash covers everything up to (but not including) the
           signature itself. */
        Hash hash = hashAndWriteSink.currentHash();

        writeInt(1, hashAndWriteSink); // signature present

        /* Sign the hash with the local secret key via openssl
           rsautl. */
        Path tmpDir = createTempDir();
        AutoDelete delTmp(tmpDir);
        Path hashFile = tmpDir + "/hash";
        writeFile(hashFile, printHash(hash));

        Path secretKey = nixConfDir + "/signing-key.sec";
        checkSecrecy(secretKey);

        Strings args;
        args.push_back("rsautl");
        args.push_back("-sign");
        args.push_back("-inkey");
        args.push_back(secretKey);
        args.push_back("-in");
        args.push_back(hashFile);
        string signature = runProgram(OPENSSL_PATH, true, args);

        writeString(signature, hashAndWriteSink);

    } else
        writeInt(0, hashAndWriteSink); // no signature
}
|
|
|
|
|
|
|
|
|
2007-02-21 15:45:32 +00:00
|
|
|
/* A Source that forwards reads from an underlying source while
   feeding the data into a SHA-256 HashSink.  Hashing can be switched
   off (via `hashing') once the signed portion of the stream has been
   consumed. */
struct HashAndReadSource : Source
{
    Source & readSource; // the real input
    HashSink hashSink;   // accumulates SHA-256 of data read so far
    bool hashing;        // whether reads are still being hashed
    HashAndReadSource(Source & readSource) : readSource(readSource), hashSink(htSHA256)
    {
        hashing = true;
    }
    size_t read(unsigned char * data, size_t len)
    {
        size_t n = readSource.read(data, len);
        if (hashing) hashSink(data, n);
        return n;
    }
};
|
|
|
|
|
|
|
|
|
2010-06-21 11:08:09 +00:00
|
|
|
/* Create a temporary directory in the store that won't be
|
|
|
|
garbage-collected. */
|
|
|
|
Path LocalStore::createTempDirInStore()
|
|
|
|
{
|
|
|
|
Path tmpDir;
|
|
|
|
do {
|
|
|
|
/* There is a slight possibility that `tmpDir' gets deleted by
|
|
|
|
the GC between createTempDir() and addTempRoot(), so repeat
|
|
|
|
until `tmpDir' exists. */
|
|
|
|
tmpDir = createTempDir(nixStore);
|
|
|
|
addTempRoot(tmpDir);
|
|
|
|
} while (!pathExists(tmpDir));
|
|
|
|
return tmpDir;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-21 15:45:32 +00:00
|
|
|
/* Read one exported path from `source', verify its signature if
   required, and register it in the store.  The read order here must
   match the write order in exportPath(). */
Path LocalStore::importPath(bool requireSignature, Source & source)
{
    HashAndReadSource hashAndReadSource(source);

    /* We don't yet know what store path this archive contains (the
       store path follows the archive data proper), and besides, we
       don't know yet whether the signature is valid. */
    Path tmpDir = createTempDirInStore();
    AutoDelete delTmp(tmpDir);
    Path unpacked = tmpDir + "/unpacked";

    restorePath(unpacked, hashAndReadSource);

    unsigned int magic = readInt(hashAndReadSource);
    if (magic != EXPORT_MAGIC)
        throw Error("Nix archive cannot be imported; wrong format");

    Path dstPath = readStorePath(hashAndReadSource);

    printMsg(lvlInfo, format("importing path `%1%'") % dstPath);

    PathSet references = readStorePaths<PathSet>(hashAndReadSource);

    Path deriver = readString(hashAndReadSource);
    if (deriver != "") assertStorePath(deriver);

    /* Finalise the hash over everything read so far; the signature
       (if any) covers exactly this and is not hashed itself. */
    Hash hash = hashAndReadSource.hashSink.finish().first;
    hashAndReadSource.hashing = false;

    bool haveSignature = readInt(hashAndReadSource) == 1;

    if (requireSignature && !haveSignature)
        throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);

    if (haveSignature) {
        string signature = readString(hashAndReadSource);

        if (requireSignature) {
            /* Verify the signature with openssl rsautl and the public
               signing key. */
            Path sigFile = tmpDir + "/sig";
            writeFile(sigFile, signature);

            Strings args;
            args.push_back("rsautl");
            args.push_back("-verify");
            args.push_back("-inkey");
            args.push_back(nixConfDir + "/signing-key.pub");
            args.push_back("-pubin");
            args.push_back("-in");
            args.push_back(sigFile);
            string hash2 = runProgram(OPENSSL_PATH, true, args);

            /* Note: runProgram() throws an exception if the signature
               is invalid. */

            if (printHash(hash) != hash2)
                throw Error(
                    "signed hash doesn't match actual contents of imported "
                    "archive; archive could be corrupt, or someone is trying "
                    "to import a Trojan horse");
        }
    }

    /* Do the actual import. */

    /* !!! way too much code duplication with addTextToStore() etc. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        PathLocks outputLock;

        /* Lock the output path.  But don't lock if we're being called
           from a build hook (whose parent process already acquired a
           lock on this path). */
        Strings locksHeld = tokenizeString(getEnv("NIX_HELD_LOCKS"));
        if (find(locksHeld.begin(), locksHeld.end(), dstPath) == locksHeld.end())
            outputLock.lockPaths(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock. */
        if (!isValidPath(dstPath)) {

            if (pathExists(dstPath)) deletePathWrapped(dstPath);

            /* Move the unpacked tree into place. */
            if (rename(unpacked.c_str(), dstPath.c_str()) == -1)
                throw SysError(format("cannot move `%1%' to `%2%'")
                    % unpacked % dstPath);

            canonicalisePathMetaData(dstPath);

            /* !!! if we were clever, we could prevent the hashPath()
               here. */
            HashResult hash = hashPath(htSHA256, dstPath);

            ValidPathInfo info;
            info.path = dstPath;
            info.hash = hash.first;
            info.narSize = hash.second;
            info.references = references;
            /* Only record the deriver if it is itself valid here. */
            info.deriver = deriver != "" && isValidPath(deriver) ? deriver : "";
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
|
|
|
|
|
|
|
|
|
2011-12-16 22:31:25 +00:00
|
|
|
/* Import a sequence of exported paths from `source'.  The stream is
   a series of <1, exported-path> entries terminated by a 0. */
Paths LocalStore::importPaths(bool requireSignature, Source & source)
{
    Paths imported;
    for (;;) {
        unsigned long long marker = readLongLong(source);
        if (marker == 0) break; // end of sequence
        if (marker != 1) throw Error("input doesn't look like something created by `nix-store --export'");
        imported.push_back(importPath(requireSignature, source));
    }
    return imported;
}
|
|
|
|
|
|
|
|
|
2012-03-26 18:43:33 +00:00
|
|
|
/* Invalidate `path' after verifying that nothing else refers to it.
   Retries the transaction on SQLITE_BUSY. */
void LocalStore::invalidatePathChecked(const Path & path)
{
    assertStorePath(path);

    while (1) {
        try {
            SQLiteTxn txn(db);

            if (isValidPath(path)) {
                PathSet referrers; queryReferrers(path, referrers);
                referrers.erase(path); /* ignore self-references */
                if (!referrers.empty())
                    throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%")
                        % path % showPaths(referrers));
                invalidatePath(path);
            }

            txn.commit();
            break;
        } catch (SQLiteBusy & e) { }; // retry; txn dtor rolls back
    }
}
|
2003-07-17 12:27:55 +00:00
|
|
|
|
|
|
|
|
2008-06-09 13:52:45 +00:00
|
|
|
/* Check the store for consistency: every valid path must exist on
   disk, and (if `checkContents') its contents must match the stored
   hash.  Missing hashes and narSize fields are repaired. */
void LocalStore::verifyStore(bool checkContents)
{
    printMsg(lvlError, format("reading the Nix store..."));

    /* Acquire the global GC lock to prevent a garbage collection. */
    AutoCloseFD fdGCLock = openGCLock(ltWrite);

    Paths entries = readDirectory(nixStore);
    PathSet store(entries.begin(), entries.end());

    /* Check whether all valid paths actually exist. */
    printMsg(lvlInfo, "checking path existence...");

    /* validPaths ends up containing the paths that survive
       verifyPath(); done tracks visited paths across the recursive
       referrer walk. */
    PathSet validPaths2 = queryValidPaths(), validPaths, done;

    foreach (PathSet::iterator, i, validPaths2)
        verifyPath(*i, store, done, validPaths);

    /* Release the GC lock so that checking content hashes (which can
       take ages) doesn't block the GC or builds. */
    fdGCLock.close();

    /* Optionally, check the content hashes (slow). */
    if (checkContents) {
        printMsg(lvlInfo, "checking hashes...");

        /* An all-zero hash means "unknown" (old stores). */
        Hash nullHash(htSHA256);

        foreach (PathSet::iterator, i, validPaths) {
            try {
                ValidPathInfo info = queryPathInfo(*i);

                /* Check the content hash (optionally - slow). */
                printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
                HashResult current = hashPath(info.hash.type, *i);

                if (info.hash != nullHash && info.hash != current.first) {
                    printMsg(lvlError, format("path `%1%' was modified! "
                        "expected hash `%2%', got `%3%'")
                        % *i % printHash(info.hash) % printHash(current.first));
                } else {

                    bool update = false;

                    /* Fill in missing hashes. */
                    if (info.hash == nullHash) {
                        printMsg(lvlError, format("fixing missing hash on `%1%'") % *i);
                        info.hash = current.first;
                        update = true;
                    }

                    /* Fill in missing narSize fields (from old stores). */
                    if (info.narSize == 0) {
                        printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
                        info.narSize = current.second;
                        update = true;
                    }

                    if (update) updatePathInfo(info);

                }

            } catch (Error & e) {
                /* It's possible that the path got GC'ed, so ignore
                   errors on invalid paths. */
                if (isValidPath(*i)) throw;
                printMsg(lvlError, format("warning: %1%") % e.msg());
            }
        }
    }
}
|
|
|
|
|
|
|
|
|
2010-08-31 11:47:31 +00:00
|
|
|
void LocalStore::verifyPath(const Path & path, const PathSet & store,
|
|
|
|
PathSet & done, PathSet & validPaths)
|
|
|
|
{
|
|
|
|
checkInterrupt();
|
|
|
|
|
|
|
|
if (done.find(path) != done.end()) return;
|
|
|
|
done.insert(path);
|
|
|
|
|
|
|
|
if (!isStorePath(path)) {
|
|
|
|
printMsg(lvlError, format("path `%1%' is not in the Nix store") % path);
|
|
|
|
invalidatePath(path);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (store.find(baseNameOf(path)) == store.end()) {
|
|
|
|
/* Check any referrers first. If we can invalidate them
|
|
|
|
first, then we can invalidate this path as well. */
|
|
|
|
bool canInvalidate = true;
|
|
|
|
PathSet referrers; queryReferrers(path, referrers);
|
|
|
|
foreach (PathSet::iterator, i, referrers)
|
|
|
|
if (*i != path) {
|
|
|
|
verifyPath(*i, store, done, validPaths);
|
|
|
|
if (validPaths.find(*i) != validPaths.end())
|
|
|
|
canInvalidate = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (canInvalidate) {
|
|
|
|
printMsg(lvlError, format("path `%1%' disappeared, removing from database...") % path);
|
|
|
|
invalidatePath(path);
|
|
|
|
} else
|
|
|
|
printMsg(lvlError, format("path `%1%' disappeared, but it still has valid referrers!") % path);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
validPaths.insert(path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-02-18 15:11:08 +00:00
|
|
|
/* Functions for upgrading from the pre-SQLite database. */
|
|
|
|
|
|
|
|
/* Return the set of valid store paths according to the pre-SQLite
   (schema 5) database: one file per valid path under
   <nixDBPath>/info, named after the path's base name. */
PathSet LocalStore::queryValidPathsOld()
{
    PathSet paths;
    Strings names = readDirectory(nixDBPath + "/info");
    for (Strings::iterator n = names.begin(); n != names.end(); ++n) {
        /* Skip dot files (e.g. temporary or hidden entries). */
        if (n->at(0) == '.') continue;
        paths.insert(nixStore + "/" + *n);
    }
    return paths;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Read the metadata of a valid path from the pre-SQLite database:
   parse the "Name: value" lines of its <nixDBPath>/info/<base-name>
   file into a ValidPathInfo.  Throws Error if the info file is
   missing or a line lacks a `:' separator. */
ValidPathInfo LocalStore::queryPathInfoOld(const Path & path)
{
    ValidPathInfo res;
    res.path = path;

    /* Locate and read the old-style info file. */
    string baseName = baseNameOf(path);
    Path infoFile = (format("%1%/info/%2%") % nixDBPath % baseName).str();
    if (!pathExists(infoFile))
        throw Error(format("path `%1%' is not valid") % path);
    string info = readFile(infoFile);

    /* Parse it, line by line. */
    Strings lines = tokenizeString(info, "\n");

    for (Strings::iterator l = lines.begin(); l != lines.end(); ++l) {
        string::size_type colon = l->find(':');
        if (colon == string::npos)
            throw Error(format("corrupt line in `%1%': %2%") % infoFile % *l);
        string name(*l, 0, colon);
        /* Skip the ": " separator.  NOTE(review): a line ending right
           after the `:' makes this constructor throw out_of_range —
           presumably such files never occur in practice; confirm
           before hardening. */
        string value(*l, colon + 2);
        if (name == "References") {
            Strings refs = tokenizeString(value, " ");
            res.references = PathSet(refs.begin(), refs.end());
        } else if (name == "Deriver")
            res.deriver = value;
        else if (name == "Hash")
            res.hash = parseHashField(path, value);
        else if (name == "Registered-At") {
            int n = 0;
            string2Int(value, n);
            res.registrationTime = n;
        }
    }

    return res;
}
|
|
|
|
|
|
|
|
|
2010-02-18 13:16:59 +00:00
|
|
|
/* Upgrade from schema 5 (Nix 0.12) to schema 6 (Nix >= 0.15). */
|
|
|
|
void LocalStore::upgradeStore6()
|
2009-11-06 01:15:44 +00:00
|
|
|
{
|
2010-02-18 13:16:59 +00:00
|
|
|
printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)...");
|
|
|
|
|
2010-02-24 16:30:20 +00:00
|
|
|
openDB(true);
|
2010-02-18 13:16:59 +00:00
|
|
|
|
2010-02-18 15:11:08 +00:00
|
|
|
PathSet validPaths = queryValidPathsOld();
|
2010-02-18 13:16:59 +00:00
|
|
|
|
2010-02-18 14:40:07 +00:00
|
|
|
SQLiteTxn txn(db);
|
|
|
|
|
2010-02-18 13:16:59 +00:00
|
|
|
foreach (PathSet::iterator, i, validPaths) {
|
2011-09-12 09:07:43 +00:00
|
|
|
addValidPath(queryPathInfoOld(*i), false);
|
2010-02-18 13:40:46 +00:00
|
|
|
std::cerr << ".";
|
|
|
|
}
|
|
|
|
|
|
|
|
std::cerr << "|";
|
|
|
|
|
|
|
|
foreach (PathSet::iterator, i, validPaths) {
|
2010-02-18 15:11:08 +00:00
|
|
|
ValidPathInfo info = queryPathInfoOld(*i);
|
2010-02-24 15:07:23 +00:00
|
|
|
unsigned long long referrer = queryValidPathId(*i);
|
|
|
|
foreach (PathSet::iterator, j, info.references)
|
|
|
|
addReference(referrer, queryValidPathId(*j));
|
2010-02-18 13:16:59 +00:00
|
|
|
std::cerr << ".";
|
|
|
|
}
|
|
|
|
|
|
|
|
std::cerr << "\n";
|
|
|
|
|
2010-02-18 14:40:07 +00:00
|
|
|
txn.commit();
|
2009-11-06 01:15:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-09-04 21:06:23 +00:00
|
|
|
}
|