forked from lix-project/lix
Merge pull request #7616 from hercules-ci/fix-3898
Fix foreign key error inserting into NARs #3898
commit c205d10c66
@@ -51,13 +51,13 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
 $(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
 	@rm -rf $@
 	$(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix { toplevel = builtins.readFile $<; }'
-	# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+	@# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
 	$(trace-gen) sed -i $@.tmp/*.md -e 's^@docroot@^../..^g'
 	@mv $@.tmp $@
 
 $(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
 	@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
-	# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+	@# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
 	$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' \
 		| sed -e 's^@docroot@^..^g'>> $@.tmp
 	@mv $@.tmp $@
@@ -72,7 +72,7 @@ $(d)/conf-file.json: $(bindir)/nix
 
 $(d)/src/language/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/language/builtins-prefix.md $(bindir)/nix
 	@cat doc/manual/src/language/builtins-prefix.md > $@.tmp
-	# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+	@# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
 	$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' \
 		| sed -e 's^@docroot@^..^g' >> $@.tmp
 	@cat doc/manual/src/language/builtins-suffix.md >> $@.tmp
@@ -56,7 +56,7 @@ public:
     void init() override
     {
         // FIXME: do this lazily?
-        if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
+        if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) {
             wantMassQuery.setDefault(cacheInfo->wantMassQuery);
             priority.setDefault(cacheInfo->priority);
         } else {
@@ -84,11 +84,10 @@ public:
 
     Sync<State> _state;
 
-    NarInfoDiskCacheImpl()
+    NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite")
     {
         auto state(_state.lock());
 
-        Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
         createDirs(dirOf(dbPath));
 
         state->db = SQLite(dbPath);
@@ -98,7 +97,7 @@ public:
         state->db.exec(schema);
 
         state->insertCache.create(state->db,
-            "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+            "insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
 
         state->queryCache.create(state->db,
            "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
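Context for the statement change above: `insert or replace` resolves the unique-url conflict by deleting the existing BinaryCaches row and inserting a fresh one, so the row gets a new id, and anything still holding the old id (a NARs row, or the in-memory Cache entry) now refers to a row that no longer exists. The standalone sketch below is not part of the patch; it uses the raw sqlite3 C API and a deliberately simplified schema, but it reproduces that failure mode, which is consistent with the foreign key error named in the PR title.

// Standalone sketch (simplified schema, not Nix code): "insert or replace"
// gives the parent row a new id, so a child insert that still uses the old
// id fails the foreign key check.
#include <iostream>
#include <string>
#include <sqlite3.h>

static void exec(sqlite3 * db, const std::string & sql)
{
    char * err = nullptr;
    if (sqlite3_exec(db, sql.c_str(), nullptr, nullptr, &err) != SQLITE_OK) {
        std::cerr << "error: " << (err ? err : "unknown") << "\n";
        sqlite3_free(err);
    }
}

int main()
{
    sqlite3 * db = nullptr;
    sqlite3_open(":memory:", &db);
    exec(db, "pragma foreign_keys = 1;");
    exec(db, "create table BinaryCaches(id integer primary key autoincrement, url text unique);");
    exec(db, "create table NARs(cache integer not null references BinaryCaches(id));");

    exec(db, "insert into BinaryCaches(url) values ('https://cache.example');");
    // Remember the id, much like the in-memory Cache map does.
    sqlite3_int64 id = sqlite3_last_insert_rowid(db);

    // A later refresh of the same cache entry: REPLACE deletes and re-inserts,
    // so the row now has a different id.
    exec(db, "insert or replace into BinaryCaches(url) values ('https://cache.example');");

    // Inserting a NAR against the remembered id now fails:
    //   error: FOREIGN KEY constraint failed
    exec(db, "insert into NARs(cache) values (" + std::to_string(id) + ");");

    sqlite3_close(db);
}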
@@ -166,6 +165,8 @@ public:
         return i->second;
     }
 
+private:
+
     std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
     {
         auto i = state.caches.find(uri);
@@ -173,15 +174,21 @@ public:
             auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
             if (!queryCache.next())
                 return std::nullopt;
-            state.caches.emplace(uri,
-                Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+            auto cache = Cache {
+                .id = (int) queryCache.getInt(0),
+                .storeDir = queryCache.getStr(1),
+                .wantMassQuery = queryCache.getInt(2) != 0,
+                .priority = (int) queryCache.getInt(3),
+            };
+            state.caches.emplace(uri, cache);
         }
         return getCache(state, uri);
     }
 
-    void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+public:
+    int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
     {
-        retrySQLite<void>([&]() {
+        return retrySQLite<int>([&]() {
             auto state(_state.lock());
 
             SQLiteTxn txn(state->db);
 
@@ -190,17 +197,29 @@ public:
             auto cache(queryCacheRaw(*state, uri));
 
             if (cache)
-                return;
+                return cache->id;
 
-            state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
-            assert(sqlite3_changes(state->db) == 1);
-            state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+            Cache ret {
+                .id = -1, // set below
+                .storeDir = storeDir,
+                .wantMassQuery = wantMassQuery,
+                .priority = priority,
+            };
+
+            {
+                auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
+                assert(r.next());
+                ret.id = (int) r.getInt(0);
+            }
+
+            state->caches[uri] = ret;
 
             txn.commit();
+            return ret.id;
         });
     }
 
-    std::optional<CacheInfo> cacheExists(const std::string & uri) override
+    std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) override
     {
         return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
             auto state(_state.lock());
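The rewritten createCache above reads the id out of the upsert's result row (`r.getInt(0)`) rather than calling `sqlite3_last_insert_rowid()`: when the `on conflict ... do update` branch is taken nothing is inserted, so the last-insert rowid would be stale. A minimal standalone sketch of the same pattern, again using the raw sqlite3 C API and a simplified table instead of the patch's SQLiteStmt wrapper (the `returning` clause needs SQLite 3.35 or newer):

// Standalone sketch: an upsert with RETURNING hands back the id of the row
// that was inserted *or* updated, so the id stays stable across refreshes.
#include <cassert>
#include <iostream>
#include <sqlite3.h>

static sqlite3_int64 upsert(sqlite3 * db, const char * url, int priority)
{
    const char * sql =
        "insert into BinaryCaches(url, priority) values (?1, ?2) "
        "on conflict (url) do update set priority = ?2 returning id;";
    sqlite3_stmt * stmt = nullptr;
    int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, nullptr);
    assert(rc == SQLITE_OK);
    sqlite3_bind_text(stmt, 1, url, -1, SQLITE_TRANSIENT);
    sqlite3_bind_int(stmt, 2, priority);
    rc = sqlite3_step(stmt);
    assert(rc == SQLITE_ROW); // RETURNING yields the inserted-or-updated row
    sqlite3_int64 id = sqlite3_column_int64(stmt, 0);
    sqlite3_finalize(stmt);
    return id;
}

int main()
{
    sqlite3 * db = nullptr;
    sqlite3_open(":memory:", &db);
    sqlite3_exec(db,
        "create table BinaryCaches("
        "id integer primary key autoincrement, url text unique not null, priority integer);",
        nullptr, nullptr, nullptr);

    auto first = upsert(db, "https://cache.example", 50);
    auto second = upsert(db, "https://cache.example", 40); // conflict: update path
    std::cout << first << " == " << second << "\n";
    assert(first == second); // rows referencing this id stay valid

    sqlite3_close(db);
}

Because the conflicting row is updated in place, the second call returns the same id as the first; that is the property the new test file later in this diff pins down with ASSERT_EQ(r->id, savedId).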
@@ -208,6 +227,7 @@ public:
             if (!cache)
                 return std::nullopt;
             return CacheInfo {
+                .id = cache->id,
                 .wantMassQuery = cache->wantMassQuery,
                 .priority = cache->priority
             };
@@ -371,4 +391,9 @@ ref<NarInfoDiskCache> getNarInfoDiskCache()
     return cache;
 }
 
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath)
+{
+    return make_ref<NarInfoDiskCacheImpl>(dbPath);
+}
+
 }
@@ -13,16 +13,17 @@ public:
 
     virtual ~NarInfoDiskCache() { }
 
-    virtual void createCache(const std::string & uri, const Path & storeDir,
+    virtual int createCache(const std::string & uri, const Path & storeDir,
         bool wantMassQuery, int priority) = 0;
 
     struct CacheInfo
     {
+        int id;
         bool wantMassQuery;
        int priority;
     };
 
-    virtual std::optional<CacheInfo> cacheExists(const std::string & uri) = 0;
+    virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;
 
     virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
         const std::string & uri, const std::string & hashPart) = 0;
@@ -45,4 +46,6 @@ public:
    multiple threads. */
 ref<NarInfoDiskCache> getNarInfoDiskCache();
 
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
+
 }
@@ -238,7 +238,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
 
     void init() override
     {
-        if (auto cacheInfo = diskCache->cacheExists(getUri())) {
+        if (auto cacheInfo = diskCache->upToDateCacheExists(getUri())) {
             wantMassQuery.setDefault(cacheInfo->wantMassQuery);
             priority.setDefault(cacheInfo->priority);
         } else {
@@ -41,6 +41,15 @@ SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int ex
     throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
 }
 
+static void traceSQL(void * x, const char * sql)
+{
+    // wacky delimiters:
+    //   so that we're quite unambiguous without escaping anything
+    // notice instead of trace:
+    //   so that this can be enabled without getting the firehose in our face.
+    notice("SQL<[%1%]>", sql);
+};
+
 SQLite::SQLite(const Path & path, bool create)
 {
     // useSQLiteWAL also indicates what virtual file system we need. Using
@@ -58,6 +67,11 @@ SQLite::SQLite(const Path & path, bool create)
     if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
         SQLiteError::throw_(db, "setting timeout");
 
+    if (getEnv("NIX_DEBUG_SQLITE_TRACES") == "1") {
+        // To debug sqlite statements; trace all of them
+        sqlite3_trace(db, &traceSQL, nullptr);
+    }
+
     exec("pragma foreign_keys = 1");
 }
 
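The tracing added in the two sqlite.cc hunks above is opt-in through the NIX_DEBUG_SQLITE_TRACES environment variable and relies on SQLite's statement-trace callback. For readers unfamiliar with that hook, here is a standalone sketch of the same mechanism (raw sqlite3 API, nothing Nix-specific):

// Standalone sketch of sqlite3_trace(): the callback receives the text of
// each SQL statement as it begins executing.
#include <iostream>
#include <sqlite3.h>

static void traceSQL(void *, const char * sql)
{
    // Same formatting idea as notice("SQL<[%1%]>", sql) in the patch.
    std::cerr << "SQL<[" << sql << "]>\n";
}

int main()
{
    sqlite3 * db = nullptr;
    sqlite3_open(":memory:", &db);
    sqlite3_trace(db, traceSQL, nullptr);
    sqlite3_exec(db, "create table t(x); insert into t values (42);",
        nullptr, nullptr, nullptr);
    // Each of the two statements above is echoed to stderr as it runs.
    sqlite3_close(db);
}

With the patch applied, running Nix with NIX_DEBUG_SQLITE_TRACES=1 set should produce the same kind of SQL<[...]> lines via notice().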
src/libstore/tests/nar-info-disk-cache.cc (new file, 123 lines)
@@ -0,0 +1,123 @@
+#include "nar-info-disk-cache.hh"
+
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+#include "sqlite.hh"
+#include <sqlite3.h>
+
+
+namespace nix {
+
+TEST(NarInfoDiskCacheImpl, create_and_read) {
+    // This is a large single test to avoid some setup overhead.
+
+    int prio = 12345;
+    bool wantMassQuery = true;
+
+    Path tmpDir = createTempDir();
+    AutoDelete delTmpDir(tmpDir);
+    Path dbPath(tmpDir + "/test-narinfo-disk-cache.sqlite");
+
+    int savedId;
+    int barId;
+    SQLite db;
+    SQLiteStmt getIds;
+
+    {
+        auto cache = getTestNarInfoDiskCache(dbPath);
+
+        // Set up "background noise" and check that different caches receive different ids
+        {
+            auto bc1 = cache->createCache("https://bar", "/nix/storedir", wantMassQuery, prio);
+            auto bc2 = cache->createCache("https://xyz", "/nix/storedir", false, 12);
+            ASSERT_NE(bc1, bc2);
+            barId = bc1;
+        }
+
+        // Check that the fields are saved and returned correctly. This does not test
+        // the select statement yet, because of in-memory caching.
+        savedId = cache->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);;
+        {
+            auto r = cache->upToDateCacheExists("http://foo");
+            ASSERT_TRUE(r);
+            ASSERT_EQ(r->priority, prio);
+            ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+            ASSERT_EQ(savedId, r->id);
+        }
+
+        // We're going to pay special attention to the id field because we had a bug
+        // that changed it.
+        db = SQLite(dbPath);
+        getIds.create(db, "select id from BinaryCaches where url = 'http://foo'");
+
+        {
+            auto q(getIds.use());
+            ASSERT_TRUE(q.next());
+            ASSERT_EQ(savedId, q.getInt(0));
+            ASSERT_FALSE(q.next());
+        }
+
+        // Pretend that the caches are older, but keep one up to date, as "background noise"
+        db.exec("update BinaryCaches set timestamp = timestamp - 1 - 7 * 24 * 3600 where url <> 'https://xyz';");
+
+        // This shows that the in-memory cache works
+        {
+            auto r = cache->upToDateCacheExists("http://foo");
+            ASSERT_TRUE(r);
+            ASSERT_EQ(r->priority, prio);
+            ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+        }
+    }
+
+    {
+        // We can't clear the in-memory cache, so we use a new cache object. This is
+        // more realistic anyway.
+        auto cache2 = getTestNarInfoDiskCache(dbPath);
+
+        {
+            auto r = cache2->upToDateCacheExists("http://foo");
+            ASSERT_FALSE(r);
+        }
+
+        // "Update", same data, check that the id number is reused
+        cache2->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
+
+        {
+            auto r = cache2->upToDateCacheExists("http://foo");
+            ASSERT_TRUE(r);
+            ASSERT_EQ(r->priority, prio);
+            ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+            ASSERT_EQ(r->id, savedId);
+        }
+
+        {
+            auto q(getIds.use());
+            ASSERT_TRUE(q.next());
+            auto currentId = q.getInt(0);
+            ASSERT_FALSE(q.next());
+            ASSERT_EQ(currentId, savedId);
+        }
+
+        // Check that the fields can be modified, and the id remains the same
+        {
+            auto r0 = cache2->upToDateCacheExists("https://bar");
+            ASSERT_FALSE(r0);
+
+            cache2->createCache("https://bar", "/nix/storedir", !wantMassQuery, prio + 10);
+            auto r = cache2->upToDateCacheExists("https://bar");
+            ASSERT_EQ(r->wantMassQuery, !wantMassQuery);
+            ASSERT_EQ(r->priority, prio + 10);
+            ASSERT_EQ(r->id, barId);
+        }
+
+        // // Force update (no use case yet; we only retrieve cache metadata when stale based on timestamp)
+        // {
+        //     cache2->createCache("https://bar", "/nix/storedir", wantMassQuery, prio + 20);
+        //     auto r = cache2->upToDateCacheExists("https://bar");
+        //     ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+        //     ASSERT_EQ(r->priority, prio + 20);
+        // }
+    }
+}
+
+}