NarInfoDiskCache: Keep BinaryCache.id stable and improve test
Fixes #3898.

The entire `BinaryCaches` row used to be replaced once it became stale according to its `timestamp` column. In a concurrent scenario this leads to foreign key conflicts: the different in-process `state.caches` instances diverge, so an older process keeps using the `id` of a record that has since been deleted. This phenomenon also appears to have caused the cache of actual narinfos to be erased roughly every week, even though the default TTL for narinfos is supposed to be 30 days.
parent 2ceece3ef3
commit fb94d5cabd
3 changed files with 73 additions and 36 deletions
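
The root cause is the semantics of `insert or replace`: it resolves a unique-key conflict by deleting the existing row and inserting a brand-new one, so the autoincrement `id` changes every time a stale entry is refreshed, whereas the upsert form updates the row in place and keeps its `id`. A minimal sketch at the SQLite level, assuming a table shaped roughly like `BinaryCaches` (autoincrement `id`, unique `url`; the remaining columns are omitted here):

    create table BinaryCaches (
        id        integer primary key autoincrement not null,
        url       text unique not null,
        timestamp integer not null
    );

    insert into BinaryCaches(url, timestamp) values ('https://cache.example', 100);
    select id from BinaryCaches where url = 'https://cache.example';   -- 1

    -- old behaviour: the conflicting row is deleted and re-inserted, so it gets a new id;
    -- anything still referencing id 1 now points at a deleted record, which is what the
    -- commit message attributes the foreign key conflicts and the weekly narinfo loss to
    insert or replace into BinaryCaches(url, timestamp) values ('https://cache.example', 200);
    select id from BinaryCaches where url = 'https://cache.example';   -- 2

    -- new behaviour: the row is updated in place and keeps its id, and "returning id"
    -- hands that id back without a separate query (returning needs SQLite >= 3.35)
    insert into BinaryCaches(url, timestamp) values ('https://cache.example', 300)
        on conflict (url) do update set timestamp = 300
        returning id;                                                   -- 2

The hunks below make exactly this change to the prepared statement and thread the returned id through `createCache()`.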
@@ -97,7 +97,7 @@ public:
         state->db.exec(schema);
 
         state->insertCache.create(state->db,
-            "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+            "insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
 
         state->queryCache.create(state->db,
             "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
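Two details of the new statement are worth noting. The placeholders switch from anonymous `?` to numbered parameters (`?1` … `?5`) because SQLite lets a numbered parameter be referenced more than once, so the same bound values feed both the `values (...)` list and the `do update set ...` clause. And the trailing `returning id` turns the statement into one that yields a result row, which is why it is consumed with `next()` further down instead of `exec()`.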
|
@@ -165,6 +165,8 @@ public:
         return i->second;
     }
 
+private:
+
     std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
     {
         auto i = state.caches.find(uri);
|
@@ -172,15 +174,21 @@ public:
             auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
             if (!queryCache.next())
                 return std::nullopt;
-            state.caches.emplace(uri,
-                Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+            auto cache = Cache {
+                .id = (int) queryCache.getInt(0),
+                .storeDir = queryCache.getStr(1),
+                .wantMassQuery = queryCache.getInt(2) != 0,
+                .priority = (int) queryCache.getInt(3),
+            };
+            state.caches.emplace(uri, cache);
         }
         return getCache(state, uri);
     }
 
-    void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+public:
+
+    int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
     {
-        retrySQLite<void>([&]() {
+        return retrySQLite<int>([&]() {
            auto state(_state.lock());
            SQLiteTxn txn(state->db);
 
@@ -189,13 +197,25 @@ public:
             auto cache(queryCacheRaw(*state, uri));
 
             if (cache)
-                return;
+                return cache->id;
 
-            state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
-            assert(sqlite3_changes(state->db) == 1);
-            state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+            Cache ret {
+                .id = -1, // set below
+                .storeDir = storeDir,
+                .wantMassQuery = wantMassQuery,
+                .priority = priority,
+            };
+
+            {
+                auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
+                assert(r.next());
+                ret.id = (int) r.getInt(0);
+            }
+
+            state->caches[uri] = ret;
 
             txn.commit();
+            return ret.id;
         });
     }
 
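
The rewritten insert path can `assert(r.next())` unconditionally because `returning` produces a row on both outcomes of the upsert: on a fresh url it reports the newly inserted row's id, on a conflict it reports the id of the pre-existing row it just updated. Continuing the hypothetical table from the sketch near the top:

    -- fresh url: the insert path runs and returning reports the newly assigned id
    insert into BinaryCaches(url, timestamp) values ('https://other.example', 100)
        on conflict (url) do update set timestamp = 100
        returning id;   -- e.g. 3

    -- same url again: the conflict path runs and returning reports the existing id
    insert into BinaryCaches(url, timestamp) values ('https://other.example', 200)
        on conflict (url) do update set timestamp = 200
        returning id;   -- still 3

Had the statement used `do nothing` instead of `do update`, no row would come back on the conflict path; always updating the fields guarantees exactly one returned row either way.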
@@ -13,7 +13,7 @@ public:
 
     virtual ~NarInfoDiskCache() { }
 
-    virtual void createCache(const std::string & uri, const Path & storeDir,
+    virtual int createCache(const std::string & uri, const Path & storeDir,
         bool wantMassQuery, int priority) = 0;
 
     struct CacheInfo
@@ -8,69 +8,68 @@
 
 namespace nix {
 
-RC_GTEST_PROP(
-    NarInfoDiskCacheImpl,
-    create_and_read,
-    (int prio, bool wantMassQuery)
-)
-{
+TEST(NarInfoDiskCacheImpl, create_and_read) {
+    int prio = 12345;
+    bool wantMassQuery = true;
     Path tmpDir = createTempDir();
     AutoDelete delTmpDir(tmpDir);
     Path dbPath(tmpDir + "/test-narinfo-disk-cache.sqlite");
     auto cache = getTestNarInfoDiskCache(dbPath);
 
-    cache->createCache("other://uri", "/nix/storedir", wantMassQuery, prio);
-    cache->createCache("other://uri-2", "/nix/storedir", wantMassQuery, prio);
-
-    cache->createCache("the://uri", "/nix/storedir", wantMassQuery, prio);
-
     {
-        auto r = cache->upToDateCacheExists("the://uri");
+        auto bc1 = cache->createCache("https://bar", "/nix/storedir", wantMassQuery, prio);
+        auto bc2 = cache->createCache("https://xyz", "/nix/storedir", false, 12);
+        ASSERT_NE(bc1, bc2);
+    }
+
+    int savedId = cache->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);;
+    {
+        auto r = cache->upToDateCacheExists("http://foo");
         ASSERT_TRUE(r);
         ASSERT_EQ(r->priority, prio);
         ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+        ASSERT_EQ(savedId, r->id);
     }
 
     SQLite db(dbPath);
     SQLiteStmt getIds;
-    getIds.create(db, "SELECT id FROM BinaryCaches WHERE url = 'the://uri'");
+    getIds.create(db, "SELECT id FROM BinaryCaches WHERE url = 'http://foo'");
 
-    int savedId;
     {
        auto q(getIds.use());
        ASSERT_TRUE(q.next());
-        savedId = q.getInt(0);
+        ASSERT_EQ(savedId, q.getInt(0));
        ASSERT_FALSE(q.next());
     }
 
 
     db.exec("UPDATE BinaryCaches SET timestamp = timestamp - 1 - 7 * 24 * 3600;");
 
     // Relies on memory cache
     {
-        auto r = cache->upToDateCacheExists("the://uri");
+        auto r = cache->upToDateCacheExists("http://foo");
         ASSERT_TRUE(r);
         ASSERT_EQ(r->priority, prio);
         ASSERT_EQ(r->wantMassQuery, wantMassQuery);
     }
 
+    // We can't clear the in-memory cache, so we use a new cache object.
     auto cache2 = getTestNarInfoDiskCache(dbPath);
 
     {
-        auto r = cache2->upToDateCacheExists("the://uri");
+        auto r = cache2->upToDateCacheExists("http://foo");
         ASSERT_FALSE(r);
     }
 
-    cache2->createCache("the://uri", "/nix/storedir", wantMassQuery, prio);
+    // Update, same data, check that the id number is reused
+    cache2->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
 
     {
-        auto r = cache->upToDateCacheExists("the://uri");
+        auto r = cache2->upToDateCacheExists("http://foo");
         ASSERT_TRUE(r);
         ASSERT_EQ(r->priority, prio);
         ASSERT_EQ(r->wantMassQuery, wantMassQuery);
-        // FIXME, reproduces #3898
-        // ASSERT_EQ(r->id, savedId);
-        (void) savedId;
+        ASSERT_EQ(r->id, savedId);
     }
 
     {
|
@@ -78,10 +77,28 @@ RC_GTEST_PROP(
         ASSERT_TRUE(q.next());
         auto currentId = q.getInt(0);
         ASSERT_FALSE(q.next());
-        // FIXME, reproduces #3898
-        // ASSERT_EQ(currentId, savedId);
-        (void) currentId;
+        ASSERT_EQ(currentId, savedId);
     }
 
+    // Check that the fields can be modified
+    {
+        auto r0 = cache2->upToDateCacheExists("https://bar");
+        ASSERT_FALSE(r0);
+
+        cache2->createCache("https://bar", "/nix/storedir", !wantMassQuery, prio + 10);
+        auto r = cache2->upToDateCacheExists("https://bar");
+        ASSERT_EQ(r->wantMassQuery, !wantMassQuery);
+        ASSERT_EQ(r->priority, prio + 10);
+    }
+
+    // // Force update (no use case yet; we only retrieve cache metadata when stale based on timestamp)
+    // {
+    //     cache2->createCache("https://bar", "/nix/storedir", wantMassQuery, prio + 20);
+    //     auto r = cache2->upToDateCacheExists("https://bar");
+    //     ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+    //     ASSERT_EQ(r->priority, prio + 20);
+    // }
+
 }
 
 }