diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 190f0258a..f43510b6d 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -51,13 +51,13 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
 $(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
 	@rm -rf $@
 	$(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix { toplevel = builtins.readFile $<; }'
-	# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+	@# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
 	$(trace-gen) sed -i $@.tmp/*.md -e 's^@docroot@^../..^g'
 	@mv $@.tmp $@
 
 $(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
 	@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
-	# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+	@# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
 	$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' \
 		| sed -e 's^@docroot@^..^g'>> $@.tmp
 	@mv $@.tmp $@
@@ -72,7 +72,7 @@ $(d)/conf-file.json: $(bindir)/nix
 
 $(d)/src/language/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/language/builtins-prefix.md $(bindir)/nix
 	@cat doc/manual/src/language/builtins-prefix.md > $@.tmp
-	# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
+	@# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
 	$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' \
 		| sed -e 's^@docroot@^..^g' >> $@.tmp
 	@cat doc/manual/src/language/builtins-suffix.md >> $@.tmp
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 73bcd6e81..1479822a9 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -56,7 +56,7 @@ public:
     void init() override
     {
         // FIXME: do this lazily?
-        if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
+        if (auto cacheInfo = diskCache->upToDateCacheExists(cacheUri)) {
             wantMassQuery.setDefault(cacheInfo->wantMassQuery);
             priority.setDefault(cacheInfo->priority);
         } else {
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 3e0689534..2645f468b 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -84,11 +84,10 @@ public:
 
     Sync<State> _state;
 
-    NarInfoDiskCacheImpl()
+    NarInfoDiskCacheImpl(Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite")
     {
         auto state(_state.lock());
 
-        Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
         createDirs(dirOf(dbPath));
 
         state->db = SQLite(dbPath);
@@ -98,7 +97,7 @@ public:
         state->db.exec(schema);
 
         state->insertCache.create(state->db,
-            "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+            "insert into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?1, ?2, ?3, ?4, ?5) on conflict (url) do update set timestamp = ?2, storeDir = ?3, wantMassQuery = ?4, priority = ?5 returning id;");
 
         state->queryCache.create(state->db,
            "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ? and timestamp > ?");
@@ -166,6 +165,8 @@ public:
         return i->second;
     }
 
+private:
+
     std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
     {
         auto i = state.caches.find(uri);
@@ -173,15 +174,21 @@ public:
             auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
             if (!queryCache.next())
                 return std::nullopt;
-            state.caches.emplace(uri,
-                Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+            auto cache = Cache {
+                .id = (int) queryCache.getInt(0),
+                .storeDir = queryCache.getStr(1),
+                .wantMassQuery = queryCache.getInt(2) != 0,
+                .priority = (int) queryCache.getInt(3),
+            };
+            state.caches.emplace(uri, cache);
         }
         return getCache(state, uri);
     }
 
-    void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+public:
+    int createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
     {
-        retrySQLite<void>([&]() {
+        return retrySQLite<int>([&]() {
             auto state(_state.lock());
 
             SQLiteTxn txn(state->db);
@@ -190,17 +197,29 @@ public:
             auto cache(queryCacheRaw(*state, uri));
             if (cache)
-                return;
+                return cache->id;
 
-            state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
-            assert(sqlite3_changes(state->db) == 1);
-            state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+            Cache ret {
+                .id = -1, // set below
+                .storeDir = storeDir,
+                .wantMassQuery = wantMassQuery,
+                .priority = priority,
+            };
+
+            {
+                auto r(state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority));
+                assert(r.next());
+                ret.id = (int) r.getInt(0);
+            }
+
+            state->caches[uri] = ret;
 
             txn.commit();
+            return ret.id;
         });
     }
 
-    std::optional<CacheInfo> cacheExists(const std::string & uri) override
+    std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) override
     {
         return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
             auto state(_state.lock());
@@ -208,6 +227,7 @@ public:
             if (!cache)
                 return std::nullopt;
             return CacheInfo {
+                .id = cache->id,
                 .wantMassQuery = cache->wantMassQuery,
                 .priority = cache->priority
             };
@@ -371,4 +391,9 @@ ref<NarInfoDiskCache> getNarInfoDiskCache()
     return cache;
 }
 
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath)
+{
+    return make_ref<NarInfoDiskCacheImpl>(dbPath);
+}
+
 }
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
index 2dcaa76a4..4877f56d8 100644
--- a/src/libstore/nar-info-disk-cache.hh
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -13,16 +13,17 @@ public:
 
     virtual ~NarInfoDiskCache() { }
 
-    virtual void createCache(const std::string & uri, const Path & storeDir,
+    virtual int createCache(const std::string & uri, const Path & storeDir,
         bool wantMassQuery, int priority) = 0;
 
     struct CacheInfo
     {
+        int id;
         bool wantMassQuery;
         int priority;
     };
 
-    virtual std::optional<CacheInfo> cacheExists(const std::string & uri) = 0;
+    virtual std::optional<CacheInfo> upToDateCacheExists(const std::string & uri) = 0;
 
     virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
         const std::string & uri, const std::string & hashPart) = 0;
@@ -45,4 +46,6 @@ public:
    multiple threads. */
 ref<NarInfoDiskCache> getNarInfoDiskCache();
 
+ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
+
 }
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 844553ad3..8d76eee99 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -238,7 +238,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
 
     void init() override
     {
-        if (auto cacheInfo = diskCache->cacheExists(getUri())) {
+        if (auto cacheInfo = diskCache->upToDateCacheExists(getUri())) {
             wantMassQuery.setDefault(cacheInfo->wantMassQuery);
             priority.setDefault(cacheInfo->priority);
         } else {
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 353dff9fa..871f2f3be 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -41,6 +41,15 @@ SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int ex
     throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
 }
 
+static void traceSQL(void * x, const char * sql)
+{
+    // wacky delimiters:
+    //   so that we're quite unambiguous without escaping anything
+    // notice instead of trace:
+    //   so that this can be enabled without getting the firehose in our face.
+    notice("SQL<[%1%]>", sql);
+};
+
 SQLite::SQLite(const Path & path, bool create)
 {
     // useSQLiteWAL also indicates what virtual file system we need.  Using
@@ -58,6 +67,11 @@ SQLite::SQLite(const Path & path, bool create)
     if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
         SQLiteError::throw_(db, "setting timeout");
 
+    if (getEnv("NIX_DEBUG_SQLITE_TRACES") == "1") {
+        // To debug sqlite statements; trace all of them
+        sqlite3_trace(db, &traceSQL, nullptr);
+    }
+
     exec("pragma foreign_keys = 1");
 }
diff --git a/src/libstore/tests/nar-info-disk-cache.cc b/src/libstore/tests/nar-info-disk-cache.cc
new file mode 100644
index 000000000..b4bdb8329
--- /dev/null
+++ b/src/libstore/tests/nar-info-disk-cache.cc
@@ -0,0 +1,123 @@
+#include "nar-info-disk-cache.hh"
+
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+#include "sqlite.hh"
+#include <sqlite3.h>
+
+
+namespace nix {
+
+TEST(NarInfoDiskCacheImpl, create_and_read) {
+    // This is a large single test to avoid some setup overhead.
+
+    int prio = 12345;
+    bool wantMassQuery = true;
+
+    Path tmpDir = createTempDir();
+    AutoDelete delTmpDir(tmpDir);
+    Path dbPath(tmpDir + "/test-narinfo-disk-cache.sqlite");
+
+    int savedId;
+    int barId;
+    SQLite db;
+    SQLiteStmt getIds;
+
+    {
+        auto cache = getTestNarInfoDiskCache(dbPath);
+
+        // Set up "background noise" and check that different caches receive different ids
+        {
+            auto bc1 = cache->createCache("https://bar", "/nix/storedir", wantMassQuery, prio);
+            auto bc2 = cache->createCache("https://xyz", "/nix/storedir", false, 12);
+            ASSERT_NE(bc1, bc2);
+            barId = bc1;
+        }
+
+        // Check that the fields are saved and returned correctly. This does not test
+        // the select statement yet, because of in-memory caching.
+        savedId = cache->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
+        {
+            auto r = cache->upToDateCacheExists("http://foo");
+            ASSERT_TRUE(r);
+            ASSERT_EQ(r->priority, prio);
+            ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+            ASSERT_EQ(savedId, r->id);
+        }
+
+        // We're going to pay special attention to the id field because we had a bug
+        // that changed it.
+        db = SQLite(dbPath);
+        getIds.create(db, "select id from BinaryCaches where url = 'http://foo'");
+
+        {
+            auto q(getIds.use());
+            ASSERT_TRUE(q.next());
+            ASSERT_EQ(savedId, q.getInt(0));
+            ASSERT_FALSE(q.next());
+        }
+
+        // Pretend that the caches are older, but keep one up to date, as "background noise"
+        db.exec("update BinaryCaches set timestamp = timestamp - 1 - 7 * 24 * 3600 where url <> 'https://xyz';");
+
+        // This shows that the in-memory cache works
+        {
+            auto r = cache->upToDateCacheExists("http://foo");
+            ASSERT_TRUE(r);
+            ASSERT_EQ(r->priority, prio);
+            ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+        }
+    }
+
+    {
+        // We can't clear the in-memory cache, so we use a new cache object. This is
+        // more realistic anyway.
+        auto cache2 = getTestNarInfoDiskCache(dbPath);
+
+        {
+            auto r = cache2->upToDateCacheExists("http://foo");
+            ASSERT_FALSE(r);
+        }
+
+        // "Update", same data, check that the id number is reused
+        cache2->createCache("http://foo", "/nix/storedir", wantMassQuery, prio);
+
+        {
+            auto r = cache2->upToDateCacheExists("http://foo");
+            ASSERT_TRUE(r);
+            ASSERT_EQ(r->priority, prio);
+            ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+            ASSERT_EQ(r->id, savedId);
+        }
+
+        {
+            auto q(getIds.use());
+            ASSERT_TRUE(q.next());
+            auto currentId = q.getInt(0);
+            ASSERT_FALSE(q.next());
+            ASSERT_EQ(currentId, savedId);
+        }
+
+        // Check that the fields can be modified, and the id remains the same
+        {
+            auto r0 = cache2->upToDateCacheExists("https://bar");
+            ASSERT_FALSE(r0);
+
+            cache2->createCache("https://bar", "/nix/storedir", !wantMassQuery, prio + 10);
+            auto r = cache2->upToDateCacheExists("https://bar");
+            ASSERT_EQ(r->wantMassQuery, !wantMassQuery);
+            ASSERT_EQ(r->priority, prio + 10);
+            ASSERT_EQ(r->id, barId);
+        }
+
+        // // Force update (no use case yet; we only retrieve cache metadata when stale based on timestamp)
+        // {
+        //     cache2->createCache("https://bar", "/nix/storedir", wantMassQuery, prio + 20);
+        //     auto r = cache2->upToDateCacheExists("https://bar");
+        //     ASSERT_EQ(r->wantMassQuery, wantMassQuery);
+        //     ASSERT_EQ(r->priority, prio + 20);
+        // }
+    }
+}
+
+}
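
The heart of the insertCache change above is the switch from "insert or replace" to an upsert with "on conflict (url) do update ... returning id", so that re-registering a binary cache keeps its row id stable. The following standalone program is an illustrative sketch only, not part of the patch: the toy table, the helper name currentId, and the hard-coded priorities are invented for the demo. It uses the plain sqlite3 C API and a table shaped like BinaryCaches (assuming a SQLite new enough for UPSERT, 3.24+) to show that "insert or replace" allocates a fresh autoincrement id while the upsert updates the row in place.

#include <sqlite3.h>
#include <cstdio>

// Helper: return the id currently stored for url 'http://foo', or -1 if absent.
static int currentId(sqlite3 * db)
{
    sqlite3_stmt * stmt = nullptr;
    sqlite3_prepare_v2(db, "select id from BinaryCaches where url = 'http://foo'", -1, &stmt, nullptr);
    int id = sqlite3_step(stmt) == SQLITE_ROW ? sqlite3_column_int(stmt, 0) : -1;
    sqlite3_finalize(stmt);
    return id;
}

int main()
{
    sqlite3 * db = nullptr;
    sqlite3_open(":memory:", &db);

    // Toy table shaped like the BinaryCaches table used above.
    sqlite3_exec(db,
        "create table BinaryCaches ("
        "  id integer primary key autoincrement not null,"
        "  url text unique not null,"
        "  priority integer not null);",
        nullptr, nullptr, nullptr);

    // First registration of the cache.
    sqlite3_exec(db, "insert into BinaryCaches(url, priority) values ('http://foo', 10);",
        nullptr, nullptr, nullptr);
    int original = currentId(db);

    // Upsert, as in the new insertCache statement: the existing row is updated in place.
    sqlite3_exec(db,
        "insert into BinaryCaches(url, priority) values ('http://foo', 20) "
        "on conflict (url) do update set priority = 20;",
        nullptr, nullptr, nullptr);
    int afterUpsert = currentId(db);   // same as 'original'

    // Old style: the conflicting row is deleted and re-inserted, so a new id is allocated.
    sqlite3_exec(db, "insert or replace into BinaryCaches(url, priority) values ('http://foo', 30);",
        nullptr, nullptr, nullptr);
    int afterReplace = currentId(db);  // differs from 'original'

    printf("original=%d afterUpsert=%d afterReplace=%d\n", original, afterUpsert, afterReplace);
    sqlite3_close(db);
    return 0;
}

Built with -lsqlite3, this prints the same value for original and afterUpsert but a larger one for afterReplace; keeping the id stable matters because cached NAR info rows reference the cache by that id, which is exactly what the new create_and_read test pins down.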