diff --git a/src/hydra-queue-runner/binary-cache-store.cc b/src/hydra-queue-runner/binary-cache-store.cc
index bf9215d5..db25b7c6 100644
--- a/src/hydra-queue-runner/binary-cache-store.cc
+++ b/src/hydra-queue-runner/binary-cache-store.cc
@@ -32,6 +32,11 @@ void BinaryCacheStore::init()
         upsertFile(cacheInfoFile, "StoreDir: " + settings.nixStore + "\n");
 }
 
+const BinaryCacheStore::Stats & BinaryCacheStore::getStats()
+{
+    return stats;
+}
+
 Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
 {
     assertStorePath(storePath);
@@ -60,23 +65,36 @@ void BinaryCacheStore::addToCache(const ValidPathInfo & info,
     narInfo.fileHash = hashString(htSHA256, narXz);
     narInfo.fileSize = narXz.size();
 
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
     printMsg(lvlTalkative, format("copying path ‘%1%’ (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache")
         % info.path % info.narSize
         % ((1.0 - (double) narXz.size() / nar.size()) * 100.0)
-        % std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
+        % duration);
 
     /* Atomically write the NAR file. */
     narInfo.url = "nar/" + printHash32(narInfo.fileHash) + ".nar.xz";
-    if (!fileExists(narInfo.url)) upsertFile(narInfo.url, narXz);
+    if (!fileExists(narInfo.url)) {
+        stats.narWrite++;
+        upsertFile(narInfo.url, narXz);
+    } else
+        stats.narWriteAverted++;
+
+    stats.narWriteBytes += nar.size();
+    stats.narWriteCompressedBytes += narXz.size();
+    stats.narWriteCompressionTimeMs += duration;
 
     /* Atomically write the NAR info file.*/
     if (secretKey) narInfo.sign(*secretKey);
 
     upsertFile(narInfoFile, narInfo.to_string());
+
+    stats.narInfoWrite++;
 }
 
 NarInfo BinaryCacheStore::readNarInfo(const Path & storePath)
 {
+    stats.narInfoRead++;
+
     auto narInfoFile = narInfoFileFor(storePath);
     auto narInfo = NarInfo(getFile(narInfoFile), narInfoFile);
     assert(narInfo.path == storePath);
@@ -102,6 +120,9 @@ void BinaryCacheStore::exportPath(const Path & storePath, bool sign, Sink & sink
 
     auto nar = getFile(res.url);
 
+    stats.narRead++;
+    stats.narReadCompressedBytes += nar.size();
+
     /* Decompress the NAR. FIXME: would be nice to have the remote
        side do this. */
     if (res.compression == "none")
@@ -111,6 +132,8 @@ void BinaryCacheStore::exportPath(const Path & storePath, bool sign, Sink & sink
     else
         throw Error(format("unknown NAR compression type ‘%1%’") % nar);
 
+    stats.narReadBytes += nar.size();
+
     printMsg(lvlTalkative, format("exporting path ‘%1%’ (%2% bytes)") % storePath % nar.size());
 
     assert(nar.size() % 8 == 0);
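
A note on the arithmetic these counters feed: the hunks above record uncompressed bytes, compressed bytes, and time spent in xz, from which the status output later derives a compression ratio and throughput. A minimal, self-contained sketch of that derivation (the sample values are invented for illustration; the formulas mirror the ones in State::dumpStatus() below):

    // Illustrative only: how narCompressionSavings and narCompressionSpeed
    // fall out of the Stats counters. Sample values are made up.
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t narWriteBytes = 100 * 1024 * 1024;          // 100 MiB of uncompressed NARs
        uint64_t narWriteCompressedBytes = 25 * 1024 * 1024; // 25 MiB after xz
        uint64_t narWriteCompressionTimeMs = 4000;           // 4 s spent compressing

        double savings = 1.0 - (double) narWriteCompressedBytes / narWriteBytes;
        double mibPerSec = (double) narWriteBytes / narWriteCompressionTimeMs
            * 1000.0 / (1024.0 * 1024.0);
        printf("savings %.0f%%, speed %.1f MiB/s\n", savings * 100.0, mibPerSec); // 75%, 25.0 MiB/s
        return 0;
    }
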
diff --git a/src/hydra-queue-runner/binary-cache-store.hh b/src/hydra-queue-runner/binary-cache-store.hh
index 8883075b..019b2611 100644
--- a/src/hydra-queue-runner/binary-cache-store.hh
+++ b/src/hydra-queue-runner/binary-cache-store.hh
@@ -3,6 +3,8 @@
 #include "crypto.hh"
 #include "store-api.hh"
 
+#include <atomic>
+
 namespace nix {
 
 struct NarInfo;
@@ -36,8 +38,26 @@ public:
 
     virtual void init();
 
+    struct Stats
+    {
+        std::atomic<uint64_t> narInfoRead{0};
+        std::atomic<uint64_t> narInfoWrite{0};
+        std::atomic<uint64_t> narRead{0};
+        std::atomic<uint64_t> narReadBytes{0};
+        std::atomic<uint64_t> narReadCompressedBytes{0};
+        std::atomic<uint64_t> narWrite{0};
+        std::atomic<uint64_t> narWriteAverted{0};
+        std::atomic<uint64_t> narWriteBytes{0};
+        std::atomic<uint64_t> narWriteCompressedBytes{0};
+        std::atomic<uint64_t> narWriteCompressionTimeMs{0};
+    };
+
+    const Stats & getStats();
+
 private:
 
+    Stats stats;
+
     std::string narInfoFileFor(const Path & storePath);
 
     void addToCache(const ValidPathInfo & info, const string & nar);
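
The counters are plain std::atomic<uint64_t> fields rather than a mutex-guarded struct, so the hot paths in addToCache()/exportPath() can bump them with a single atomic increment, and a reader can poll them through getStats() without blocking transfers. A standalone sketch (not part of the patch) of why the atomics matter when several queue-runner threads share one store:

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <thread>

    int main()
    {
        std::atomic<uint64_t> narInfoRead{0};
        // Two threads bumping the same counter, as the store methods do;
        // ++ on std::atomic is one atomic read-modify-write, so no
        // updates are lost and no mutex is needed.
        std::thread t1([&] { for (int i = 0; i < 100000; i++) narInfoRead++; });
        std::thread t2([&] { for (int i = 0; i < 100000; i++) narInfoRead++; });
        t1.join(); t2.join();
        assert(narInfoRead == 200000);
        return 0;
    }
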
diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index afa4a565..48c6865a 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -23,21 +23,6 @@ State::State()
     if (hydraData == "") throw Error("$HYDRA_DATA must be set");
 
     logDir = canonPath(hydraData + "/build-logs");
-
-#if 0
-    auto store = make_ref<LocalBinaryCacheStore>(getLocalStore(),
-        "/home/eelco/Misc/Keys/test.nixos.org/secret",
-        "/home/eelco/Misc/Keys/test.nixos.org/public",
-        "/tmp/binary-cache");
-#endif
-
-    auto store = std::make_shared<S3BinaryCacheStore>(
-        []() { return openStore(); },
-        "/home/eelco/Misc/Keys/test.nixos.org/secret",
-        "/home/eelco/Misc/Keys/test.nixos.org/public",
-        "nix-test-cache-3");;
-    store->init();
-    _destStore = store;
 }
 
 
@@ -532,8 +517,8 @@ void State::dumpStatus(Connection & conn, bool log)
         root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
         root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
         root.attr("nrStepsWaiting", nrStepsWaiting);
-        root.attr("bytesSent"); out << bytesSent;
-        root.attr("bytesReceived"); out << bytesReceived;
+        root.attr("bytesSent", bytesSent);
+        root.attr("bytesReceived", bytesReceived);
         root.attr("nrBuildsRead", nrBuildsRead);
         root.attr("nrBuildsDone", nrBuildsDone);
         root.attr("nrStepsDone", nrStepsDone);
@@ -542,8 +527,8 @@ void State::dumpStatus(Connection & conn, bool log)
         if (nrStepsDone) {
             root.attr("totalStepTime", totalStepTime);
             root.attr("totalStepBuildTime", totalStepBuildTime);
-            root.attr("avgStepTime"); out << (float) totalStepTime / nrStepsDone;
-            root.attr("avgStepBuildTime"); out << (float) totalStepBuildTime / nrStepsDone;
+            root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
+            root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
         }
         root.attr("nrQueueWakeups", nrQueueWakeups);
         root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
@@ -565,8 +550,8 @@ void State::dumpStatus(Connection & conn, bool log)
                 if (m->state->nrStepsDone) {
                     nested2.attr("totalStepTime", s->totalStepTime);
                     nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
-                    nested2.attr("avgStepTime"); out << (float) s->totalStepTime / s->nrStepsDone;
-                    nested2.attr("avgStepBuildTime"); out << (float) s->totalStepBuildTime / s->nrStepsDone;
+                    nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
+                    nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
                 }
             }
         }
@@ -577,7 +562,7 @@ void State::dumpStatus(Connection & conn, bool log)
             for (auto & jobset : *jobsets_) {
                 nested.attr(jobset.first.first + ":" + jobset.first.second);
                 JSONObject nested2(out);
-                nested2.attr("shareUsed"); out << jobset.second->shareUsed();
+                nested2.attr("shareUsed", jobset.second->shareUsed());
                 nested2.attr("seconds", jobset.second->getSeconds());
             }
         }
@@ -597,6 +582,59 @@ void State::dumpStatus(Connection & conn, bool log)
                 nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
             }
         }
+
+        auto store = dynamic_cast<BinaryCacheStore *>(&*getDestStore());
+
+        if (store) {
+            root.attr("store");
+            JSONObject nested(out);
+
+            auto & stats = store->getStats();
+            nested.attr("narInfoRead", stats.narInfoRead);
+            nested.attr("narInfoWrite", stats.narInfoWrite);
+            nested.attr("narRead", stats.narRead);
+            nested.attr("narReadBytes", stats.narReadBytes);
+            nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
+            nested.attr("narWrite", stats.narWrite);
+            nested.attr("narWriteAverted", stats.narWriteAverted);
+            nested.attr("narWriteBytes", stats.narWriteBytes);
+            nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
+            nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
+            nested.attr("narCompressionSavings",
+                stats.narWriteBytes
+                ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+                : 0.0);
+            nested.attr("narCompressionSpeed", // MiB/s
+                stats.narWriteCompressionTimeMs
+                ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
+                : 0.0);
+
+            auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
+            if (s3Store) {
+                nested.attr("s3");
+                JSONObject nested2(out);
+                auto & s3Stats = s3Store->getS3Stats();
+                nested2.attr("put", s3Stats.put);
+                nested2.attr("putBytes", s3Stats.putBytes);
+                nested2.attr("putTimeMs", s3Stats.putTimeMs);
+                nested2.attr("putSpeed",
+                    s3Stats.putTimeMs
+                    ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
+                    : 0.0);
+                nested2.attr("get", s3Stats.get);
+                nested2.attr("getBytes", s3Stats.getBytes);
+                nested2.attr("getTimeMs", s3Stats.getTimeMs);
+                nested2.attr("getSpeed",
+                    s3Stats.getTimeMs
+                    ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
+                    : 0.0);
+                nested2.attr("head", s3Stats.head);
+                nested2.attr("costDollarApprox",
+                    (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+                    + s3Stats.put / 1000.0 * 0.005
+                    + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
+            }
+        }
     }
 
     if (log) printMsg(lvlInfo, format("status: %1%") % out.str());
@@ -685,6 +723,21 @@ void State::run(BuildID buildOne)
     if (!lock)
         throw Error("hydra-queue-runner is already running");
 
+#if 0
+    auto store = make_ref<LocalBinaryCacheStore>(getLocalStore(),
+        "/home/eelco/Misc/Keys/test.nixos.org/secret",
+        "/home/eelco/Misc/Keys/test.nixos.org/public",
+        "/tmp/binary-cache");
+#endif
+
+    auto store = std::make_shared<S3BinaryCacheStore>(
+        []() { return openStore(); },
+        "/home/eelco/Misc/Keys/test.nixos.org/secret",
+        "/home/eelco/Misc/Keys/test.nixos.org/public",
+        "nix-test-cache-3");
+    store->init();
+    _destStore = store;
+
     {
         auto conn(dbPool.get());
         clearBusy(*conn, 0);
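
The constants in costDollarApprox appear to correspond to 2016-era S3 pricing: roughly $0.004 per 10,000 GET/HEAD requests, $0.005 per 1,000 PUT requests, and $0.09 per GiB transferred out (these rates are inferred from the constants, not stated in the patch). Worked through for a hypothetical 1,000,000 GETs+HEADs, 100,000 PUTs, and 50 GiB downloaded: 100 × $0.004 + 100 × $0.005 + 50 × $0.09 = $0.40 + $0.50 + $4.50 = $5.40, hence "approx" — storage costs are not included.
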
diff --git a/src/hydra-queue-runner/s3-binary-cache-store.cc b/src/hydra-queue-runner/s3-binary-cache-store.cc
index 9d545d29..cec6f9cf 100644
--- a/src/hydra-queue-runner/s3-binary-cache-store.cc
+++ b/src/hydra-queue-runner/s3-binary-cache-store.cc
@@ -62,8 +62,15 @@ void S3BinaryCacheStore::init()
     BinaryCacheStore::init();
 }
 
+const S3BinaryCacheStore::Stats & S3BinaryCacheStore::getS3Stats()
+{
+    return stats;
+}
+
 bool S3BinaryCacheStore::fileExists(const std::string & path)
 {
+    stats.head++;
+
     auto res = client->HeadObject(
         Aws::S3::Model::HeadObjectRequest()
         .WithBucket(bucketName)
@@ -91,16 +98,21 @@ void S3BinaryCacheStore::upsertFile(const std::string & path, const std::string
     request.SetBody(stream);
 
+    stats.put++;
+    stats.putBytes += data.size();
+
     auto now1 = std::chrono::steady_clock::now();
 
     auto result = checkAws(client->PutObject(request));
 
     auto now2 = std::chrono::steady_clock::now();
 
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
     printMsg(lvlError, format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-        % bucketName % path
-        % data.size()
-        % std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
+        % bucketName % path % data.size() % duration);
+
+    stats.putTimeMs += duration;
 }
 
 std::string S3BinaryCacheStore::getFile(const std::string & path)
@@ -114,6 +126,8 @@ std::string S3BinaryCacheStore::getFile(const std::string & path)
         return Aws::New<std::stringstream>("STRINGSTREAM");
     });
 
+    stats.get++;
+
     auto now1 = std::chrono::steady_clock::now();
 
     auto result = checkAws(client->GetObject(request));
@@ -122,10 +136,13 @@ std::string S3BinaryCacheStore::getFile(const std::string & path)
 
     auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
 
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
     printMsg(lvlError, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-        % bucketName % path
-        % res.size()
-        % std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
+        % bucketName % path % res.size() % duration);
+
+    stats.getBytes += res.size();
+    stats.getTimeMs += duration;
 
     return res;
 }
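
Both transfer paths use the same instrumentation pattern: sample steady_clock before and after the blocking AWS call, convert the difference to milliseconds, and feed one log line plus the counters. A generic sketch of that pattern under the same assumptions (timedMs is a hypothetical helper for illustration, not something the patch introduces):

    #include <chrono>
    #include <cstdint>

    // Times an arbitrary blocking operation, e.g. client->PutObject(request),
    // with the same steady_clock / duration_cast combination used above.
    // steady_clock is the right choice: unlike system_clock it never jumps
    // backwards, so durations stay non-negative.
    template<typename F>
    uint64_t timedMs(F && f)
    {
        auto now1 = std::chrono::steady_clock::now();
        f();
        auto now2 = std::chrono::steady_clock::now();
        return std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
    }
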
diff --git a/src/hydra-queue-runner/s3-binary-cache-store.hh b/src/hydra-queue-runner/s3-binary-cache-store.hh
index a39a498b..15d717c9 100644
--- a/src/hydra-queue-runner/s3-binary-cache-store.hh
+++ b/src/hydra-queue-runner/s3-binary-cache-store.hh
@@ -2,6 +2,8 @@
 
 #include "binary-cache-store.hh"
 
+#include <atomic>
+
 namespace Aws { namespace Client { class ClientConfiguration; } }
 namespace Aws { namespace S3 { class S3Client; } }
 
@@ -24,8 +26,23 @@ public:
 
     void init() override;
 
+    struct Stats
+    {
+        std::atomic<uint64_t> put{0};
+        std::atomic<uint64_t> putBytes{0};
+        std::atomic<uint64_t> putTimeMs{0};
+        std::atomic<uint64_t> get{0};
+        std::atomic<uint64_t> getBytes{0};
+        std::atomic<uint64_t> getTimeMs{0};
+        std::atomic<uint64_t> head{0};
+    };
+
+    const Stats & getS3Stats();
+
 private:
 
     Stats stats;
 
     ref<Aws::Client::ClientConfiguration> makeConfig();
 
 protected:
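
With both getters in place, anything holding the store can poll live counters while transfers are in flight. A hypothetical consumer, mirroring what State::dumpStatus() does with JSONObject (the nix:: qualification and the include are assumptions about the surrounding codebase):

    // Assumes s3-binary-cache-store.hh is included; .load() reads each
    // std::atomic<uint64_t> field into a plain integer for printing.
    #include <iostream>

    void logS3Stats(nix::S3BinaryCacheStore & store)
    {
        auto & s = store.getS3Stats();
        std::cout << "s3: " << s.put.load() << " puts (" << s.putBytes.load()
                  << " bytes), " << s.get.load() << " gets, "
                  << s.head.load() << " heads" << std::endl;
    }
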