forked from lix-project/hydra

commit a0f74047da
parent dc4a00347d

Keep some statistics for the binary cache stores

@@ -32,6 +32,11 @@ void BinaryCacheStore::init()
     upsertFile(cacheInfoFile, "StoreDir: " + settings.nixStore + "\n");
 }
 
+const BinaryCacheStore::Stats & BinaryCacheStore::getStats()
+{
+    return stats;
+}
+
 Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
 {
     assertStorePath(storePath);

@@ -60,23 +65,36 @@ void BinaryCacheStore::addToCache(const ValidPathInfo & info,
     narInfo.fileHash = hashString(htSHA256, narXz);
     narInfo.fileSize = narXz.size();
 
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
     printMsg(lvlTalkative, format("copying path ‘%1%’ (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache")
         % info.path % info.narSize
         % ((1.0 - (double) narXz.size() / nar.size()) * 100.0)
-        % std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
+        % duration);
 
     /* Atomically write the NAR file. */
     narInfo.url = "nar/" + printHash32(narInfo.fileHash) + ".nar.xz";
-    if (!fileExists(narInfo.url)) upsertFile(narInfo.url, narXz);
+    if (!fileExists(narInfo.url)) {
+        stats.narWrite++;
+        upsertFile(narInfo.url, narXz);
+    } else
+        stats.narWriteAverted++;
+
+    stats.narWriteBytes += nar.size();
+    stats.narWriteCompressedBytes += narXz.size();
+    stats.narWriteCompressionTimeMs += duration;
 
     /* Atomically write the NAR info file.*/
     if (secretKey) narInfo.sign(*secretKey);
 
     upsertFile(narInfoFile, narInfo.to_string());
+
+    stats.narInfoWrite++;
 }
 
 NarInfo BinaryCacheStore::readNarInfo(const Path & storePath)
 {
+    stats.narInfoRead++;
+
     auto narInfoFile = narInfoFileFor(storePath);
     auto narInfo = NarInfo(getFile(narInfoFile), narInfoFile);
     assert(narInfo.path == storePath);
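
A note on the log line in the hunk above: the %3$.1f quantity is the fraction of bytes removed by xz compression, expressed as a percentage. A minimal stand-alone sketch of the same expression, with made-up sizes (narSize and narXzSize are hypothetical, not from this commit):

    #include <cstdio>
    #include <cstdint>

    int main()
    {
        // Hypothetical sizes: a 10 MiB NAR that xz shrinks to 2.5 MiB.
        uint64_t narSize = 10 * 1024 * 1024;
        uint64_t narXzSize = narSize / 4;

        // Same expression as the printMsg above: bytes saved, as a percentage.
        double saved = (1.0 - (double) narXzSize / narSize) * 100.0;
        printf("compressed %.1f%%\n", saved);   // prints "compressed 75.0%"
        return 0;
    }
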
@@ -102,6 +120,9 @@ void BinaryCacheStore::exportPath(const Path & storePath, bool sign, Sink & sink
 
     auto nar = getFile(res.url);
 
+    stats.narRead++;
+    stats.narReadCompressedBytes += nar.size();
+
     /* Decompress the NAR. FIXME: would be nice to have the remote
        side do this. */
     if (res.compression == "none")

@@ -111,6 +132,8 @@ void BinaryCacheStore::exportPath(const Path & storePath, bool sign, Sink & sink
     else
         throw Error(format("unknown NAR compression type ‘%1%’") % nar);
 
+    stats.narReadBytes += nar.size();
+
     printMsg(lvlTalkative, format("exporting path ‘%1%’ (%2% bytes)") % storePath % nar.size());
 
     assert(nar.size() % 8 == 0);
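
The narWrite/narWriteAverted split in the addToCache hunk falls out of content addressing: the object key is derived from the SHA-256 of the compressed NAR itself, so byte-identical compressed NARs collide on the same key and the second upload can be skipped. A sketch restating that derivation (narUrlFor is a hypothetical helper, not in the commit; hashString and printHash32 are nix's existing hash utilities):

    #include <string>
    #include "hash.hh"   // nix's hashString/printHash32

    // Two store paths whose compressed NARs are byte-identical map to the
    // same "nar/<hash>.nar.xz" key, so fileExists() finds the first upload
    // and the second is counted as narWriteAverted instead of narWrite.
    std::string narUrlFor(const std::string & narXz)
    {
        return "nar/" + nix::printHash32(nix::hashString(nix::htSHA256, narXz)) + ".nar.xz";
    }
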
@@ -3,6 +3,8 @@
 #include "crypto.hh"
 #include "store-api.hh"
 
+#include <atomic>
+
 namespace nix {
 
 struct NarInfo;

@@ -36,8 +38,26 @@ public:
 
     virtual void init();
 
+    struct Stats
+    {
+        std::atomic<uint64_t> narInfoRead{0};
+        std::atomic<uint64_t> narInfoWrite{0};
+        std::atomic<uint64_t> narRead{0};
+        std::atomic<uint64_t> narReadBytes{0};
+        std::atomic<uint64_t> narReadCompressedBytes{0};
+        std::atomic<uint64_t> narWrite{0};
+        std::atomic<uint64_t> narWriteAverted{0};
+        std::atomic<uint64_t> narWriteBytes{0};
+        std::atomic<uint64_t> narWriteCompressedBytes{0};
+        std::atomic<uint64_t> narWriteCompressionTimeMs{0};
+    };
+
+    const Stats & getStats();
+
 private:
 
+    Stats stats;
+
     std::string narInfoFileFor(const Path & storePath);
 
     void addToCache(const ValidPathInfo & info, const string & nar);
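
Each Stats field is an independent std::atomic, so worker threads can bump counters without taking a lock; the trade-off is that getStats() exposes counters that are each internally consistent but not a consistent snapshot as a group (narWriteBytes may momentarily disagree with narWrite). A minimal sketch of the pattern (DemoStats is illustrative, not from the commit):

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <thread>

    // Two fields of the counter pattern used by Stats: concurrent
    // increments are safe without a mutex.
    struct DemoStats
    {
        std::atomic<uint64_t> narWrite{0};
        std::atomic<uint64_t> narWriteBytes{0};
    };

    int main()
    {
        DemoStats stats;
        auto work = [&] {
            for (int i = 0; i < 1000; ++i) {
                stats.narWrite++;               // atomic fetch-and-add
                stats.narWriteBytes += 4096;
            }
        };
        std::thread t1(work), t2(work);
        t1.join(); t2.join();
        assert(stats.narWrite == 2000);         // no lost updates
        assert(stats.narWriteBytes == 2000 * 4096);
        return 0;
    }
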
@@ -23,21 +23,6 @@ State::State()
     if (hydraData == "") throw Error("$HYDRA_DATA must be set");
 
     logDir = canonPath(hydraData + "/build-logs");
-
-#if 0
-    auto store = make_ref<LocalBinaryCacheStore>(getLocalStore(),
-        "/home/eelco/Misc/Keys/test.nixos.org/secret",
-        "/home/eelco/Misc/Keys/test.nixos.org/public",
-        "/tmp/binary-cache");
-#endif
-
-    auto store = std::make_shared<S3BinaryCacheStore>(
-        []() { return openStore(); },
-        "/home/eelco/Misc/Keys/test.nixos.org/secret",
-        "/home/eelco/Misc/Keys/test.nixos.org/public",
-        "nix-test-cache-3");;
-    store->init();
-    _destStore = store;
 }

@@ -532,8 +517,8 @@ void State::dumpStatus(Connection & conn, bool log)
     root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
     root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
     root.attr("nrStepsWaiting", nrStepsWaiting);
-    root.attr("bytesSent"); out << bytesSent;
-    root.attr("bytesReceived"); out << bytesReceived;
+    root.attr("bytesSent", bytesSent);
+    root.attr("bytesReceived", bytesReceived);
     root.attr("nrBuildsRead", nrBuildsRead);
     root.attr("nrBuildsDone", nrBuildsDone);
     root.attr("nrStepsDone", nrStepsDone);
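
This and the following dumpStatus hunks replace the two-statement idiom root.attr("x"); out << x; with a two-argument attr(name, value). hydra's JSON helper is not shown in this diff; presumably the overload just folds the old pair of statements into one call, along these lines (DemoJSONObject is a guess at the shape, not the real implementation):

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for hydra's JSON helper, only to show what
    // the two-argument attr(name, value) used above presumably does.
    struct DemoJSONObject
    {
        std::ostream & out;

        void attr(const std::string & name)
        {
            out << '"' << name << "\":";        // key plus separator (no escaping; demo only)
        }

        template<typename T>
        void attr(const std::string & name, const T & v)
        {
            attr(name);                         // write the key, as before
            out << v << ',';                    // then stream the value, like the old idiom
        }
    };

    int main()
    {
        DemoJSONObject root{std::cout};
        root.attr("bytesSent", 12345);          // prints "bytesSent":12345,
        std::cout << std::endl;
        return 0;
    }
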
@@ -542,8 +527,8 @@ void State::dumpStatus(Connection & conn, bool log)
     if (nrStepsDone) {
         root.attr("totalStepTime", totalStepTime);
         root.attr("totalStepBuildTime", totalStepBuildTime);
-        root.attr("avgStepTime"); out << (float) totalStepTime / nrStepsDone;
-        root.attr("avgStepBuildTime"); out << (float) totalStepBuildTime / nrStepsDone;
+        root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
+        root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
     }
     root.attr("nrQueueWakeups", nrQueueWakeups);
     root.attr("nrDispatcherWakeups", nrDispatcherWakeups);

@@ -565,8 +550,8 @@ void State::dumpStatus(Connection & conn, bool log)
             if (m->state->nrStepsDone) {
                 nested2.attr("totalStepTime", s->totalStepTime);
                 nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
-                nested2.attr("avgStepTime"); out << (float) s->totalStepTime / s->nrStepsDone;
-                nested2.attr("avgStepBuildTime"); out << (float) s->totalStepBuildTime / s->nrStepsDone;
+                nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
+                nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
             }
         }
     }

@@ -577,7 +562,7 @@ void State::dumpStatus(Connection & conn, bool log)
         for (auto & jobset : *jobsets_) {
             nested.attr(jobset.first.first + ":" + jobset.first.second);
             JSONObject nested2(out);
-            nested2.attr("shareUsed"); out << jobset.second->shareUsed();
+            nested2.attr("shareUsed", jobset.second->shareUsed());
             nested2.attr("seconds", jobset.second->getSeconds());
         }
     }

@@ -597,6 +582,59 @@ void State::dumpStatus(Connection & conn, bool log)
             nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
         }
     }
 
+    auto store = dynamic_cast<BinaryCacheStore *>(&*getDestStore());
+
+    if (store) {
+        root.attr("store");
+        JSONObject nested(out);
+
+        auto & stats = store->getStats();
+        nested.attr("narInfoRead", stats.narInfoRead);
+        nested.attr("narInfoWrite", stats.narInfoWrite);
+        nested.attr("narRead", stats.narRead);
+        nested.attr("narReadBytes", stats.narReadBytes);
+        nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
+        nested.attr("narWrite", stats.narWrite);
+        nested.attr("narWriteAverted", stats.narWriteAverted);
+        nested.attr("narWriteBytes", stats.narWriteBytes);
+        nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
+        nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
+        nested.attr("narCompressionSavings",
+            stats.narWriteBytes
+            ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+            : 0.0);
+        nested.attr("narCompressionSpeed", // MiB/s
+            stats.narWriteCompressionTimeMs
+            ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
+            : 0.0);
+
+        auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
+        if (s3Store) {
+            nested.attr("s3");
+            JSONObject nested2(out);
+            auto & s3Stats = s3Store->getS3Stats();
+            nested2.attr("put", s3Stats.put);
+            nested2.attr("putBytes", s3Stats.putBytes);
+            nested2.attr("putTimeMs", s3Stats.putTimeMs);
+            nested2.attr("putSpeed",
+                s3Stats.putTimeMs
+                ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
+                : 0.0);
+            nested2.attr("get", s3Stats.get);
+            nested2.attr("getBytes", s3Stats.getBytes);
+            nested2.attr("getTimeMs", s3Stats.getTimeMs);
+            nested2.attr("getSpeed",
+                s3Stats.getTimeMs
+                ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
+                : 0.0);
+            nested2.attr("head", s3Stats.head);
+            nested2.attr("costDollarApprox",
+                (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+                + s3Stats.put / 1000.0 * 0.005 +
+                + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
+        }
+    }
+
     if (log) printMsg(lvlInfo, format("status: %1%") % out.str());
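
The costDollarApprox constants read directly as per-unit prices: $0.004 per 10,000 GET or HEAD requests, $0.005 per 1,000 PUTs, and $0.09 per GiB downloaded (the stray "+ +" before the getBytes term is a harmless unary plus). A worked instance of the same formula with made-up counts:

    #include <cstdio>

    int main()
    {
        // Hypothetical traffic: 40k GETs, 10k HEADs, 10k PUTs, 100 GiB out.
        double get = 40000, head = 10000, put = 10000;
        double getBytes = 100.0 * 1024 * 1024 * 1024;

        double cost = (get + head) / 10000.0 * 0.004        // $0.020
            + put / 1000.0 * 0.005                          // $0.050
            + getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09; // $9.000
        printf("approx cost: $%.3f\n", cost);               // prints $9.070
        return 0;
    }
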
@@ -685,6 +723,21 @@ void State::run(BuildID buildOne)
     if (!lock)
         throw Error("hydra-queue-runner is already running");
 
+#if 0
+    auto store = make_ref<LocalBinaryCacheStore>(getLocalStore(),
+        "/home/eelco/Misc/Keys/test.nixos.org/secret",
+        "/home/eelco/Misc/Keys/test.nixos.org/public",
+        "/tmp/binary-cache");
+#endif
+
+    auto store = std::make_shared<S3BinaryCacheStore>(
+        []() { return openStore(); },
+        "/home/eelco/Misc/Keys/test.nixos.org/secret",
+        "/home/eelco/Misc/Keys/test.nixos.org/public",
+        "nix-test-cache-3");;
+    store->init();
+    _destStore = store;
+
     {
         auto conn(dbPool.get());
         clearBusy(*conn, 0);

@@ -62,8 +62,15 @@ void S3BinaryCacheStore::init()
     BinaryCacheStore::init();
 }
 
+const S3BinaryCacheStore::Stats & S3BinaryCacheStore::getS3Stats()
+{
+    return stats;
+}
+
 bool S3BinaryCacheStore::fileExists(const std::string & path)
 {
+    stats.head++;
+
     auto res = client->HeadObject(
         Aws::S3::Model::HeadObjectRequest()
         .WithBucket(bucketName)

@@ -91,16 +98,21 @@ void S3BinaryCacheStore::upsertFile(const std::string & path, const std::string
 
     request.SetBody(stream);
 
+    stats.put++;
+    stats.putBytes += data.size();
+
     auto now1 = std::chrono::steady_clock::now();
 
     auto result = checkAws(client->PutObject(request));
 
     auto now2 = std::chrono::steady_clock::now();
 
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
     printMsg(lvlError, format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-        % bucketName % path
-        % data.size()
-        % std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
+        % bucketName % path % data.size() % duration);
+
+    stats.putTimeMs += duration;
 }
 
 std::string S3BinaryCacheStore::getFile(const std::string & path)
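
Both transfer paths bracket the AWS call with now1/now2 on std::chrono::steady_clock, which is the right clock here because it is monotonic: a wall-clock adjustment mid-transfer cannot produce a negative duration. The pattern could be factored into a helper along these lines (timeMs is hypothetical, not part of the commit):

    #include <chrono>
    #include <cstdint>

    // Run f and return the elapsed wall time in milliseconds, measured
    // on the monotonic clock, mirroring the now1/now2 pattern above.
    template<typename F>
    uint64_t timeMs(F f)
    {
        auto now1 = std::chrono::steady_clock::now();
        f();
        auto now2 = std::chrono::steady_clock::now();
        return std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
    }
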
@@ -114,6 +126,8 @@ std::string S3BinaryCacheStore::getFile(const std::string & path)
             return Aws::New<std::stringstream>("STRINGSTREAM");
         });
 
+    stats.get++;
+
     auto now1 = std::chrono::steady_clock::now();
 
     auto result = checkAws(client->GetObject(request));

@@ -122,10 +136,13 @@ std::string S3BinaryCacheStore::getFile(const std::string & path)
 
     auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
 
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
     printMsg(lvlError, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
-        % bucketName % path
-        % res.size()
-        % std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
+        % bucketName % path % res.size() % duration);
 
+    stats.getBytes += res.size();
+    stats.getTimeMs += duration;
+
     return res;
 }

@@ -2,6 +2,8 @@
 
 #include "binary-cache-store.hh"
 
+#include <atomic>
+
 namespace Aws { namespace Client { class ClientConfiguration; } }
 namespace Aws { namespace S3 { class S3Client; } }

@@ -24,8 +26,23 @@ public:
 
     void init() override;
 
+    struct Stats
+    {
+        std::atomic<uint64_t> put{0};
+        std::atomic<uint64_t> putBytes{0};
+        std::atomic<uint64_t> putTimeMs{0};
+        std::atomic<uint64_t> get{0};
+        std::atomic<uint64_t> getBytes{0};
+        std::atomic<uint64_t> getTimeMs{0};
+        std::atomic<uint64_t> head{0};
+    };
+
+    const Stats & getS3Stats();
+
 private:
 
+    Stats stats;
+
     ref<Aws::Client::ClientConfiguration> makeConfig();
 
 protected:
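
Read side of the new accessor, for illustration: because the fields are atomics, a const reference is enough for safe concurrent reads. A hypothetical caller computing the same MiB/s figure that dumpStatus reports (logPutSpeed is not in the commit):

    #include <cstdint>
    #include <cstdio>

    #include "s3-binary-cache-store.hh"

    void logPutSpeed(nix::S3BinaryCacheStore & store)
    {
        auto & s = store.getS3Stats();
        uint64_t ms = s.putTimeMs;              // single atomic load
        if (ms)
            printf("put speed: %.2f MiB/s\n",
                (double) s.putBytes / ms * 1000.0 / (1024.0 * 1024.0));
    }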