From b50a105ca7d22d8d57165d3416430a34c3c38378 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Wed, 20 Apr 2016 15:29:40 +0200
Subject: [PATCH] S3BinaryCacheStore: Use disk cache

---
 src/hydra-queue-runner/build-result.cc       |  4 +-
 src/hydra-queue-runner/hydra-queue-runner.cc | 98 +++++++++----------
 .../s3-binary-cache-store.cc                 | 48 +++++----
 .../s3-binary-cache-store.hh                 |  4 +-
 4 files changed, 83 insertions(+), 71 deletions(-)

diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc
index 6b2741ea..5a5177ce 100644
--- a/src/hydra-queue-runner/build-result.cc
+++ b/src/hydra-queue-runner/build-result.cc
@@ -21,8 +21,8 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
         store->computeFSClosure(output, closure);
     for (auto & path : closure) {
         auto info = store->queryPathInfo(path);
-        res.closureSize += info.narSize;
-        if (outputs.find(path) != outputs.end()) res.size += info.narSize;
+        res.closureSize += info->narSize;
+        if (outputs.find(path) != outputs.end()) res.size += info->narSize;
     }
 
     /* Get build products. */
diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc
index 2dac0dbd..baacb1cc 100644
--- a/src/hydra-queue-runner/hydra-queue-runner.cc
+++ b/src/hydra-queue-runner/hydra-queue-runner.cc
@@ -630,60 +630,58 @@ void State::dumpStatus(Connection & conn, bool log)
         }
     }
 
-    auto store = dynamic_cast<BinaryCacheStore *>(&*getDestStore());
+    auto store = getDestStore();
 
-    if (store) {
-        root.attr("store");
-        JSONObject nested(out);
+    root.attr("store");
+    JSONObject nested(out);
 
-        auto & stats = store->getStats();
-        nested.attr("narInfoRead", stats.narInfoRead);
-        nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
-        nested.attr("narInfoMissing", stats.narInfoMissing);
-        nested.attr("narInfoWrite", stats.narInfoWrite);
-        nested.attr("narInfoCacheSize", stats.narInfoCacheSize);
-        nested.attr("narRead", stats.narRead);
-        nested.attr("narReadBytes", stats.narReadBytes);
-        nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
-        nested.attr("narWrite", stats.narWrite);
-        nested.attr("narWriteAverted", stats.narWriteAverted);
-        nested.attr("narWriteBytes", stats.narWriteBytes);
-        nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
-        nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
-        nested.attr("narCompressionSavings",
-            stats.narWriteBytes
-            ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+    auto & stats = store->getStats();
+    nested.attr("narInfoRead", stats.narInfoRead);
+    nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
+    nested.attr("narInfoMissing", stats.narInfoMissing);
+    nested.attr("narInfoWrite", stats.narInfoWrite);
+    nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
+    nested.attr("narRead", stats.narRead);
+    nested.attr("narReadBytes", stats.narReadBytes);
+    nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
+    nested.attr("narWrite", stats.narWrite);
+    nested.attr("narWriteAverted", stats.narWriteAverted);
+    nested.attr("narWriteBytes", stats.narWriteBytes);
+    nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
+    nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
+    nested.attr("narCompressionSavings",
+        stats.narWriteBytes
+        ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
+        : 0.0);
+    nested.attr("narCompressionSpeed", // MiB/s
+        stats.narWriteCompressionTimeMs
+        ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
+        : 0.0);
+
+    auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
+    if (s3Store) {
+        nested.attr("s3");
+        JSONObject nested2(out);
+        auto & s3Stats = s3Store->getS3Stats();
+        nested2.attr("put", s3Stats.put);
+        nested2.attr("putBytes", s3Stats.putBytes);
+        nested2.attr("putTimeMs", s3Stats.putTimeMs);
+        nested2.attr("putSpeed",
+            s3Stats.putTimeMs
+            ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
             : 0.0);
-        nested.attr("narCompressionSpeed", // MiB/s
-            stats.narWriteCompressionTimeMs
-            ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
+        nested2.attr("get", s3Stats.get);
+        nested2.attr("getBytes", s3Stats.getBytes);
+        nested2.attr("getTimeMs", s3Stats.getTimeMs);
+        nested2.attr("getSpeed",
+            s3Stats.getTimeMs
+            ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
             : 0.0);
-
-        auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
-        if (s3Store) {
-            nested.attr("s3");
-            JSONObject nested2(out);
-            auto & s3Stats = s3Store->getS3Stats();
-            nested2.attr("put", s3Stats.put);
-            nested2.attr("putBytes", s3Stats.putBytes);
-            nested2.attr("putTimeMs", s3Stats.putTimeMs);
-            nested2.attr("putSpeed",
-                s3Stats.putTimeMs
-                ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
-                : 0.0);
-            nested2.attr("get", s3Stats.get);
-            nested2.attr("getBytes", s3Stats.getBytes);
-            nested2.attr("getTimeMs", s3Stats.getTimeMs);
-            nested2.attr("getSpeed",
-                s3Stats.getTimeMs
-                ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
-                : 0.0);
-            nested2.attr("head", s3Stats.head);
-            nested2.attr("costDollarApprox",
-                (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
-                + s3Stats.put / 1000.0 * 0.005 +
-                + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
-        }
+        nested2.attr("head", s3Stats.head);
+        nested2.attr("costDollarApprox",
+            (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+            + s3Stats.put / 1000.0 * 0.005 +
+            + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
     }
 }
 
diff --git a/src/hydra-queue-runner/s3-binary-cache-store.cc b/src/hydra-queue-runner/s3-binary-cache-store.cc
index 05726f4e..f03a8c4c 100644
--- a/src/hydra-queue-runner/s3-binary-cache-store.cc
+++ b/src/hydra-queue-runner/s3-binary-cache-store.cc
@@ -1,6 +1,7 @@
 #include "s3-binary-cache-store.hh"
 
 #include "nar-info.hh"
+#include "nar-info-disk-cache.hh"
 
 #include <aws/core/client/ClientConfiguration.h>
 #include <aws/s3/S3Client.h>
@@ -38,6 +39,12 @@ S3BinaryCacheStore::S3BinaryCacheStore(std::shared_ptr<Store> localStore,
     , config(makeConfig())
     , client(make_ref<Aws::S3::S3Client>(*config))
 {
+    diskCache = getNarInfoDiskCache();
+}
+
+std::string S3BinaryCacheStore::getUri()
+{
+    return "s3://" + bucketName;
 }
 
 ref<Aws::Client::ClientConfiguration> S3BinaryCacheStore::makeConfig()
@@ -50,24 +57,29 @@ ref<Aws::Client::ClientConfiguration> S3BinaryCacheStore::makeConfig()
 
 void S3BinaryCacheStore::init()
 {
-    /* Create the bucket if it doesn't already exists. */
-    // FIXME: HeadBucket would be more appropriate, but doesn't return
-    // an easily parsed 404 message.
-    auto res = client->GetBucketLocation(
-        Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
+    if (!diskCache->cacheExists(getUri())) {
 
-    if (!res.IsSuccess()) {
-        if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
-            throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
+        /* Create the bucket if it doesn't already exists. */
+        // FIXME: HeadBucket would be more appropriate, but doesn't return
+        // an easily parsed 404 message.
+        auto res = client->GetBucketLocation(
+            Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
 
-        checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
-            client->CreateBucket(
-                Aws::S3::Model::CreateBucketRequest()
-                .WithBucket(bucketName)
-                .WithCreateBucketConfiguration(
-                    Aws::S3::Model::CreateBucketConfiguration()
-                    /* .WithLocationConstraint(
-                       Aws::S3::Model::BucketLocationConstraint::US) */ )));
+        if (!res.IsSuccess()) {
+            if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
+                throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
+
+            checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
+                client->CreateBucket(
+                    Aws::S3::Model::CreateBucketRequest()
+                    .WithBucket(bucketName)
+                    .WithCreateBucketConfiguration(
+                        Aws::S3::Model::CreateBucketConfiguration()
+                        /* .WithLocationConstraint(
+                           Aws::S3::Model::BucketLocationConstraint::US) */ )));
+        }
+
+        diskCache->createCache(getUri());
     }
 
     BinaryCacheStore::init();
@@ -82,10 +94,10 @@ const S3BinaryCacheStore::Stats & S3BinaryCacheStore::getS3Stats()
    fetches the .narinfo file, rather than first checking for its
    existence via a HEAD request. Since .narinfos are small, doing a
    GET is unlikely to be slower than HEAD. */
-bool S3BinaryCacheStore::isValidPath(const Path & storePath)
+bool S3BinaryCacheStore::isValidPathUncached(const Path & storePath)
 {
     try {
-        readNarInfo(storePath);
+        queryPathInfo(storePath);
         return true;
     } catch (InvalidPath & e) {
         return false;
diff --git a/src/hydra-queue-runner/s3-binary-cache-store.hh b/src/hydra-queue-runner/s3-binary-cache-store.hh
index b8d7a370..ffcce1eb 100644
--- a/src/hydra-queue-runner/s3-binary-cache-store.hh
+++ b/src/hydra-queue-runner/s3-binary-cache-store.hh
@@ -25,6 +25,8 @@ public:
 
     void init() override;
 
+    std::string getUri();
+
     struct Stats
     {
         std::atomic<uint64_t> put{0};
@@ -38,7 +40,7 @@ public:
 
     const Stats & getS3Stats();
 
-    bool isValidPath(const Path & storePath) override;
+    bool isValidPathUncached(const Path & storePath) override;
 
 private:
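
Note (illustrative, not part of the commit): the costDollarApprox formula in
dumpStatus() prices GET and HEAD requests per 10,000, PUT requests per 1,000,
and downloaded bytes per GiB. Plugging round numbers into the formula as
written, 1,000,000 GETs, 100,000 PUTs and 50 GiB fetched come to
1000000/10000 * 0.004 + 100000/1000 * 0.005 + 50 * 0.09
= 0.40 + 0.50 + 4.50 = $5.40.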
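
Note (illustrative, not part of the commit): two caching layers are at work in
this patch. init() now skips the GetBucketLocation/CreateBucket round trip
whenever the NAR-info disk cache already knows this store URI, and the rename
isValidPath() -> isValidPathUncached() turns the optimistic S3 GET into the
uncached fallback that the generic store machinery wraps with its own caches,
so only cache misses ever reach S3. A self-contained sketch of that cache-aside
layering, using hypothetical names (Backend, CachedStore) rather than the real
Nix classes:

    // Minimal cache-aside sketch (hypothetical names, C++17).
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    // Stands in for the remote side: every fetch() would be a billed S3 GET.
    struct Backend
    {
        std::map<std::string, std::string> objects;
        int gets = 0;

        std::optional<std::string> fetch(const std::string & key)
        {
            ++gets;
            auto i = objects.find(key);
            if (i == objects.end()) return std::nullopt;
            return i->second;
        }
    };

    // Caching wrapper: repeat queries (including repeat misses, via
    // negative entries) are answered from the local cache; only cache
    // misses fall through to the uncached backend query.
    struct CachedStore
    {
        Backend & backend;
        std::map<std::string, std::optional<std::string>> cache;

        bool isValidPath(const std::string & key)
        {
            auto i = cache.find(key);
            if (i == cache.end())
                i = cache.emplace(key, backend.fetch(key)).first;
            return i->second.has_value();
        }
    };

    int main()
    {
        Backend s3;
        s3.objects["abc"] = "narinfo for abc";
        CachedStore store{s3};

        std::cout << store.isValidPath("abc");  // 1: one backend GET
        std::cout << store.isValidPath("abc");  // 1: served from cache
        std::cout << store.isValidPath("xyz");  // 0: one backend GET
        std::cout << store.isValidPath("xyz");  // 0: negative entry hit
        std::cout << "\nbackend GETs: " << s3.gets << "\n";  // 2
    }

In the sketch, caching negative results is what makes the optimistic GET
strategy described in the comment above cheap: a path that is queried
repeatedly but absent costs one request, not one per query.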