forked from lix-project/hydra
Remove s3-binary-cache-store (moved to Nix in d155d80)
This commit is contained in:
parent
706e06e8d7
commit
492d16074c
3 changed files with 0 additions and 260 deletions
|
@ -3,7 +3,6 @@ bin_PROGRAMS = hydra-queue-runner
|
||||||
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
|
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
|
||||||
builder.cc build-result.cc build-remote.cc \
|
builder.cc build-result.cc build-remote.cc \
|
||||||
build-result.hh counter.hh token-server.hh state.hh db.hh \
|
build-result.hh counter.hh token-server.hh state.hh db.hh \
|
||||||
s3-binary-cache-store.hh s3-binary-cache-store.cc \
|
|
||||||
finally.hh
|
finally.hh
|
||||||
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
|
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
|
||||||
|
|
||||||
|
|
|
@ -1,198 +0,0 @@
|
||||||
#include "s3-binary-cache-store.hh"
|
|
||||||
|
|
||||||
#include "nar-info.hh"
|
|
||||||
#include "nar-info-disk-cache.hh"
|
|
||||||
|
|
||||||
#include <aws/core/client/ClientConfiguration.h>
|
|
||||||
#include <aws/s3/S3Client.h>
|
|
||||||
#include <aws/s3/model/CreateBucketRequest.h>
|
|
||||||
#include <aws/s3/model/GetBucketLocationRequest.h>
|
|
||||||
#include <aws/s3/model/GetObjectRequest.h>
|
|
||||||
#include <aws/s3/model/HeadObjectRequest.h>
|
|
||||||
#include <aws/s3/model/PutObjectRequest.h>
|
|
||||||
|
|
||||||
namespace nix {
|
|
||||||
|
|
||||||
struct S3Error : public Error
|
|
||||||
{
|
|
||||||
Aws::S3::S3Errors err;
|
|
||||||
S3Error(Aws::S3::S3Errors err, const FormatOrString & fs)
|
|
||||||
: Error(fs), err(err) { };
|
|
||||||
};
|
|
||||||
|
|
||||||
/* Helper: given an Outcome<R, E>, return R in case of success, or
|
|
||||||
throw an exception in case of an error. */
|
|
||||||
template<typename R, typename E>
|
|
||||||
R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome)
|
|
||||||
{
|
|
||||||
if (!outcome.IsSuccess())
|
|
||||||
throw S3Error(
|
|
||||||
outcome.GetError().GetErrorType(),
|
|
||||||
fs.s + ": " + outcome.GetError().GetMessage());
|
|
||||||
return outcome.GetResultWithOwnership();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Construct a binary cache store backed by the S3 bucket `bucketName`.
   NARs are signed with `secretKeyFile`; .narinfo lookups are
   accelerated by the shared on-disk narinfo cache. */
S3BinaryCacheStore::S3BinaryCacheStore(std::shared_ptr<Store> localStore,
    const Path & secretKeyFile, const std::string & bucketName)
    : BinaryCacheStore(localStore, secretKeyFile)
    , bucketName(bucketName)
    , config(makeConfig())
    , client(make_ref<Aws::S3::S3Client>(*config))
{
    // Share the SQLite-backed narinfo cache with other store instances.
    diskCache = getNarInfoDiskCache();
}
|
|
||||||
|
|
||||||
std::string S3BinaryCacheStore::getUri()
|
|
||||||
{
|
|
||||||
return "s3://" + bucketName;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Build the AWS client configuration used for all S3 requests.
   Region is fixed to us-east-1; the request timeout is raised to ten
   minutes because NAR uploads can be large. */
ref<Aws::Client::ClientConfiguration> S3BinaryCacheStore::makeConfig()
{
    auto clientConfig = make_ref<Aws::Client::ClientConfiguration>();
    clientConfig->region = Aws::Region::US_EAST_1;
    clientConfig->requestTimeoutMs = 600 * 1000; // 10 minutes
    return clientConfig;
}
|
|
||||||
|
|
||||||
void S3BinaryCacheStore::init()
|
|
||||||
{
|
|
||||||
if (!diskCache->cacheExists(getUri())) {
|
|
||||||
|
|
||||||
/* Create the bucket if it doesn't already exists. */
|
|
||||||
// FIXME: HeadBucket would be more appropriate, but doesn't return
|
|
||||||
// an easily parsed 404 message.
|
|
||||||
auto res = client->GetBucketLocation(
|
|
||||||
Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
|
|
||||||
|
|
||||||
if (!res.IsSuccess()) {
|
|
||||||
if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
|
|
||||||
throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
|
|
||||||
|
|
||||||
checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
|
|
||||||
client->CreateBucket(
|
|
||||||
Aws::S3::Model::CreateBucketRequest()
|
|
||||||
.WithBucket(bucketName)
|
|
||||||
.WithCreateBucketConfiguration(
|
|
||||||
Aws::S3::Model::CreateBucketConfiguration()
|
|
||||||
/* .WithLocationConstraint(
|
|
||||||
Aws::S3::Model::BucketLocationConstraint::US) */ )));
|
|
||||||
}
|
|
||||||
|
|
||||||
diskCache->createCache(getUri());
|
|
||||||
}
|
|
||||||
|
|
||||||
BinaryCacheStore::init();
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Expose the atomic S3 traffic counters (reads, writes, byte counts,
   timings) for monitoring. */
const S3BinaryCacheStore::Stats & S3BinaryCacheStore::getS3Stats()
{
    return stats;
}
|
|
||||||
|
|
||||||
/* This is a specialisation of isValidPath() that optimistically
|
|
||||||
fetches the .narinfo file, rather than first checking for its
|
|
||||||
existence via a HEAD request. Since .narinfos are small, doing a
|
|
||||||
GET is unlikely to be slower than HEAD. */
|
|
||||||
bool S3BinaryCacheStore::isValidPathUncached(const Path & storePath)
|
|
||||||
{
|
|
||||||
try {
|
|
||||||
queryPathInfo(storePath);
|
|
||||||
return true;
|
|
||||||
} catch (InvalidPath & e) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool S3BinaryCacheStore::fileExists(const std::string & path)
|
|
||||||
{
|
|
||||||
stats.head++;
|
|
||||||
|
|
||||||
auto res = client->HeadObject(
|
|
||||||
Aws::S3::Model::HeadObjectRequest()
|
|
||||||
.WithBucket(bucketName)
|
|
||||||
.WithKey(path));
|
|
||||||
|
|
||||||
if (!res.IsSuccess()) {
|
|
||||||
auto & error = res.GetError();
|
|
||||||
if (error.GetErrorType() == Aws::S3::S3Errors::UNKNOWN // FIXME
|
|
||||||
&& error.GetMessage().find("404") != std::string::npos)
|
|
||||||
return false;
|
|
||||||
throw Error(format("AWS error fetching ‘%s’: %s") % path % error.GetMessage());
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void S3BinaryCacheStore::upsertFile(const std::string & path, const std::string & data)
|
|
||||||
{
|
|
||||||
auto request =
|
|
||||||
Aws::S3::Model::PutObjectRequest()
|
|
||||||
.WithBucket(bucketName)
|
|
||||||
.WithKey(path);
|
|
||||||
|
|
||||||
auto stream = std::make_shared<std::stringstream>(data);
|
|
||||||
|
|
||||||
request.SetBody(stream);
|
|
||||||
|
|
||||||
stats.put++;
|
|
||||||
stats.putBytes += data.size();
|
|
||||||
|
|
||||||
auto now1 = std::chrono::steady_clock::now();
|
|
||||||
|
|
||||||
auto result = checkAws(format("AWS error uploading ‘%s’") % path,
|
|
||||||
client->PutObject(request));
|
|
||||||
|
|
||||||
auto now2 = std::chrono::steady_clock::now();
|
|
||||||
|
|
||||||
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
|
||||||
|
|
||||||
printMsg(lvlInfo, format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
|
|
||||||
% bucketName % path % data.size() % duration);
|
|
||||||
|
|
||||||
stats.putTimeMs += duration;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::shared_ptr<std::string> S3BinaryCacheStore::getFile(const std::string & path)
|
|
||||||
{
|
|
||||||
printMsg(lvlDebug, format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
|
|
||||||
|
|
||||||
auto request =
|
|
||||||
Aws::S3::Model::GetObjectRequest()
|
|
||||||
.WithBucket(bucketName)
|
|
||||||
.WithKey(path);
|
|
||||||
|
|
||||||
request.SetResponseStreamFactory([&]() {
|
|
||||||
return Aws::New<std::stringstream>("STRINGSTREAM");
|
|
||||||
});
|
|
||||||
|
|
||||||
stats.get++;
|
|
||||||
|
|
||||||
try {
|
|
||||||
|
|
||||||
auto now1 = std::chrono::steady_clock::now();
|
|
||||||
|
|
||||||
auto result = checkAws(format("AWS error fetching ‘%s’") % path,
|
|
||||||
client->GetObject(request));
|
|
||||||
|
|
||||||
auto now2 = std::chrono::steady_clock::now();
|
|
||||||
|
|
||||||
auto res = dynamic_cast<std::stringstream &>(result.GetBody()).str();
|
|
||||||
|
|
||||||
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
|
||||||
|
|
||||||
printMsg(lvlTalkative, format("downloaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
|
|
||||||
% bucketName % path % res.size() % duration);
|
|
||||||
|
|
||||||
stats.getBytes += res.size();
|
|
||||||
stats.getTimeMs += duration;
|
|
||||||
|
|
||||||
return std::make_shared<std::string>(res);
|
|
||||||
|
|
||||||
} catch (S3Error & e) {
|
|
||||||
if (e.err == Aws::S3::S3Errors::NO_SUCH_KEY) return 0;
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,61 +0,0 @@
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "binary-cache-store.hh"
|
|
||||||
|
|
||||||
#include <atomic>
|
|
||||||
|
|
||||||
namespace Aws { namespace Client { class ClientConfiguration; } }
|
|
||||||
namespace Aws { namespace S3 { class S3Client; } }
|
|
||||||
|
|
||||||
namespace nix {
|
|
||||||
|
|
||||||
class S3BinaryCacheStore : public BinaryCacheStore
|
|
||||||
{
|
|
||||||
private:
|
|
||||||
|
|
||||||
std::string bucketName;
|
|
||||||
|
|
||||||
ref<Aws::Client::ClientConfiguration> config;
|
|
||||||
ref<Aws::S3::S3Client> client;
|
|
||||||
|
|
||||||
public:
|
|
||||||
|
|
||||||
S3BinaryCacheStore(std::shared_ptr<Store> localStore,
|
|
||||||
const Path & secretKeyFile, const std::string & bucketName);
|
|
||||||
|
|
||||||
void init() override;
|
|
||||||
|
|
||||||
std::string getUri();
|
|
||||||
|
|
||||||
struct Stats
|
|
||||||
{
|
|
||||||
std::atomic<uint64_t> put{0};
|
|
||||||
std::atomic<uint64_t> putBytes{0};
|
|
||||||
std::atomic<uint64_t> putTimeMs{0};
|
|
||||||
std::atomic<uint64_t> get{0};
|
|
||||||
std::atomic<uint64_t> getBytes{0};
|
|
||||||
std::atomic<uint64_t> getTimeMs{0};
|
|
||||||
std::atomic<uint64_t> head{0};
|
|
||||||
};
|
|
||||||
|
|
||||||
const Stats & getS3Stats();
|
|
||||||
|
|
||||||
bool isValidPathUncached(const Path & storePath) override;
|
|
||||||
|
|
||||||
private:
|
|
||||||
|
|
||||||
Stats stats;
|
|
||||||
|
|
||||||
ref<Aws::Client::ClientConfiguration> makeConfig();
|
|
||||||
|
|
||||||
protected:
|
|
||||||
|
|
||||||
bool fileExists(const std::string & path) override;
|
|
||||||
|
|
||||||
void upsertFile(const std::string & path, const std::string & data) override;
|
|
||||||
|
|
||||||
std::shared_ptr<std::string> getFile(const std::string & path) override;
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
}
|
|
Loading…
Reference in a new issue