S3BinaryCacheStore: Allow disabling multipart uploads

The use of TransferManager has several issues, including that it
doesn't allow setting a Content-Encoding without a patch, and it
doesn't handle exceptions in worker threads (causing termination on
memory allocation failure).

Fixes #2493.
This commit is contained in:
Eelco Dolstra 2018-10-30 14:25:00 +01:00
parent 0163e8928c
commit 9f99d62480
No known key found for this signature in database
GPG key ID: 8170B4726D7198DE

View file

@@ -173,6 +173,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
const Setting<bool> multipartUpload{
this, false, "multipart-upload", "whether to use multi-part uploads"};
const Setting<uint64_t> bufferSize{
this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
@@ -261,8 +263,9 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
    executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);

std::call_once(transferManagerCreated, [&]()
{
if (multipartUpload) {
TransferManagerConfiguration transferConfig(executor.get());
transferConfig.s3Client = s3Helper.client;
@@ -282,10 +285,13 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
};
transferManager = TransferManager::Create(transferConfig);
}
});

auto now1 = std::chrono::steady_clock::now();
if (transferManager) {
std::shared_ptr<TransferHandle> transferHandle =
    transferManager->UploadFile(
        stream, bucketName, path, mimeType,
@@ -302,6 +308,26 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
    bucketName, path);
} else {
auto request =
Aws::S3::Model::PutObjectRequest()
.WithBucket(bucketName)
.WithKey(path);
request.SetContentType(mimeType);
if (contentEncoding != "")
request.SetContentEncoding(contentEncoding);
auto stream = std::make_shared<istringstream_nocopy>(data);
request.SetBody(stream);
auto result = checkAws(fmt("AWS error uploading '%s'", path),
s3Helper.client->PutObject(request));
}
printTalkative("upload of '%s' completed", path);

auto now2 = std::chrono::steady_clock::now();