S3BinaryCacheStore: Don't use the transfer status callback
This callback is executed on a different thread, so exceptions thrown from the callback are not caught:

    Aug 08 16:25:48 chef hydra-queue-runner[11967]: terminate called after throwing an instance of 'nix::Error'
    Aug 08 16:25:48 chef hydra-queue-runner[11967]:   what(): AWS error: failed to upload 's3://nix-cache/19dbddlfb0vp68g68y19p9fswrgl0bg7.ls'

Therefore, just check the transfer status after the transfer completes. Also include the S3 error message in the exception.
parent 103ad1598c
commit a0b971dd9c
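To illustrate the failure mode, here is a minimal sketch in plain standard C++ (not the AWS SDK or Nix code): an exception thrown from a callback that runs on another thread never reaches a try/catch on the submitting thread; it escapes the worker thread's top-level function and the runtime calls std::terminate(), which matches the "terminate called after throwing an instance of 'nix::Error'" message in the log above.

    // Sketch only: the lambda stands in for the SDK's transferStatusUpdatedCallback.
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    int main()
    {
        try {
            std::thread worker([] {
                // The status callback notices a failed upload and throws...
                throw std::runtime_error("AWS error: failed to upload 's3://...'");
            });
            worker.join();
        } catch (const std::exception & e) {
            // ...but this handler is never reached: the exception unwinds the
            // worker thread, so the runtime calls std::terminate() instead.
            std::cerr << "caught: " << e.what() << std::endl;
        }
    }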
@@ -296,36 +296,13 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
                 const std::shared_ptr<const TransferHandle>
                 &transferHandle) {
                 //FIXME: find a way to properly abort the multipart upload.
-                checkInterrupt();
+                //checkInterrupt();
                 debug("upload progress ('%s'): '%d' of '%d' bytes",
                     path,
                     transferHandle->GetBytesTransferred(),
                     transferHandle->GetBytesTotalSize());
             };
 
-        transferConfig.transferStatusUpdatedCallback =
-            [&](const TransferManager *,
-                const std::shared_ptr<const TransferHandle>
-                &transferHandle) {
-                switch (transferHandle->GetStatus()) {
-                    case TransferStatus::COMPLETED:
-                        printTalkative("upload of '%s' completed", path);
-                        stats.put++;
-                        stats.putBytes += data.size();
-                        break;
-                    case TransferStatus::IN_PROGRESS:
-                        break;
-                    case TransferStatus::FAILED:
-                        throw Error("AWS error: failed to upload 's3://%s/%s'",
-                            bucketName, path);
-                        break;
-                    default:
-                        throw Error("AWS error: transfer status of 's3://%s/%s' "
-                            "in unexpected state",
-                            bucketName, path);
-                };
-            };
-
         std::shared_ptr<TransferManager> transferManager =
             TransferManager::Create(transferConfig);
 
@@ -339,6 +316,16 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 
         transferHandle->WaitUntilFinished();
 
+        if (transferHandle->GetStatus() == TransferStatus::FAILED)
+            throw Error("AWS error: failed to upload 's3://%s/%s': %s",
+                bucketName, path, transferHandle->GetLastError().GetMessage());
+
+        if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
+            throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
+                bucketName, path);
+
+        printTalkative("upload of '%s' completed", path);
+
         auto now2 = std::chrono::steady_clock::now();
 
         auto duration =
@@ -349,6 +336,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             bucketName % path % data.size() % duration);
 
         stats.putTimeMs += duration;
+        stats.putBytes += data.size();
+        stats.put++;
     }
 
     void upsertFile(const std::string & path, const std::string & data,
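For comparison, here is a self-contained sketch of the pattern the new code follows, written against the aws-sdk-cpp transfer API but outside Nix (the helper name, bucket and key handling, and the use of std::runtime_error in place of Nix's Error are assumptions): start the upload, block with WaitUntilFinished() on the calling thread, and only then inspect GetStatus(), pulling the S3 error text from GetLastError().GetMessage().

    #include <aws/core/Aws.h>
    #include <aws/core/utils/memory/stl/AWSMap.h>
    #include <aws/transfer/TransferManager.h>
    #include <memory>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    using namespace Aws::Transfer;

    // Hypothetical helper: upload 'data' to s3://<bucket>/<key> and report any
    // failure by throwing on the calling thread, after the transfer finished.
    // 'transferManager' is assumed to come from TransferManager::Create(...)
    // after Aws::InitAPI(), as in the code above.
    void uploadAndCheck(const std::shared_ptr<TransferManager> & transferManager,
        const std::string & bucket, const std::string & key, const std::string & data)
    {
        auto stream = std::make_shared<std::stringstream>(data);

        auto transferHandle = transferManager->UploadFile(stream, bucket, key,
            "application/octet-stream", Aws::Map<Aws::String, Aws::String>());

        // Block until the SDK's worker threads are done with this transfer.
        transferHandle->WaitUntilFinished();

        // Check the outcome here instead of throwing from the status callback,
        // and include the S3 error message in the exception.
        if (transferHandle->GetStatus() == TransferStatus::FAILED)
            throw std::runtime_error("failed to upload 's3://" + bucket + "/" + key
                + "': " + std::string(transferHandle->GetLastError().GetMessage().c_str()));

        if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
            throw std::runtime_error("transfer of 's3://" + bucket + "/" + key
                + "' ended in an unexpected state");
    }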