From 982d049d3bb2097b232774f8fd3ff97af538a826 Mon Sep 17 00:00:00 2001
From: eldritch horrors
Date: Sun, 27 Oct 2024 18:27:11 +0100
Subject: [PATCH] libstore: remove FileTransferResult::data

return it as a separate item in a pair instead. this will let us remove
enqueueDownload() in favor of returning metadata from download() itself

Change-Id: I74fad2ca15f920da1eefabc950c2baa2c360f2ba
---
 src/libfetchers/tarball.cc              |  7 +--
 src/libstore/filetransfer.cc            | 63 ++++++++++++++-----------
 src/libstore/filetransfer.hh            |  3 +-
 src/libstore/http-binary-cache-store.cc |  2 +-
 src/nix/upgrade-nix.cc                  |  4 +-
 tests/unit/libstore/filetransfer.cc     |  2 +-
 6 files changed, 44 insertions(+), 37 deletions(-)

diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index 13ba44b51..e7e934b2a 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -43,8 +43,9 @@ DownloadFileResult downloadFile(
     if (cached)
         headers.emplace_back("If-None-Match", getStrAttr(cached->infoAttrs, "etag"));
     FileTransferResult res;
+    std::string data;
     try {
-        res = getFileTransfer()->enqueueDownload(url, headers).get();
+        std::tie(res, data) = getFileTransfer()->enqueueDownload(url, headers).get();
     } catch (FileTransferError & e) {
         if (cached) {
             warn("%s; using cached version", e.msg());
@@ -69,8 +70,8 @@ DownloadFileResult downloadFile(
         storePath = std::move(cached->storePath);
     } else {
         StringSink sink;
-        sink << dumpString(res.data);
-        auto hash = hashString(HashType::SHA256, res.data);
+        sink << dumpString(data);
+        auto hash = hashString(HashType::SHA256, data);
         ValidPathInfo info {
             *store,
             name,
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index b68c636fa..6f78dad94 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -48,9 +48,12 @@ struct curlFileTransfer : public FileTransfer
         FileTransferResult result;
         Activity act;
         std::optional<std::string> uploadData;
+        std::string downloadData;
         bool noBody = false; // \equiv HTTP HEAD, don't download data
         bool done = false; // whether either the success or failure function has been called
-        std::packaged_task<FileTransferResult(std::exception_ptr, FileTransferResult)> callback;
+        std::packaged_task<
+            std::pair<FileTransferResult, std::string>(std::exception_ptr, FileTransferResult)>
+            callback;
         std::function<void(TransferItem &, std::string_view data)> dataCallback;
         CURL * req = 0;
         bool active = false; // whether the handle has been added to the multi object
@@ -105,9 +108,9 @@ struct curlFileTransfer : public FileTransfer
                   {uri}, parentAct)
             , uploadData(std::move(uploadData))
             , noBody(noBody)
-            , callback([cb{std::move(callback)}] (std::exception_ptr ex, FileTransferResult r) {
+            , callback([this, cb{std::move(callback)}] (std::exception_ptr ex, FileTransferResult r) {
                   cb(ex);
-                  return r;
+                  return std::pair{std::move(r), std::move(downloadData)};
               })
             , dataCallback(std::move(dataCallback))
         {
@@ -159,7 +162,7 @@ struct curlFileTransfer : public FileTransfer
                 writtenToSink += realSize;
                 dataCallback(*this, {static_cast<const char *>(contents), realSize});
             } else {
-                this->result.data.append(static_cast<const char *>(contents), realSize);
+                this->downloadData.append(static_cast<const char *>(contents), realSize);
             }
 
             return realSize;
@@ -183,7 +186,7 @@ struct curlFileTransfer : public FileTransfer
             static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
             if (std::smatch match; std::regex_match(line, match, statusLine)) {
                 result.etag = "";
-                result.data.clear();
+                downloadData.clear();
                 bodySize = 0;
                 statusMsg = trim(match.str(1));
                 acceptRanges = false;
@@ -328,7 +331,7 @@ struct curlFileTransfer : public FileTransfer
             if (writtenToSink)
                 curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);
 
-            result.data.clear();
+            downloadData.clear();
             bodySize = 0;
         }
 
@@ -348,8 +351,8 @@ struct curlFileTransfer : public FileTransfer
             // wrapping user `callback`s instead is not possible because the
            // Callback api expects std::functions, and copying Callbacks is
             // not possible due the promises they hold.
-            if (code == CURLE_OK && !dataCallback && result.data.length() > 0) {
-                result.data = decompress(encoding, result.data);
+            if (code == CURLE_OK && !dataCallback && downloadData.length() > 0) {
+                downloadData = decompress(encoding, downloadData);
             }
 
             if (writeException)
@@ -417,7 +420,7 @@ struct curlFileTransfer : public FileTransfer
 
                 std::optional<std::string> response;
                 if (!successfulStatuses.count(httpStatus))
-                    response = std::move(result.data);
+                    response = std::move(downloadData);
                 auto exc =
                     code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
                     ? FileTransferError(Interrupted, std::move(response), "%s of '%s' was interrupted", verb(), uri)
@@ -657,7 +660,7 @@ struct curlFileTransfer : public FileTransfer
     }
 #endif
-    std::future<FileTransferResult>
+    std::future<std::pair<FileTransferResult, std::string>>
     enqueueDownload(const std::string & uri, const Headers & headers = {}) override
     {
         return enqueueFileTransfer(uri, headers, std::nullopt, false);
     }
@@ -668,7 +671,7 @@ struct curlFileTransfer : public FileTransfer
         enqueueFileTransfer(uri, headers, std::move(data), false).get();
     }
 
-    std::future<FileTransferResult> enqueueFileTransfer(
+    std::future<std::pair<FileTransferResult, std::string>> enqueueFileTransfer(
        const std::string & uri,
        const Headers & headers,
        std::optional<std::string> data,
@@ -689,7 +692,7 @@ struct curlFileTransfer : public FileTransfer
        );
    }
 
-    std::future<FileTransferResult> enqueueFileTransfer(
+    std::future<std::pair<FileTransferResult, std::string>> enqueueFileTransfer(
        const std::string & uri,
        const Headers & headers,
        std::invocable<std::exception_ptr> auto callback,
@@ -701,28 +704,32 @@ struct curlFileTransfer : public FileTransfer
         /* Ugly hack to support s3:// URIs. */
         if (uri.starts_with("s3://")) {
             // FIXME: do this on a worker thread
-            return std::async(std::launch::deferred, [uri]() -> FileTransferResult {
+            return std::async(
+                std::launch::deferred,
+                [uri]() -> std::pair<FileTransferResult, std::string> {
 #if ENABLE_S3
-                auto [bucketName, key, params] = parseS3Uri(uri);
+                    auto [bucketName, key, params] = parseS3Uri(uri);
 
-                std::string profile = getOr(params, "profile", "");
-                std::string region = getOr(params, "region", Aws::Region::US_EAST_1);
-                std::string scheme = getOr(params, "scheme", "");
-                std::string endpoint = getOr(params, "endpoint", "");
+                    std::string profile = getOr(params, "profile", "");
+                    std::string region = getOr(params, "region", Aws::Region::US_EAST_1);
+                    std::string scheme = getOr(params, "scheme", "");
+                    std::string endpoint = getOr(params, "endpoint", "");
 
-                S3Helper s3Helper(profile, region, scheme, endpoint);
+                    S3Helper s3Helper(profile, region, scheme, endpoint);
 
-                // FIXME: implement ETag
-                auto s3Res = s3Helper.getObject(bucketName, key);
-                FileTransferResult res;
-                if (!s3Res.data)
-                    throw FileTransferError(NotFound, "S3 object '%s' does not exist", uri);
-                res.data = std::move(*s3Res.data);
-                return res;
+                    // FIXME: implement ETag
+                    auto s3Res = s3Helper.getObject(bucketName, key);
+                    FileTransferResult res;
+                    if (!s3Res.data)
+                        throw FileTransferError(NotFound, "S3 object '%s' does not exist", uri);
+                    return {res, std::move(*s3Res.data)};
 #else
-                throw nix::Error("cannot download '%s' because Lix is not built with S3 support", uri);
+                    throw nix::Error(
+                        "cannot download '%s' because Lix is not built with S3 support", uri
+                    );
 #endif
-            });
+                }
+            );
         }
 
         return enqueueItem(std::make_shared<TransferItem>(
diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh
index 2900fdee5..8ee88a7a8 100644
--- a/src/libstore/filetransfer.hh
+++ b/src/libstore/filetransfer.hh
@@ -57,7 +57,6 @@ struct FileTransferResult
     bool cached = false;
     std::string etag;
     std::string effectiveUri;
-    std::string data;
     /* An "immutable" URL for this resource (i.e. one whose contents will
        never change), as returned by the `Link: <url>; rel="immutable"`
        header. */
@@ -74,7 +73,7 @@ struct FileTransfer
      * Enqueues a download request, returning a future for the result of
      * the download. The future may throw a FileTransferError exception.
      */
-    virtual std::future<FileTransferResult>
+    virtual std::future<std::pair<FileTransferResult, std::string>>
     enqueueDownload(const std::string & uri, const Headers & headers = {}) = 0;
 
     /**
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 4afec752f..80524b58d 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -161,7 +161,7 @@ protected:
         checkEnabled();
 
         try {
-            return std::move(getFileTransfer()->enqueueDownload(makeURI(path)).get().data);
+            return std::move(getFileTransfer()->enqueueDownload(makeURI(path)).get().second);
         } catch (FileTransferError & e) {
             if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
                 return {};
diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc
index bf0d47000..c6cc74672 100644
--- a/src/nix/upgrade-nix.cc
+++ b/src/nix/upgrade-nix.cc
@@ -285,11 +285,11 @@ struct CmdUpgradeNix : MixDryRun, EvalCommand
         Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version");
 
         // FIXME: use nixos.org?
-        auto res = getFileTransfer()->enqueueDownload(storePathsUrl).get();
+        auto [res, data] = getFileTransfer()->enqueueDownload(storePathsUrl).get();
 
         auto state = std::make_unique<EvalState>(SearchPath{}, store);
         auto v = state->allocValue();
-        state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v);
+        state->eval(state->parseExprFromString(data, state->rootPath(CanonPath("/no-such-path"))), *v);
 
         Bindings & bindings(*state->allocBindings(0));
         auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first;
diff --git a/tests/unit/libstore/filetransfer.cc b/tests/unit/libstore/filetransfer.cc
index 3d4a8993d..0e0a2439e 100644
--- a/tests/unit/libstore/filetransfer.cc
+++ b/tests/unit/libstore/filetransfer.cc
@@ -215,7 +215,7 @@ TEST(FileTransfer, usesIntermediateLinkHeaders)
         {"200 ok", "content-length: 1\r\n", [] { return "a"; }},
     });
 
     auto ft = makeFileTransfer(0);
-    auto result = ft->enqueueDownload(fmt("http://[::1]:%d/first", port)).get();
+    auto [result, _data] = ft->enqueueDownload(fmt("http://[::1]:%d/first", port)).get();
     ASSERT_EQ(result.immutableUrl, "http://foo");
 }
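
Illustrative note (not part of the patch): a minimal, self-contained sketch of the calling convention this change introduces. enqueueDownload() now resolves to a pair of metadata and body, so callers destructure the pair the way tarball.cc and upgrade-nix.cc do above. The FileTransferResult struct and the enqueueDownload() stub below are simplified stand-ins, not the real Lix interfaces; only the shape of the API is mirrored.

// Simplified stand-ins: a trimmed-down FileTransferResult and a fake
// enqueueDownload() that resolves immediately, mirroring the new
// std::pair<FileTransferResult, std::string> future shape.
#include <future>
#include <iostream>
#include <string>
#include <utility>

struct FileTransferResult
{
    std::string etag;
    std::string effectiveUri;
};

std::future<std::pair<FileTransferResult, std::string>> enqueueDownload(std::string uri)
{
    // Stand-in transfer: resolve with fake metadata and a fake body.
    return std::async(std::launch::deferred, [uri = std::move(uri)] {
        return std::pair{FileTransferResult{"\"some-etag\"", uri}, std::string("hello")};
    });
}

int main()
{
    // Callers destructure the pair: .first carries the metadata, .second the
    // body that used to live in FileTransferResult::data.
    auto [res, data] = enqueueDownload("https://example.org/file").get();
    std::cout << res.effectiveUri << ": " << data.size() << " bytes\n";
    return 0;
}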