#include "flake.hh"
#include "primops.hh"
#include "eval-inline.hh"
#include "fetchGit.hh"
#include "download.hh"
#include "args.hh"

#include <iostream>
#include <queue>
#include <regex>
#include <nlohmann/json.hpp>

namespace nix {

/* Read a registry. */
std::shared_ptr<FlakeRegistry> readRegistry(const Path & path)
{
    auto registry = std::make_shared<FlakeRegistry>();

    if (!pathExists(path))
        return std::make_shared<FlakeRegistry>();

    auto json = nlohmann::json::parse(readFile(path));

    auto version = json.value("version", 0);
    if (version != 1)
        throw Error("flake registry '%s' has unsupported version %d", path, version);

    auto flakes = json["flakes"];
    for (auto i = flakes.begin(); i != flakes.end(); ++i)
        registry->entries.emplace(i.key(), FlakeRef(i->value("uri", "")));

    return registry;
}

/* Write a registry to a file. */
void writeRegistry(const FlakeRegistry & registry, const Path & path)
{
    nlohmann::json json;
    json["version"] = 1;
    for (auto elem : registry.entries)
        json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} };
    createDirs(dirOf(path));
    writeFile(path, json.dump(4)); // '4' = number of spaces used for JSON indentation
}
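
/* Parse a single flake dependency entry (including its nested entries) from lock-file JSON. */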
LockFile::FlakeEntry readFlakeEntry(nlohmann::json json)
{
    FlakeRef flakeRef(json["uri"]);
    if (!flakeRef.isImmutable())
        throw Error("cannot use mutable flake '%s' in pure mode", flakeRef);

    LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"]));

    auto nonFlakeRequires = json["nonFlakeRequires"];

    for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) {
        FlakeRef flakeRef(i->value("uri", ""));
        if (!flakeRef.isImmutable())
            throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef);
        LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", "")));
        entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
    }

    auto requires = json["requires"];

    for (auto i = requires.begin(); i != requires.end(); ++i)
        entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));

    return entry;
}
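
/* Read a lock file, returning an empty one if 'path' doesn't exist. */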
LockFile readLockFile(const Path & path)
{
    LockFile lockFile;

    if (!pathExists(path))
        return lockFile;

    auto json = nlohmann::json::parse(readFile(path));

    auto version = json.value("version", 0);
    if (version != 1)
        throw Error("lock file '%s' has unsupported version %d", path, version);

    auto nonFlakeRequires = json["nonFlakeRequires"];

    for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) {
        FlakeRef flakeRef(i->value("uri", ""));
        LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", "")));
        if (!flakeRef.isImmutable())
            throw Error("found mutable FlakeRef '%s' in lockfile at path %s", flakeRef, path);
        lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
    }

    auto requires = json["requires"];

    for (auto i = requires.begin(); i != requires.end(); ++i)
        lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));

    return lockFile;
}
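
/* Serialise a flake entry (recursively) to lock-file JSON. */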
nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry)
{
    nlohmann::json json;
    json["uri"] = entry.ref.to_string();
    json["contentHash"] = entry.contentHash.to_string(SRI);
    for (auto & x : entry.nonFlakeEntries) {
        json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string();
        json["nonFlakeRequires"][x.first]["contentHash"] = x.second.contentHash.to_string(SRI);
    }
    for (auto & x : entry.flakeEntries)
        json["requires"][x.first.to_string()] = flakeEntryToJson(x.second);
    return json;
}
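
/* Write a lock file to 'path', creating parent directories as needed. */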
void writeLockFile(const LockFile & lockFile, const Path & path)
{
    nlohmann::json json;
    json["version"] = 1;
    json["nonFlakeRequires"] = nlohmann::json::object();
    for (auto & x : lockFile.nonFlakeEntries) {
        json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string();
        json["nonFlakeRequires"][x.first]["contentHash"] = x.second.contentHash.to_string(SRI);
    }
    json["requires"] = nlohmann::json::object();
    for (auto & x : lockFile.flakeEntries)
        json["requires"][x.first.to_string()] = flakeEntryToJson(x.second);
    createDirs(dirOf(path));
    writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file
}

std::shared_ptr<FlakeRegistry> getGlobalRegistry()
{
    return readRegistry(evalSettings.flakeRegistry);
}

Path getUserRegistryPath()
{
    return getHome() + "/.config/nix/registry.json";
}

std::shared_ptr<FlakeRegistry> getUserRegistry()
{
    return readRegistry(getUserRegistryPath());
}

std::shared_ptr<FlakeRegistry> getFlagRegistry(RegistryOverrides registryOverrides)
{
    auto flagRegistry = std::make_shared<FlakeRegistry>();
    for (auto const & x : registryOverrides) {
        flagRegistry->entries.insert_or_assign(FlakeRef(x.first), FlakeRef(x.second));
    }
    return flagRegistry;
}

// Returns a vector with the flag (override) registry, the user registry and the global registry, in that order.
// A registry that doesn't exist on disk is returned as an empty (but present) registry.
const Registries EvalState::getFlakeRegistries()
{
    Registries registries;
    registries.push_back(getFlagRegistry(registryOverrides));
    registries.push_back(getUserRegistry());
    registries.push_back(getGlobalRegistry());
    return registries;
}
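
/* Resolve a flake reference by looking it up in the registries, following
   indirections until a direct reference is reached (cycles are detected by
   updateFlakeRef below). */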
static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
    std::vector<FlakeRef> pastSearches = {});

FlakeRef updateFlakeRef(EvalState & state, const FlakeRef & newRef, const Registries & registries, std::vector<FlakeRef> pastSearches)
{
    std::string errorMsg = "found cycle in flake registries: ";
    for (FlakeRef oldRef : pastSearches) {
        errorMsg += oldRef.to_string();
        if (oldRef == newRef)
            throw Error(errorMsg);
        errorMsg += " - ";
    }
    pastSearches.push_back(newRef);
    return lookupFlake(state, newRef, registries, pastSearches);
}

static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
    std::vector<FlakeRef> pastSearches)
{
    if (registries.empty() && !flakeRef.isDirect())
        throw Error("indirect flake reference '%s' is not allowed", flakeRef);

    for (std::shared_ptr<FlakeRegistry> registry : registries) {
        auto i = registry->entries.find(flakeRef);
        if (i != registry->entries.end()) {
            auto newRef = i->second;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }

        auto j = registry->entries.find(flakeRef.baseRef());
        if (j != registry->entries.end()) {
            auto newRef = j->second;
            newRef.ref = flakeRef.ref;
            newRef.rev = flakeRef.rev;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }
    }

    if (!flakeRef.isDirect())
        throw Error("could not resolve flake reference '%s'", flakeRef);

    return flakeRef;
}

// Registry lookups happen here too: resolve the flake reference, then fetch its source.
static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
    FlakeRef resolvedRef = lookupFlake(state, flakeRef,
        impureIsAllowed ? state.getFlakeRegistries() : std::vector<std::shared_ptr<FlakeRegistry>>());

    if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable())
        throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef);

    // This only downloads one revision of the repo, not the entire history.
    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&resolvedRef.data)) {

        // FIXME: use regular /archive URLs instead? api.github.com
        // might have stricter rate limits.

        auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s",
            refData->owner, refData->repo,
            resolvedRef.rev ? resolvedRef.rev->to_string(Base16, false)
                : resolvedRef.ref ? *resolvedRef.ref : "master");

        std::string accessToken = settings.githubAccessToken.get();
        if (accessToken != "")
            url += "?access_token=" + accessToken;

        auto result = getDownloader()->downloadCached(state.store, url, true, "source",
            Hash(), nullptr, resolvedRef.rev ? 1000000000 : settings.tarballTtl);

        if (!result.etag)
            throw Error("did not receive an ETag header from '%s'", url);

        if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"')
            throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url);

        FlakeRef ref(resolvedRef.baseRef());
        ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1);
        SourceInfo info(ref);
        info.storePath = result.storePath;

        return info;
    }

    // This downloads the entire Git history.
    else if (auto refData = std::get_if<FlakeRef::IsGit>(&resolvedRef.data)) {
        auto gitInfo = exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source");
        FlakeRef ref(resolvedRef.baseRef());
        ref.ref = gitInfo.ref;
        ref.rev = gitInfo.rev;
        SourceInfo info(ref);
        info.storePath = gitInfo.storePath;
        info.revCount = gitInfo.revCount;
        return info;
    }

    else if (auto refData = std::get_if<FlakeRef::IsPath>(&resolvedRef.data)) {
        if (!pathExists(refData->path + "/.git"))
            throw Error("flake '%s' does not reference a Git repository", refData->path);
        auto gitInfo = exportGit(state.store, refData->path, {}, {}, "source");
        FlakeRef ref(resolvedRef.baseRef());
        ref.ref = gitInfo.ref;
        ref.rev = gitInfo.rev;
        SourceInfo info(ref);
        info.storePath = gitInfo.storePath;
        info.revCount = gitInfo.revCount;
        return info;
    }

    else abort();
}

// Returns the Flake corresponding to a given FlakeRef; the registry lookup is done by `fetchFlake`, which is called here.
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
    SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed);
    debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());

    FlakeRef resolvedRef = sourceInfo.resolvedRef;

    state.store->assertStorePath(sourceInfo.storePath);

    if (state.allowedPaths)
        state.allowedPaths->insert(state.store->toRealPath(sourceInfo.storePath));

    // Guard against symlink attacks.
    Path flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix");
    Path realFlakeFile = state.store->toRealPath(flakeFile);
    if (!isInDir(realFlakeFile, state.store->toRealPath(sourceInfo.storePath)))
        throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath);

    Flake flake(flakeRef, sourceInfo);
    flake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash;

    if (!pathExists(realFlakeFile))
        throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir);

    Value vInfo;
    state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack

    state.forceAttrs(vInfo);

    if (auto name = vInfo.attrs->get(state.sName))
        flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos);
    else
        throw Error("flake lacks attribute 'name'");

    if (auto description = vInfo.attrs->get(state.sDescription))
        flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos);

    if (auto requires = vInfo.attrs->get(state.symbols.create("requires"))) {
        state.forceList(*(**requires).value, *(**requires).pos);
        for (unsigned int n = 0; n < (**requires).value->listSize(); ++n)
            flake.requires.push_back(FlakeRef(state.forceStringNoCtx(
                *(**requires).value->listElems()[n], *(**requires).pos)));
    }

    if (std::optional<Attr *> nonFlakeRequires = vInfo.attrs->get(state.symbols.create("nonFlakeRequires"))) {
        state.forceAttrs(*(**nonFlakeRequires).value, *(**nonFlakeRequires).pos);
        for (Attr attr : *(*(**nonFlakeRequires).value).attrs) {
            std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos);
            FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri);
            flake.nonFlakeRequires.insert_or_assign(attr.name, nonFlakeRef);
        }
    }

    if (auto provides = vInfo.attrs->get(state.symbols.create("provides"))) {
        state.forceFunction(*(**provides).value, *(**provides).pos);
        flake.vProvides = (**provides).value;
    } else
        throw Error("flake lacks attribute 'provides'");

    return flake;
}

// Get the `NonFlake` corresponding to a `FlakeRef`.
NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias)
{
    SourceInfo sourceInfo = fetchFlake(state, flakeRef);
    debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());

    FlakeRef resolvedRef = sourceInfo.resolvedRef;

    NonFlake nonFlake(flakeRef, sourceInfo);

    state.store->assertStorePath(nonFlake.storePath);

    if (state.allowedPaths)
        state.allowedPaths->insert(nonFlake.storePath);

    nonFlake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash;

    nonFlake.alias = alias;

    return nonFlake;
}
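
/* Turn the dependencies recorded in a flake entry into a lock file for that flake. */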
LockFile entryToLockFile(const LockFile::FlakeEntry & entry)
{
    LockFile lockFile;
    lockFile.flakeEntries = entry.flakeEntries;
    lockFile.nonFlakeEntries = entry.nonFlakeEntries;
    return lockFile;
}
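
/* Convert a resolved flake and its dependencies into a lock-file entry. */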
LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake)
{
    LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.hash);

    for (auto & info : resolvedFlake.flakeDeps)
        entry.flakeEntries.insert_or_assign(info.first.to_string(), dependenciesToFlakeEntry(info.second));

    for (auto & nonFlake : resolvedFlake.nonFlakeDeps) {
        LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.hash);
        entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry);
    }

    return entry;
}
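
// Helpers describing what a given HandleLockFile policy permits.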
bool allowedToWrite(HandleLockFile handle)
{
    return handle == UpdateLockFile || handle == RecreateLockFile;
}

bool recreateLockFile(HandleLockFile handle)
{
    return handle == RecreateLockFile || handle == UseNewLockFile;
}

bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef)
{
    if (handle == AllPure) return false;
    else if (handle == TopRefUsesRegistries) return isTopRef;
    else if (handle == UpdateLockFile) return true;
    else if (handle == UseUpdatedLockFile) return true;
    else if (handle == RecreateLockFile) return true;
    else if (handle == UseNewLockFile) return true;
    else assert(false);
}
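
/* Resolve a flake and its dependencies, reusing entries from 'lockFile' where possible. */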
ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef,
    HandleLockFile handleLockFile, LockFile lockFile = {}, bool topRef = false)
{
    Flake flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef));

    ResolvedFlake deps(flake);

    for (auto & nonFlakeInfo : flake.nonFlakeRequires) {
        FlakeRef ref = nonFlakeInfo.second;
        auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first);
        if (i != lockFile.nonFlakeEntries.end()) {
            NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first);
            if (nonFlake.hash != i->second.contentHash)
                throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string());
            deps.nonFlakeDeps.push_back(nonFlake);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update non-flake dependency '%s' in pure mode", nonFlakeInfo.first);
            deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first));
        }
    }

    for (auto newFlakeRef : flake.requires) {
        auto i = lockFile.flakeEntries.find(newFlakeRef);
        if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible
            ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second));
            if (newResFlake.flake.hash != i->second.contentHash)
                throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string());
            deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update flake dependency '%s' in pure mode", newFlakeRef.to_string());
            deps.flakeDeps.insert_or_assign(newFlakeRef, resolveFlakeFromLockFile(state, newFlakeRef, handleLockFile));
        }
    }

    return deps;
}

/* Given a flake reference, recursively fetch it and its dependencies.
   FIXME: this should return a graph of flakes.
*/
ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile)
{
    Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true));
    LockFile oldLockFile;

    // If recreateLockFile is set, start from an empty lock file; otherwise read the existing one.
    if (!recreateLockFile(handleLockFile)) {
        oldLockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack
    }

    LockFile lockFile(oldLockFile);

    ResolvedFlake resFlake = resolveFlakeFromLockFile(state, topRef, handleLockFile, lockFile, true);
    lockFile = entryToLockFile(dependenciesToFlakeEntry(resFlake));

    if (!(lockFile == oldLockFile)) {
        if (allowedToWrite(handleLockFile)) {
            if (auto refData = std::get_if<FlakeRef::IsPath>(&topRef.data)) {
                writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");

                // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store.
                runProgram("git", true, { "-C", refData->path, "add",
                    (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" });
            } else
                warn("cannot write lockfile of remote flake '%s'", topRef);
        } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries)
            warn("using updated lockfile without writing it to file");
    }

    return resFlake;
}
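
/* Update (or recreate) the lock file of the flake identified by 'flakeUri'. */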
void updateLockFile(EvalState & state, const FlakeUri & flakeUri, bool recreateLockFile)
{
    FlakeRef flakeRef(flakeUri);
    resolveFlake(state, flakeRef, recreateLockFile ? RecreateLockFile : UpdateLockFile);
}
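
/* Construct the Nix value corresponding to a resolved flake and, recursively, its dependencies. */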
void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v)
{
    // Construct the resulting attrset '{description, provides,
    // ...}'. This attrset is passed lazily as an argument to 'provides'.

    state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8);

    for (auto info : resFlake.flakeDeps) {
        const ResolvedFlake newResFlake = info.second;
        auto vFlake = state.allocAttr(v, newResFlake.flake.id);
        callFlake(state, newResFlake, *vFlake);
    }

    for (const NonFlake nonFlake : resFlake.nonFlakeDeps) {
        auto vNonFlake = state.allocAttr(v, nonFlake.alias);
        state.mkAttrs(*vNonFlake, 4);

        state.store->isValidPath(nonFlake.storePath);
        mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.storePath, {nonFlake.storePath});

        // FIXME: add rev, shortRev, revCount, ...
    }

    mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description);

    auto & path = resFlake.flake.storePath;
    state.store->isValidPath(path);
    mkString(*state.allocAttr(v, state.sOutPath), path, {path});

    if (resFlake.flake.resolvedRef.rev) {
        mkString(*state.allocAttr(v, state.symbols.create("rev")),
            resFlake.flake.resolvedRef.rev->gitRev());
        mkString(*state.allocAttr(v, state.symbols.create("shortRev")),
            resFlake.flake.resolvedRef.rev->gitShortRev());
    }

    if (resFlake.flake.revCount)
        mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.revCount);

    auto vProvides = state.allocAttr(v, state.symbols.create("provides"));
    mkApp(*vProvides, *resFlake.flake.vProvides, v);

    v.attrs->push_back(Attr(state.symbols.create("self"), &v));

    v.attrs->sort();
}

// Produce in `v` the value of the top-level flake, which includes the
// `provides` of its dependencies as well.
void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, HandleLockFile handle, Value & v)
{
    callFlake(state, resolveFlake(state, flakeRef, handle), v);
}

// This function is exposed to be used in Nix files.
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos),
        evalSettings.pureEval ? AllPure : UseUpdatedLockFile, v);
}

static RegisterPrimOp r2("getFlake", 1, prim_getFlake);
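
// Clone the Git repository behind 'flakeUri' into 'endDirectory' (or Git's default clone location if empty).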
void gitCloneFlake(std::string flakeUri, EvalState & state, Registries registries,
    Path endDirectory)
{
    FlakeRef flakeRef(flakeUri);
    flakeRef = lookupFlake(state, flakeRef, registries);

    std::string uri;

    Strings args = {"clone"};

    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&flakeRef.data)) {
        uri = "git@github.com:" + refData->owner + "/" + refData->repo + ".git";
        args.push_back(uri);
        if (flakeRef.ref) {
            args.push_back("--branch");
            args.push_back(*flakeRef.ref);
        }
    } else if (auto refData = std::get_if<FlakeRef::IsGit>(&flakeRef.data)) {
        args.push_back(refData->uri);
        if (flakeRef.ref) {
            args.push_back("--branch");
            args.push_back(*flakeRef.ref);
        }
    }

    if (endDirectory != "")
        args.push_back(endDirectory);

    runProgram("git", true, args);
}

}