2019-02-12 17:23:11 +00:00
|
|
|
#include "flake.hh"
|
2018-11-29 18:18:36 +00:00
|
|
|
#include "primops.hh"
|
|
|
|
#include "eval-inline.hh"
|
|
|
|
#include "fetchGit.hh"
|
|
|
|
#include "download.hh"
|
2019-02-21 05:53:01 +00:00
|
|
|
#include "args.hh"
|
2018-11-29 18:18:36 +00:00
|
|
|
|
2019-02-21 05:53:01 +00:00
|
|
|
#include <iostream>
|
2018-11-29 18:18:36 +00:00
|
|
|
#include <queue>
|
2018-11-30 15:11:15 +00:00
|
|
|
#include <regex>
|
2019-05-28 18:34:02 +00:00
|
|
|
#include <ctime>
|
|
|
|
#include <iomanip>
|
2018-11-29 18:18:36 +00:00
|
|
|
#include <nlohmann/json.hpp>
|
|
|
|
|
|
|
|
namespace nix {
|
|
|
|
|
2019-05-29 13:31:07 +00:00
|
|
|
using namespace flake;
|
|
|
|
|
|
|
|
namespace flake {
|
|
|
|
|
2019-04-15 12:08:18 +00:00
|
|
|
/* Read a registry. */
|
2019-03-21 08:30:16 +00:00
|
|
|
std::shared_ptr<FlakeRegistry> readRegistry(const Path & path)
|
2019-02-12 21:43:22 +00:00
|
|
|
{
|
2019-03-21 08:30:16 +00:00
|
|
|
auto registry = std::make_shared<FlakeRegistry>();
|
2019-02-12 21:43:22 +00:00
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
if (!pathExists(path))
|
|
|
|
return std::make_shared<FlakeRegistry>();
|
2019-03-26 11:48:57 +00:00
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
auto json = nlohmann::json::parse(readFile(path));
|
2019-03-26 11:48:57 +00:00
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
auto version = json.value("version", 0);
|
|
|
|
if (version != 1)
|
|
|
|
throw Error("flake registry '%s' has unsupported version %d", path, version);
|
|
|
|
|
|
|
|
auto flakes = json["flakes"];
|
2019-04-08 17:03:00 +00:00
|
|
|
for (auto i = flakes.begin(); i != flakes.end(); ++i)
|
|
|
|
registry->entries.emplace(i.key(), FlakeRef(i->value("uri", "")));
|
2019-02-12 21:43:22 +00:00
|
|
|
|
|
|
|
return registry;
|
|
|
|
}
|
|
|
|
|
2019-04-15 12:08:18 +00:00
|
|
|
/* Write a registry to a file. */
|
2019-04-16 12:27:54 +00:00
|
|
|
void writeRegistry(const FlakeRegistry & registry, const Path & path)
|
2019-02-25 12:46:37 +00:00
|
|
|
{
|
2019-03-29 15:18:25 +00:00
|
|
|
nlohmann::json json;
|
2019-06-04 17:10:35 +00:00
|
|
|
json["version"] = 2;
|
2019-04-08 17:03:00 +00:00
|
|
|
for (auto elem : registry.entries)
|
|
|
|
json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} };
|
2019-03-26 11:48:57 +00:00
|
|
|
createDirs(dirOf(path));
|
2019-02-25 12:46:37 +00:00
|
|
|
writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file.
|
|
|
|
}
|
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Deserialize a locked non-flake dependency from its lock-file JSON
   representation: {"uri": ..., "narHash": ...}. */
NonFlakeDep::NonFlakeDep(const nlohmann::json & json)
    : ref(json["uri"])
    , narHash(Hash((std::string) json["narHash"]))
{
    // A lock file must pin every dependency exactly; a mutable
    // reference (e.g. a branch without a revision) would defeat it.
    if (!ref.isImmutable())
        throw Error("lockfile contains mutable flakeref '%s'", ref);
}
|
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Serialize this locked non-flake dependency to lock-file JSON. */
nlohmann::json NonFlakeDep::toJson() const
{
    return nlohmann::json{
        {"uri", ref.to_string()},
        {"narHash", narHash.to_string(SRI)},
    };
}
|
2019-03-29 15:18:25 +00:00
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Deserialize a locked flake dependency, including its own transitive
   inputs (handled by the FlakeInputs base class). */
FlakeDep::FlakeDep(const nlohmann::json & json)
    : FlakeInputs(json)
    , id(json["id"])
    , ref(json["uri"])
    , narHash(Hash((std::string) json["narHash"]))
{
    // Lock files must pin dependencies to immutable references.
    if (!ref.isImmutable())
        throw Error("lockfile contains mutable flakeref '%s'", ref);
}
|
2019-03-29 15:18:25 +00:00
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Serialize this locked flake dependency, starting from its own
   (recursively serialized) inputs. */
nlohmann::json FlakeDep::toJson() const
{
    auto result = FlakeInputs::toJson();
    result["narHash"] = narHash.to_string(SRI);
    result["uri"] = ref.to_string();
    result["id"] = id;
    return result;
}
|
2019-03-29 15:18:25 +00:00
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Deserialize the 'nonFlakeInputs' and 'inputs' maps of a lock-file
   entry. */
FlakeInputs::FlakeInputs(const nlohmann::json & json)
{
    for (const auto & entry : json["nonFlakeInputs"].items())
        nonFlakeDeps.insert_or_assign(entry.key(), NonFlakeDep(entry.value()));

    for (const auto & entry : json["inputs"].items())
        flakeDeps.insert_or_assign(entry.key(), FlakeDep(entry.value()));
}
|
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Serialize the two input maps into the lock-file JSON shape:
   {"nonFlakeInputs": {...}, "inputs": {...}}. */
nlohmann::json FlakeInputs::toJson() const
{
    auto nonFlakes = nlohmann::json::object();
    for (const auto & dep : nonFlakeDeps)
        nonFlakes[dep.first] = dep.second.toJson();

    auto flakes = nlohmann::json::object();
    for (const auto & dep : flakeDeps)
        flakes[dep.first.to_string()] = dep.second.toJson();

    nlohmann::json json;
    json["nonFlakeInputs"] = std::move(nonFlakes);
    json["inputs"] = std::move(flakes);
    return json;
}
|
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Serialize the whole lock file: the serialized inputs tagged with
   the lock-file format version. */
nlohmann::json LockFile::toJson() const
{
    auto result = FlakeInputs::toJson();
    result["version"] = 2;
    return result;
}
|
|
|
|
|
|
|
|
/* Read a lock file from 'path'. A missing file yields an empty lock
   file; a present file must declare version 2. */
LockFile readLockFile(const Path & path)
{
    if (!pathExists(path))
        return LockFile();

    auto json = nlohmann::json::parse(readFile(path));

    auto version = json.value("version", 0);
    if (version != 2)
        throw Error("lock file '%s' has unsupported version %d", path, version);

    return LockFile(json);
}
|
|
|
|
|
2019-05-31 18:10:56 +00:00
|
|
|
std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
|
2019-03-29 15:18:25 +00:00
|
|
|
{
|
2019-06-04 17:10:35 +00:00
|
|
|
stream << lockFile.toJson().dump(4); // '4' = indentation in json file
|
2019-05-31 18:10:56 +00:00
|
|
|
return stream;
|
|
|
|
}
|
|
|
|
|
|
|
|
void writeLockFile(const LockFile & lockFile, const Path & path)
|
|
|
|
{
|
2019-03-29 15:18:25 +00:00
|
|
|
createDirs(dirOf(path));
|
2019-05-31 18:10:56 +00:00
|
|
|
writeFile(path, fmt("%s\n", lockFile));
|
2019-03-29 15:18:25 +00:00
|
|
|
}
|
|
|
|
|
2019-04-08 17:03:00 +00:00
|
|
|
/* Location of the per-user flake registry. */
Path getUserRegistryPath()
{
    // NOTE(review): hard-codes ~/.config/nix rather than honouring
    // $XDG_CONFIG_HOME — confirm whether that is intentional.
    return getHome() + "/.config/nix/registry.json";
}
|
|
|
|
|
2019-04-08 17:03:00 +00:00
|
|
|
/* Read the per-user flake registry (empty if the file is missing —
   see readRegistry). */
std::shared_ptr<FlakeRegistry> getUserRegistry()
{
    return readRegistry(getUserRegistryPath());
}
|
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
/* Build an in-memory registry from the command-line registry
   overrides (each entry maps one flake reference to another). */
std::shared_ptr<FlakeRegistry> getFlagRegistry(RegistryOverrides registryOverrides)
{
    auto registry = std::make_shared<FlakeRegistry>();
    for (const auto & override : registryOverrides)
        registry->entries.insert_or_assign(FlakeRef(override.first), FlakeRef(override.second));
    return registry;
}
|
2018-11-29 18:18:36 +00:00
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
|
2019-04-30 10:47:15 +00:00
|
|
|
std::vector<FlakeRef> pastSearches = {});
|
|
|
|
|
|
|
|
/* Continue a registry lookup through 'newRef', detecting cycles: if
   'newRef' was already visited on this lookup chain, the registries
   would loop forever, so fail with the chain in the message. */
FlakeRef updateFlakeRef(EvalState & state, const FlakeRef & newRef, const Registries & registries, std::vector<FlakeRef> pastSearches)
{
    std::string errorMsg = "found cycle in flake registries: ";
    for (const auto & seen : pastSearches) {
        errorMsg += seen.to_string();
        if (seen == newRef)
            throw Error(errorMsg);
        errorMsg += " - ";
    }
    pastSearches.push_back(newRef);
    return lookupFlake(state, newRef, registries, pastSearches);
}
|
|
|
|
|
|
|
|
/* Resolve 'flakeRef' through the given registries, recursing (via
   updateFlakeRef) until a direct reference is reached. */
static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
    std::vector<FlakeRef> pastSearches)
{
    if (registries.empty() && !flakeRef.isDirect())
        throw Error("indirect flake reference '%s' is not allowed", flakeRef);

    for (const auto & registry : registries) {
        // Exact match first.
        if (auto i = registry->entries.find(flakeRef); i != registry->entries.end())
            return updateFlakeRef(state, i->second, registries, pastSearches);

        // Otherwise try the base reference, carrying over the
        // requested branch and revision.
        if (auto j = registry->entries.find(flakeRef.baseRef()); j != registry->entries.end()) {
            auto newRef = j->second;
            newRef.ref = flakeRef.ref;
            newRef.rev = flakeRef.rev;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }
    }

    if (!flakeRef.isDirect())
        throw Error("could not resolve flake reference '%s'", flakeRef);

    return flakeRef;
}
|
|
|
|
|
2019-05-01 09:38:48 +00:00
|
|
|
// Fetch the source of 'flakeRef' into the store, returning its
// SourceInfo. Registry lookups happen here too (via lookupFlake),
// but only when 'impureIsAllowed'.
static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
    FlakeRef resolvedRef = lookupFlake(state, flakeRef,
        impureIsAllowed ? state.getFlakeRegistries() : std::vector<std::shared_ptr<FlakeRegistry>>());

    if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable())
        throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef);

    // Common post-processing for Git-based fetches: record the actual
    // branch/revision and store-path metadata in a SourceInfo.
    auto doGit = [&](const GitInfo & gitInfo) {
        FlakeRef ref(resolvedRef.baseRef());
        ref.ref = gitInfo.ref;
        ref.rev = gitInfo.rev;
        SourceInfo info(ref);
        info.storePath = gitInfo.storePath;
        info.revCount = gitInfo.revCount;
        info.narHash = state.store->queryPathInfo(info.storePath)->narHash;
        info.lastModified = gitInfo.lastModified;
        return info;
    };

    // GitHub: downloads only one revision of the repo, not the entire history.
    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&resolvedRef.data)) {

        // FIXME: use regular /archive URLs instead? api.github.com
        // might have stricter rate limits.

        auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s",
            refData->owner, refData->repo,
            resolvedRef.rev ? resolvedRef.rev->to_string(Base16, false)
                : resolvedRef.ref ? *resolvedRef.ref : "master");

        // NOTE(review): passing the token as a query parameter can leak
        // it into logs/caches; an Authorization header would be safer —
        // confirm before changing, since this matches the settings docs.
        std::string accessToken = settings.githubAccessToken.get();
        if (accessToken != "")
            url += "?access_token=" + accessToken;

        CachedDownloadRequest request(url);
        request.unpack = true;
        request.name = "source";
        // A pinned revision never changes, so cache it (effectively)
        // forever; otherwise use the normal tarball TTL.
        request.ttl = resolvedRef.rev ? 1000000000 : settings.tarballTtl;
        request.getLastModified = true;
        auto result = getDownloader()->downloadCached(state.store, request);

        // The ETag is expected to be the quoted 40-character commit
        // hash (42 bytes with the quotes); use it to pin the revision.
        if (!result.etag)
            throw Error("did not receive an ETag header from '%s'", url);

        if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"')
            throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url);

        FlakeRef ref(resolvedRef.baseRef());
        ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1);
        SourceInfo info(ref);
        info.storePath = result.storePath;
        info.narHash = state.store->queryPathInfo(info.storePath)->narHash;
        info.lastModified = result.lastModified;

        return info;
    }

    // Plain Git: this downloads the entire git history.
    else if (auto refData = std::get_if<FlakeRef::IsGit>(&resolvedRef.data)) {
        return doGit(exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source"));
    }

    // Local path: must be a Git checkout.
    else if (auto refData = std::get_if<FlakeRef::IsPath>(&resolvedRef.data)) {
        if (!pathExists(refData->path + "/.git"))
            throw Error("flake '%s' does not reference a Git repository", refData->path);
        return doGit(exportGit(state.store, refData->path, {}, {}, "source"));
    }

    // All FlakeRef variants that reach this point are handled above.
    else abort();
}
|
|
|
|
|
2019-05-01 09:38:48 +00:00
|
|
|
// This will return the flake which corresponds to a given FlakeRef.
// The lookupFlake is done within `fetchFlake`, which is used here.
// The fetched flake.nix is evaluated and its attributes validated.
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
    SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed);
    debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());

    FlakeRef resolvedRef = sourceInfo.resolvedRef;

    state.store->assertStorePath(sourceInfo.storePath);

    // In restricted-eval mode, make the fetched source readable.
    if (state.allowedPaths)
        state.allowedPaths->insert(state.store->toRealPath(sourceInfo.storePath));

    // Guard against symlink attacks: the resolved flake.nix must stay
    // inside the fetched store path.
    Path flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix");
    Path realFlakeFile = state.store->toRealPath(flakeFile);
    if (!isInDir(realFlakeFile, state.store->toRealPath(sourceInfo.storePath)))
        throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath);

    Flake flake(flakeRef, sourceInfo);

    if (!pathExists(realFlakeFile))
        throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir);

    Value vInfo;
    state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack

    state.forceAttrs(vInfo);

    auto sEpoch = state.symbols.create("epoch");

    // 'epoch' (required): flake format version; reject flakes written
    // for a newer Nix than this one supports.
    if (auto epoch = vInfo.attrs->get(sEpoch)) {
        flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos);
        if (flake.epoch > 201906)
            throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch);
    } else
        throw Error("flake '%s' lacks attribute 'epoch'", flakeRef);

    // 'name' (required): the flake's identifier.
    if (auto name = vInfo.attrs->get(state.sName))
        flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos);
    else
        throw Error("flake '%s' lacks attribute 'name'", flakeRef);

    // 'description' (optional).
    if (auto description = vInfo.attrs->get(state.sDescription))
        flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos);

    auto sInputs = state.symbols.create("inputs");

    // 'inputs' (optional): list of flake reference strings this flake
    // depends on.
    if (auto inputs = vInfo.attrs->get(sInputs)) {
        state.forceList(*(**inputs).value, *(**inputs).pos);
        for (unsigned int n = 0; n < (**inputs).value->listSize(); ++n)
            flake.inputs.push_back(FlakeRef(state.forceStringNoCtx(
                *(**inputs).value->listElems()[n], *(**inputs).pos)));
    }

    auto sNonFlakeInputs = state.symbols.create("nonFlakeInputs");

    // 'nonFlakeInputs' (optional): attrset mapping aliases to the URIs
    // of dependencies that are not themselves flakes.
    if (std::optional<Attr *> nonFlakeInputs = vInfo.attrs->get(sNonFlakeInputs)) {
        state.forceAttrs(*(**nonFlakeInputs).value, *(**nonFlakeInputs).pos);
        for (Attr attr : *(*(**nonFlakeInputs).value).attrs) {
            std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos);
            FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri);
            flake.nonFlakeInputs.insert_or_assign(attr.name, nonFlakeRef);
        }
    }

    auto sOutputs = state.symbols.create("outputs");

    // 'outputs' (required): the function producing the flake's outputs.
    if (auto outputs = vInfo.attrs->get(sOutputs)) {
        state.forceFunction(*(**outputs).value, *(**outputs).pos);
        flake.vOutputs = (**outputs).value;
    } else
        throw Error("flake '%s' lacks attribute 'outputs'", flakeRef);

    // Reject unknown top-level attributes to catch typos early.
    for (auto & attr : *vInfo.attrs) {
        if (attr.name != sEpoch &&
            attr.name != state.sName &&
            attr.name != state.sDescription &&
            attr.name != sInputs &&
            attr.name != sNonFlakeInputs &&
            attr.name != sOutputs)
            throw Error("flake '%s' has an unsupported attribute '%s', at %s",
                flakeRef, attr.name, *attr.pos);
    }

    return flake;
}
|
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
// Get the `NonFlake` corresponding to a `FlakeRef`.
|
2019-05-28 08:51:45 +00:00
|
|
|
NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias, bool impureIsAllowed = false)
|
2019-03-21 08:30:16 +00:00
|
|
|
{
|
2019-05-28 10:58:28 +00:00
|
|
|
auto sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed);
|
2019-05-01 09:38:48 +00:00
|
|
|
debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());
|
2019-03-21 08:30:16 +00:00
|
|
|
|
2019-05-01 09:38:48 +00:00
|
|
|
FlakeRef resolvedRef = sourceInfo.resolvedRef;
|
2019-03-21 08:30:16 +00:00
|
|
|
|
2019-05-01 09:38:48 +00:00
|
|
|
NonFlake nonFlake(flakeRef, sourceInfo);
|
2019-03-21 08:30:16 +00:00
|
|
|
|
2019-05-28 10:58:28 +00:00
|
|
|
state.store->assertStorePath(nonFlake.sourceInfo.storePath);
|
2019-03-21 08:30:16 +00:00
|
|
|
|
2019-05-01 09:38:48 +00:00
|
|
|
if (state.allowedPaths)
|
2019-05-28 10:58:28 +00:00
|
|
|
state.allowedPaths->insert(nonFlake.sourceInfo.storePath);
|
2019-03-21 08:30:16 +00:00
|
|
|
|
2019-03-21 08:30:16 +00:00
|
|
|
nonFlake.alias = alias;
|
2019-03-21 08:30:16 +00:00
|
|
|
|
|
|
|
return nonFlake;
|
|
|
|
}
|
|
|
|
|
2019-05-21 13:03:54 +00:00
|
|
|
// Whether this lock-file handling mode permits writing an updated
// lock file to disk.
bool allowedToWrite(HandleLockFile handle)
{
    switch (handle) {
        case UpdateLockFile:
        case RecreateLockFile:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
2019-05-21 13:03:54 +00:00
|
|
|
// Whether this mode ignores the existing lock file and computes a
// fresh one from scratch.
bool recreateLockFile(HandleLockFile handle)
{
    switch (handle) {
        case RecreateLockFile:
        case UseNewLockFile:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
2019-05-21 13:03:54 +00:00
|
|
|
// Whether this mode may consult the flake registries when resolving
// references. 'isTopRef' distinguishes the top-level flake from its
// dependencies (TopRefUsesRegistries only allows the former).
bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef)
{
    switch (handle) {
        case AllPure:
            return false;
        case TopRefUsesRegistries:
            return isTopRef;
        case UpdateLockFile:
        case UseUpdatedLockFile:
        case RecreateLockFile:
        case UseNewLockFile:
            return true;
    }
    assert(false); // unreachable for valid enum values
}
|
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Compute a fresh lock entry for 'flakeRef': reuse locks from
   'oldEntry' where present, otherwise fetch and lock the missing
   dependencies recursively. Returns the fetched flake together with
   its new lock entry. Fetching anything new is forbidden in the pure
   modes (AllPure, TopRefUsesRegistries). */
static std::pair<Flake, FlakeDep> updateLocks(
    EvalState & state,
    const FlakeRef & flakeRef,
    HandleLockFile handleLockFile,
    const FlakeInputs & oldEntry,
    bool topRef)
{
    auto flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef));

    FlakeDep newEntry(
        flake.id,
        flake.sourceInfo.resolvedRef,
        flake.sourceInfo.narHash);

    // Non-flake inputs: keep the old lock when one exists, otherwise
    // fetch and pin the dependency now.
    for (auto & input : flake.nonFlakeInputs) {
        auto & id = input.first;
        auto & ref = input.second;
        auto i = oldEntry.nonFlakeDeps.find(id);
        if (i != oldEntry.nonFlakeDeps.end()) {
            newEntry.nonFlakeDeps.insert_or_assign(i->first, i->second);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update non-flake dependency '%s' in pure mode", id);
            auto nonFlake = getNonFlake(state, ref, id, allowedToUseRegistries(handleLockFile, false));
            newEntry.nonFlakeDeps.insert_or_assign(id,
                NonFlakeDep(
                    nonFlake.sourceInfo.resolvedRef,
                    nonFlake.sourceInfo.narHash));
        }
    }

    // Flake inputs: likewise, recursing to lock the input's own
    // dependencies (with an empty old entry, so they are re-fetched).
    for (auto & inputRef : flake.inputs) {
        auto i = oldEntry.flakeDeps.find(inputRef);
        if (i != oldEntry.flakeDeps.end()) {
            newEntry.flakeDeps.insert_or_assign(inputRef, i->second);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update flake dependency '%s' in pure mode", inputRef);
            newEntry.flakeDeps.insert_or_assign(inputRef,
                updateLocks(state, inputRef, handleLockFile, {}, false).second);
        }
    }

    return {flake, newEntry};
}
|
2019-02-12 20:55:43 +00:00
|
|
|
|
2019-05-01 09:38:48 +00:00
|
|
|
/* Given a flake reference, recursively fetch it and its
   dependencies, producing an up-to-date lock file.
   FIXME: this should return a graph of flakes.
*/
ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile)
{
    auto flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true));

    LockFile oldLockFile;

    if (!recreateLockFile(handleLockFile)) {
        // If recreateLockFile, start with an empty lockfile
        // FIXME: symlink attack
        oldLockFile = readLockFile(
            state.store->toRealPath(flake.sourceInfo.storePath)
            + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock");
    }

    // FIXME: get rid of duplicate getFlake call
    LockFile lockFile(updateLocks(
        state, topRef, handleLockFile, oldLockFile, true).second);

    // Persist the new lock file if it changed and the mode allows it.
    // Only local (path) flakes can be written back.
    if (!(lockFile == oldLockFile)) {
        if (allowedToWrite(handleLockFile)) {
            if (auto refData = std::get_if<FlakeRef::IsPath>(&topRef.data)) {
                writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");

                // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store.
                runProgram("git", true, { "-C", refData->path, "add",
                    (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" });
            } else
                warn("cannot write lockfile of remote flake '%s'", topRef);
        } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries)
            warn("using updated lockfile without writing it to file");
    }

    return ResolvedFlake(std::move(flake), std::move(lockFile));
}
|
|
|
|
|
2019-05-16 20:48:16 +00:00
|
|
|
/* Recompute (and write, when possible) the lock file of 'flakeRef'.
   'recreateLockFile' discards the existing lock file instead of
   merely adding missing entries. */
void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateLockFile)
{
    // Both modes are allowed to write the result (see allowedToWrite).
    resolveFlake(state, flakeRef, recreateLockFile ? RecreateLockFile : UpdateLockFile);
}
|
|
|
|
|
2019-05-28 12:01:08 +00:00
|
|
|
/* Add the metadata attributes of a fetched source ('outPath', and
   when available 'rev', 'shortRev', 'revCount', 'lastModified') to
   the attrset 'vAttrs'. */
static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo, Value & vAttrs)
{
    auto & path = sourceInfo.storePath;
    assert(state.store->isValidPath(path));
    // FIXME: turn into fetchGit etc.
    // FIXME: check narHash.
    mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path});

    // Revision attributes exist only when the resolved ref is pinned.
    if (sourceInfo.resolvedRef.rev) {
        mkString(*state.allocAttr(vAttrs, state.symbols.create("rev")),
            sourceInfo.resolvedRef.rev->gitRev());
        mkString(*state.allocAttr(vAttrs, state.symbols.create("shortRev")),
            sourceInfo.resolvedRef.rev->gitShortRev());
    }

    if (sourceInfo.revCount)
        mkInt(*state.allocAttr(vAttrs, state.symbols.create("revCount")), *sourceInfo.revCount);

    // Commit time formatted as 'YYYYMMDDHHMMSS' in UTC (gmtime).
    if (sourceInfo.lastModified)
        mkString(*state.allocAttr(vAttrs, state.symbols.create("lastModified")),
            fmt("%s",
                std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S")));
}
|
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Helper primop to make callFlake (below) fetch/call its inputs
   lazily. Note that this primop cannot be called by user code since
   it doesn't appear in 'builtins'. */
static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    // The argument's 'attrs' field smuggles a FlakeDep pointer — set
    // up by callFlake below, never a real Bindings.
    auto lazyFlake = (FlakeDep *) args[0]->attrs;
    auto flake = getFlake(state, lazyFlake->ref, false);
    callFlake(state, flake, *lazyFlake, v);
}
|
|
|
|
|
|
|
|
/* Build the result attrset of calling a flake: its description,
   source metadata, locked inputs, and the application of its
   'outputs' function. */
void callFlake(EvalState & state,
    const Flake & flake,
    const FlakeInputs & inputs,
    Value & v)
{
    // Construct the resulting attrset '{description, outputs,
    // ...}'. This attrset is passed lazily as an argument to 'outputs'.

    state.mkAttrs(v,
        inputs.flakeDeps.size() +
        inputs.nonFlakeDeps.size() + 8);

    // Flake inputs become lazy thunks: an application of the hidden
    // 'callFlake' primop to a value smuggling the FlakeDep (see
    // prim_callFlake).
    for (auto & dep : inputs.flakeDeps) {
        auto vFlake = state.allocAttr(v, dep.second.id);
        auto vPrimOp = state.allocValue();
        static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake"));
        vPrimOp->type = tPrimOp;
        vPrimOp->primOp = primOp;
        auto vArg = state.allocValue();
        vArg->type = tNull;
        // FIXME: leak
        vArg->attrs = (Bindings *) new FlakeDep(dep.second); // evil! also inefficient
        mkApp(*vFlake, *vPrimOp, *vArg);
    }

    // Non-flake inputs are fetched eagerly and exposed as plain
    // source-info attrsets.
    for (auto & dep : inputs.nonFlakeDeps) {
        auto vNonFlake = state.allocAttr(v, dep.first);
        state.mkAttrs(*vNonFlake, 8);

        auto nonFlake = getNonFlake(state, dep.second.ref, dep.first);

        assert(state.store->isValidPath(nonFlake.sourceInfo.storePath));

        mkString(*state.allocAttr(*vNonFlake, state.sOutPath),
            nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath});

        emitSourceInfoAttrs(state, nonFlake.sourceInfo, *vNonFlake);
    }

    mkString(*state.allocAttr(v, state.sDescription), flake.description);

    emitSourceInfoAttrs(state, flake.sourceInfo, v);

    // 'outputs' is the (lazy) application of the flake's outputs
    // function to this very attrset.
    auto vOutputs = state.allocAttr(v, state.symbols.create("outputs"));
    mkApp(*vOutputs, *flake.vOutputs, v);

    v.attrs->push_back(Attr(state.symbols.create("self"), &v));

    v.attrs->sort();
}
|
2019-02-12 20:55:43 +00:00
|
|
|
|
2019-06-04 17:10:35 +00:00
|
|
|
/* Convenience overload: call a fully resolved flake, using its lock
   file as the inputs. */
void callFlake(EvalState & state,
    const ResolvedFlake & resFlake,
    Value & v)
{
    callFlake(state, resFlake.flake, resFlake.lockFile, v);
}
|
|
|
|
|
2019-03-29 15:18:25 +00:00
|
|
|
// This function is exposed to be used in nix files
// (builtins.getFlake).
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    // In pure evaluation mode no registries may be consulted and the
    // lock file must not be updated on disk (AllPure).
    callFlake(state, resolveFlake(state, state.forceStringNoCtx(*args[0], pos),
        evalSettings.pureEval ? AllPure : UseUpdatedLockFile), v);
}

static RegisterPrimOp r2("getFlake", 1, prim_getFlake);
|
|
|
|
|
2019-05-16 20:48:16 +00:00
|
|
|
/* Clone the Git repository behind 'flakeRef' (resolved through the
   registries) into 'destDir' (or git's default directory if empty). */
void gitCloneFlake(FlakeRef flakeRef, EvalState & state, Registries registries, const Path & destDir)
{
    flakeRef = lookupFlake(state, flakeRef, registries);

    Strings args = {"clone"};

    // Append a clone URI plus, when requested, '--branch <ref>'.
    auto addSource = [&](const std::string & uri) {
        args.push_back(uri);
        if (flakeRef.ref) {
            args.push_back("--branch");
            args.push_back(*flakeRef.ref);
        }
    };

    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&flakeRef.data))
        addSource("git@github.com:" + refData->owner + "/" + refData->repo + ".git");
    else if (auto refData = std::get_if<FlakeRef::IsGit>(&flakeRef.data))
        addSource(refData->uri);

    if (destDir != "")
        args.push_back(destDir);

    runProgram("git", true, args);
}
|
|
|
|
|
2018-11-29 18:18:36 +00:00
|
|
|
}
|
2019-05-29 13:31:07 +00:00
|
|
|
|
|
|
|
/* Return the global flake registry, initializing it at most once per
   evaluator (thread-safely, via std::call_once). */
std::shared_ptr<flake::FlakeRegistry> EvalState::getGlobalFlakeRegistry()
{
    std::call_once(_globalFlakeRegistryInit, [&]() {
        auto path = evalSettings.flakeRegistry;

        // A non-absolute setting is treated as a URL: download it
        // (cached, and GC-rooted so the copy is not collected).
        if (!hasPrefix(path, "/")) {
            CachedDownloadRequest request(evalSettings.flakeRegistry);
            request.name = "flake-registry.json";
            request.gcRoot = true;
            path = getDownloader()->downloadCached(store, request).path;
        }

        _globalFlakeRegistry = readRegistry(path);
    });

    return _globalFlakeRegistry;
}
|
|
|
|
|
|
|
|
// This always returns a vector with flakeReg, userReg, globalReg.
|
|
|
|
// If one of them doesn't exist, the registry is left empty but does exist.
|
|
|
|
const Registries EvalState::getFlakeRegistries()
|
|
|
|
{
|
|
|
|
Registries registries;
|
|
|
|
registries.push_back(getFlagRegistry(registryOverrides));
|
|
|
|
registries.push_back(getUserRegistry());
|
|
|
|
registries.push_back(getGlobalFlakeRegistry());
|
|
|
|
return registries;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|