Change the lock file to a graph

This enables support for cycles between flakes.
This commit is contained in:
Eelco Dolstra 2020-03-12 22:06:57 +01:00
parent e188fe7c6d
commit ae9119167e
7 changed files with 303 additions and 253 deletions

View file

@ -1,22 +1,27 @@
locks: rootSrc: rootSubdir:
lockFileStr: rootSrc: rootSubdir:
let
callFlake = sourceInfo: subdir: locks:
lockFile = builtins.fromJSON lockFileStr;
allNodes =
builtins.mapAttrs
(key: node:
let
flake = import (sourceInfo + "/" + subdir + "/flake.nix");
inputs = builtins.mapAttrs (n: v:
if v.flake or true
then callFlake (fetchTree (removeAttrs v.locked ["dir"])) (v.locked.dir or "") v.inputs
else fetchTree v.locked) locks;
sourceInfo = if key == lockFile.root then rootSrc else fetchTree (removeAttrs node.locked ["dir"]);
subdir = if key == lockFile.root then rootSubdir else node.locked.dir or "";
flake = import (sourceInfo + (if subdir != "" then "/" else "") + subdir + "/flake.nix");
inputs = builtins.mapAttrs (inputName: key: allNodes.${key}) (node.inputs or {});
outputs = flake.outputs (inputs // { self = result; });
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
in
assert flake.edition == 201909;
if node.flake or true then
assert flake.edition or flake.epoch or 0 == 201909;
assert builtins.isFunction flake.outputs;
result
else
sourceInfo
)
lockFile.nodes;
result;
in callFlake rootSrc rootSubdir (builtins.fromJSON locks).inputs
in allNodes.${lockFile.root}

View file

@ -293,52 +293,6 @@ Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup
return getFlake(state, originalRef, {}, allowLookup, flakeCache);
}
/* Flatten the nested lock-file tree into a map from input paths
   (e.g. {"dwarffs","nixpkgs"}) to the corresponding locked input,
   so two lock files can be compared entry-by-entry in diffLockFiles(). */
static void flattenLockFile(
const LockedInputs & inputs,
const InputPath & prefix,
std::map<InputPath, const LockedInput *> & res)
{
// Depth-first walk: each child's path is the parent's prefix plus its id.
for (auto &[id, input] : inputs.inputs) {
auto inputPath(prefix);
inputPath.push_back(id);
res.emplace(inputPath, &input);
// Recurse into this input's own locked inputs.
flattenLockFile(input, inputPath, res);
}
}
/* Produce a human-readable summary of the differences between two
   lock files, one "* Added/Removed/Updated ..." line per changed input. */
static std::string diffLockFiles(const LockedInputs & oldLocks, const LockedInputs & newLocks)
{
// Flatten both trees into maps keyed by input path; std::map keeps
// the keys ordered, enabling the merge-style walk below.
std::map<InputPath, const LockedInput *> oldFlat, newFlat;
flattenLockFile(oldLocks, {}, oldFlat);
flattenLockFile(newLocks, {}, newFlat);
auto i = oldFlat.begin();
auto j = newFlat.begin();
std::string res;
// Merge the two sorted sequences: a key present only in newFlat was
// added, only in oldFlat was removed, in both is compared for updates.
while (i != oldFlat.end() || j != newFlat.end()) {
if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef);
++j;
} else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first));
++i;
} else {
// Same input path in both: report only if the locked ref changed.
if (!(i->second->lockedRef == j->second->lockedRef)) {
// Sanity check: ref inequality must be visible in the string form too.
assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string());
res += fmt("* Updated '%s': '%s' -> '%s'\n",
concatStringsSep("/", i->first),
i->second->lockedRef,
j->second->lockedRef);
}
++i;
++j;
}
}
return res;
}
/* Compute an in-memory lock file for the specified top-level flake,
and optionally write it to file, if the flake is writable. */
LockedFlake lockFlake(
@ -380,8 +334,8 @@ LockedFlake lockFlake(
/* Recurse into the flake inputs. */
std::function<void(
const FlakeInputs & flakeInputs,
const LockedInputs & oldLocks,
LockedInputs & newLocks,
std::shared_ptr<const Node> oldLocks,
std::shared_ptr<Node> newLocks,
const InputPath & inputPathPrefix)>
updateLocks;
@ -389,8 +343,8 @@ LockedFlake lockFlake(
updateLocks = [&](
const FlakeInputs & flakeInputs,
const LockedInputs & oldLocks,
LockedInputs & newLocks,
std::shared_ptr<const Node> oldLocks,
std::shared_ptr<Node> newLocks,
const InputPath & inputPathPrefix)
{
/* Get the overrides (i.e. attributes of the form
@ -428,14 +382,16 @@ LockedFlake lockFlake(
input of the dwarffs input of the root flake),
but if it's from an override, it's relative to
the *root* of the lock file. */
auto follows = (hasOverride ? newLockFile : newLocks).findInput(*input.follows);
auto follows = (hasOverride ? newLockFile.root : newLocks)->findInput(*input.follows);
if (follows)
newLocks.inputs.insert_or_assign(id, **follows);
newLocks->inputs.insert_or_assign(id, follows);
else
/* We haven't processed the source of the
"follows" yet (e.g. "dwarffs/nixpkgs"). So
we'll need another round of the fixpoint
iteration. */
// FIXME: now that LockFile is a graph, we
// could pre-create the missing node.
unresolved.push_back(inputPath);
continue;
}
@ -443,16 +399,25 @@ LockedFlake lockFlake(
/* Do we have an entry in the existing lock file? And
we don't have a --update-input flag for this
input? */
auto oldLock =
auto oldLockIt =
lockFlags.inputUpdates.count(inputPath)
? oldLocks.inputs.end()
: oldLocks.inputs.find(id);
? oldLocks->inputs.end()
: oldLocks->inputs.find(id);
if (oldLock != oldLocks.inputs.end() && oldLock->second.originalRef == input.ref && !hasOverride) {
std::shared_ptr<const LockedNode> oldLock;
if (oldLockIt != oldLocks->inputs.end()) {
oldLock = std::dynamic_pointer_cast<const LockedNode>(oldLockIt->second);
assert(oldLock);
}
if (oldLock
&& oldLock->originalRef == input.ref
&& !hasOverride)
{
/* Copy the input from the old lock file if its
flakeref didn't change and there is no override
from a higher level flake. */
newLocks.inputs.insert_or_assign(id, oldLock->second);
newLocks->inputs.insert_or_assign(id, std::make_shared<LockedNode>(*oldLock));
/* If we have an --update-input flag for an input
of this input, then we must fetch the flake to
@ -466,11 +431,11 @@ LockedFlake lockFlake(
if (hasChildUpdate) {
auto inputFlake = getFlake(
state, oldLock->second.lockedRef, oldLock->second.info, false, flakeCache);
state, oldLock->lockedRef, oldLock->info, false, flakeCache);
updateLocks(inputFlake.inputs,
(const LockedInputs &) oldLock->second,
newLocks.inputs.find(id)->second,
oldLock,
newLocks->inputs.find(id)->second,
inputPath);
} else {
@ -480,12 +445,14 @@ LockedFlake lockFlake(
check those. */
FlakeInputs fakeInputs;
for (auto & i : oldLock->second.inputs)
fakeInputs.emplace(i.first, FlakeInput { .ref = i.second.originalRef });
for (auto & i : oldLock->inputs)
fakeInputs.emplace(i.first, FlakeInput {
.ref = std::dynamic_pointer_cast<LockedNode>(i.second)->originalRef
});
updateLocks(fakeInputs,
oldLock->second,
newLocks.inputs.find(id)->second,
oldLock,
newLocks->inputs.find(id)->second,
inputPath);
}
@ -499,8 +466,8 @@ LockedFlake lockFlake(
if (input.isFlake) {
auto inputFlake = getFlake(state, input.ref, {}, lockFlags.useRegistries, flakeCache);
newLocks.inputs.insert_or_assign(id,
LockedInput(inputFlake.lockedRef, inputFlake.originalRef, inputFlake.sourceInfo->info));
newLocks->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(inputFlake.lockedRef, inputFlake.originalRef, inputFlake.sourceInfo->info));
/* Recursively process the inputs of this
flake. Also, unless we already have this
@ -515,25 +482,25 @@ LockedFlake lockFlake(
Finally cleanup([&]() { parents.pop_back(); });
updateLocks(inputFlake.inputs,
oldLock != oldLocks.inputs.end()
? (const LockedInputs &) oldLock->second
oldLock
? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read(
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock"),
newLocks.inputs.find(id)->second,
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
newLocks->inputs.find(id)->second,
inputPath);
}
else {
auto [sourceInfo, lockedRef] = fetchOrSubstituteTree(
state, input.ref, {}, lockFlags.useRegistries, flakeCache);
newLocks.inputs.insert_or_assign(id,
LockedInput(lockedRef, input.ref, sourceInfo.info, false));
newLocks->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(lockedRef, input.ref, sourceInfo.info, false));
}
}
}
};
updateLocks(flake.inputs, oldLockFile, newLockFile, {});
updateLocks(flake.inputs, oldLockFile.root, newLockFile.root, {});
/* Check if there is a cycle in the "follows" inputs. */
if (!unresolved.empty() && unresolved == prevUnresolved) {
@ -619,8 +586,7 @@ LockedFlake lockFlake(
}
void callFlake(EvalState & state,
const Flake & flake,
const LockedInputs & lockedInputs,
const LockedFlake & lockedFlake,
Value & vRes)
{
auto vLocks = state.allocValue();
@ -629,11 +595,11 @@ void callFlake(EvalState & state,
auto vTmp1 = state.allocValue();
auto vTmp2 = state.allocValue();
mkString(*vLocks, lockedInputs.to_string());
mkString(*vLocks, lockedFlake.lockFile.to_string());
emitTreeAttrs(state, *flake.sourceInfo, flake.lockedRef.input, *vRootSrc);
emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc);
mkString(*vRootSubdir, flake.lockedRef.subdir);
mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);
static Value * vCallFlake = nullptr;
@ -649,13 +615,6 @@ void callFlake(EvalState & state,
state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
}
void callFlake(EvalState & state,
const LockedFlake & lockedFlake,
Value & v)
{
callFlake(state, lockedFlake.flake, lockedFlake.lockFile, v);
}
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
callFlake(state,

View file

@ -96,13 +96,7 @@ LockedFlake lockFlake(
void callFlake(
EvalState & state,
const Flake & flake,
const LockedInputs & inputs,
Value & v);
void callFlake(
EvalState & state,
const LockedFlake & resFlake,
const LockedFlake & lockedFlake,
Value & v);
}

View file

@ -67,9 +67,8 @@ static TreeInfo parseTreeInfo(const nlohmann::json & json)
throw Error("attribute 'info' missing in lock file");
}
LockedInput::LockedInput(const nlohmann::json & json)
: LockedInputs(json)
, lockedRef(getFlakeRef(json, "url", "uri", "locked"))
LockedNode::LockedNode(const nlohmann::json & json)
: lockedRef(getFlakeRef(json, "url", "uri", "locked"))
, originalRef(getFlakeRef(json, "originalUrl", "originalUri", "original"))
, info(parseTreeInfo(json))
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
@ -90,103 +89,139 @@ static nlohmann::json treeInfoToJson(const TreeInfo & info)
return json;
}
nlohmann::json LockedInput::toJson() const
{
auto json = LockedInputs::toJson();
json["original"] = fetchers::attrsToJson(originalRef.toAttrs());
json["locked"] = fetchers::attrsToJson(lockedRef.toAttrs());
json["info"] = treeInfoToJson(info);
if (!isFlake) json["flake"] = false;
return json;
}
StorePath LockedInput::computeStorePath(Store & store) const
StorePath LockedNode::computeStorePath(Store & store) const
{
return info.computeStorePath(store);
}
/* Parse the "inputs" attribute of a lock-file JSON object; each entry
   becomes a LockedInput (whose constructor recurses into its own inputs). */
LockedInputs::LockedInputs(const nlohmann::json & json)
{
for (auto & i : json["inputs"].items())
inputs.insert_or_assign(i.key(), LockedInput(i.value()));
}
/* Serialize this set of locked inputs back to JSON: an object with a
   single "inputs" attribute mapping input ids to their JSON form. */
nlohmann::json LockedInputs::toJson() const
{
nlohmann::json json;
{
auto j = nlohmann::json::object();
for (auto & i : inputs)
j[i.first] = i.second.toJson();
json["inputs"] = std::move(j);
}
return json;
}
/* Render the JSON form as a 2-space-indented string. */
std::string LockedInputs::to_string() const
{
return toJson().dump(2);
}
/* Return true iff every input, recursively, has an immutable locked
   ref (i.e. the lock file fully pins all dependencies). */
bool LockedInputs::isImmutable() const
{
for (auto & i : inputs)
if (!i.second.lockedRef.input->isImmutable() || !i.second.isImmutable()) return false;
return true;
}
std::optional<LockedInput *> LockedInputs::findInput(const InputPath & path)
std::shared_ptr<Node> Node::findInput(const InputPath & path)
{
assert(!path.empty());
LockedInputs * pos = this;
auto pos = shared_from_this();
for (auto & elem : path) {
auto i = pos->inputs.find(elem);
if (i == pos->inputs.end())
return {};
pos = &i->second;
pos = i->second;
}
return (LockedInput *) pos;
return pos;
}
void LockedInputs::removeInput(const InputPath & path)
LockFile::LockFile(const nlohmann::json & json, const Path & path)
{
assert(!path.empty());
auto version = json.value("version", 0);
if (version < 3 || version > 5)
throw Error("lock file '%s' has unsupported version %d", path, version);
LockedInputs * pos = this;
if (version < 5) {
std::function<void(Node & node, const nlohmann::json & json)> getInputs;
for (size_t n = 0; n < path.size(); n++) {
auto i = pos->inputs.find(path[n]);
if (i == pos->inputs.end()) return;
if (n + 1 == path.size())
pos->inputs.erase(i);
else
pos = &i->second;
getInputs = [&](Node & node, const nlohmann::json & json)
{
for (auto & i : json["inputs"].items()) {
auto input = std::make_shared<LockedNode>(i.value());
getInputs(*input, i.value());
node.inputs.insert_or_assign(i.key(), input);
}
};
getInputs(*root, json);
}
else {
std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
getInputs = [&](Node & node, const nlohmann::json & jsonNode)
{
if (jsonNode.find("inputs") == jsonNode.end()) return;
for (auto & i : jsonNode["inputs"].items()) {
std::string inputKey = i.value();
auto k = nodeMap.find(inputKey);
if (k == nodeMap.end()) {
auto jsonNode2 = json["nodes"][inputKey];
auto input = std::make_shared<LockedNode>(jsonNode2);
k = nodeMap.insert_or_assign(inputKey, input).first;
getInputs(*input, jsonNode2);
}
node.inputs.insert_or_assign(i.key(), k->second);
}
};
std::string rootKey = json["root"];
nodeMap.insert_or_assign(rootKey, root);
getInputs(*root, json["nodes"][rootKey]);
}
}
nlohmann::json LockFile::toJson() const
{
auto json = LockedInputs::toJson();
json["version"] = 4;
nlohmann::json nodes;
std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
std::unordered_set<std::string> keys;
std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode;
dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
{
auto k = nodeKeys.find(node);
if (k != nodeKeys.end())
return k->second;
if (!keys.insert(key).second) {
for (int n = 2; ; ++n) {
auto k = fmt("%s_%d", key, n);
if (keys.insert(k).second) {
key = k;
break;
}
}
}
nodeKeys.insert_or_assign(node, key);
auto n = nlohmann::json::object();
if (!node->inputs.empty()) {
auto inputs = nlohmann::json::object();
for (auto & i : node->inputs)
inputs[i.first] = dumpNode(i.first, i.second);
n["inputs"] = std::move(inputs);
}
if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
n["original"] = fetchers::attrsToJson(lockedNode->originalRef.toAttrs());
n["locked"] = fetchers::attrsToJson(lockedNode->lockedRef.toAttrs());
n["info"] = treeInfoToJson(lockedNode->info);
if (!lockedNode->isFlake) n["flake"] = false;
}
nodes[key] = std::move(n);
return key;
};
nlohmann::json json;
json["version"] = 5;
json["root"] = dumpNode("root", root);
json["nodes"] = std::move(nodes);
return json;
}
std::string LockFile::to_string() const
{
return toJson().dump(2);
}
LockFile LockFile::read(const Path & path)
{
if (pathExists(path)) {
auto json = nlohmann::json::parse(readFile(path));
auto version = json.value("version", 0);
if (version != 3 && version != 4)
throw Error("lock file '%s' has unsupported version %d", path, version);
return LockFile(json);
} else
return LockFile();
if (!pathExists(path)) return LockFile();
return LockFile(nlohmann::json::parse(readFile(path)), path);
}
std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
@ -201,6 +236,35 @@ void LockFile::write(const Path & path) const
writeFile(path, fmt("%s\n", *this));
}
/* Return true iff every locked node in the graph has an immutable
   locked ref. Graph-aware replacement for LockedInputs::isImmutable():
   the visited-set makes the traversal safe for cycles and shared nodes. */
bool LockFile::isImmutable() const
{
std::unordered_set<std::shared_ptr<const Node>> nodes;
std::function<void(std::shared_ptr<const Node> node)> visit;
visit = [&](std::shared_ptr<const Node> node)
{
// insert() returns false for an already-visited node, terminating cycles.
if (!nodes.insert(node).second) return;
for (auto & i : node->inputs) visit(i.second);
};
visit(root);
for (auto & i : nodes) {
// The root node is not a LockedNode and has no ref to check.
if (i == root) continue;
auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
if (lockedNode && !lockedNode->lockedRef.input->isImmutable()) return false;
}
return true;
}
/* Structural equality via the serialized form: two lock files are equal
   iff they produce the same JSON. */
bool LockFile::operator ==(const LockFile & other) const
{
// FIXME: slow
return toJson() == other.toJson();
}
InputPath parseInputPath(std::string_view s)
{
InputPath path;
@ -217,4 +281,52 @@ InputPath parseInputPath(std::string_view s)
return path;
}
/* Flatten the lock-file graph into a map from input paths to locked
   nodes, for entry-by-entry comparison in diffLockFiles(). Only
   LockedNodes are recorded; the root node (plain Node) is skipped. */
static void flattenLockFile(
std::shared_ptr<const Node> node,
const InputPath & prefix,
std::map<InputPath, std::shared_ptr<const LockedNode>> & res)
{
// FIXME: handle cycles
// NOTE(review): unlike LockFile::isImmutable() there is no visited-set
// here, so a cyclic graph would recurse without bound — hence the FIXME.
for (auto &[id, input] : node->inputs) {
auto inputPath(prefix);
inputPath.push_back(id);
if (auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input))
res.emplace(inputPath, lockedInput);
flattenLockFile(input, inputPath, res);
}
}
/* Produce a human-readable summary of the differences between two
   lock files, one "* Added/Removed/Updated ..." line per changed input. */
std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks)
{
// Flatten both graphs into path-keyed sorted maps so they can be
// compared with a single merge-style walk.
std::map<InputPath, std::shared_ptr<const LockedNode>> oldFlat, newFlat;
flattenLockFile(oldLocks.root, {}, oldFlat);
flattenLockFile(newLocks.root, {}, newFlat);
auto i = oldFlat.begin();
auto j = newFlat.begin();
std::string res;
// Merge walk: key only in newFlat => added, only in oldFlat => removed,
// in both => compare locked refs for an update.
while (i != oldFlat.end() || j != newFlat.end()) {
if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef);
++j;
} else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first));
++i;
} else {
if (!(i->second->lockedRef == j->second->lockedRef)) {
// Sanity check: ref inequality must be visible in the string form too.
assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string());
res += fmt("* Updated '%s': '%s' -> '%s'\n",
concatStringsSep("/", i->first),
i->second->lockedRef,
j->second->lockedRef);
}
++i;
++j;
}
}
return res;
}
}

View file

@ -15,35 +15,26 @@ using namespace fetchers;
typedef std::vector<FlakeId> InputPath;
struct LockedInput;
/* Lock file information about the dependencies of a flake. */
struct LockedInputs
/* A node in the lock file. It has outgoing edges to other nodes (its
inputs). Only the root node has this type; all other nodes have
type LockedNode. */
struct Node : std::enable_shared_from_this<Node>
{
std::map<FlakeId, LockedInput> inputs;
std::map<FlakeId, std::shared_ptr<Node>> inputs;
LockedInputs() {};
LockedInputs(const nlohmann::json & json);
virtual ~Node() { }
nlohmann::json toJson() const;
std::string to_string() const;
bool isImmutable() const;
std::optional<LockedInput *> findInput(const InputPath & path);
void removeInput(const InputPath & path);
std::shared_ptr<Node> findInput(const InputPath & path);
};
/* Lock file information about a flake input. */
struct LockedInput : LockedInputs
/* A non-root node in the lock file. */
struct LockedNode : Node
{
FlakeRef lockedRef, originalRef;
TreeInfo info;
bool isFlake = true;
LockedInput(
LockedNode(
const FlakeRef & lockedRef,
const FlakeRef & originalRef,
const TreeInfo & info,
@ -51,51 +42,36 @@ struct LockedInput : LockedInputs
: lockedRef(lockedRef), originalRef(originalRef), info(info), isFlake(isFlake)
{ }
LockedInput(const nlohmann::json & json);
bool operator ==(const LockedInput & other) const
{
return
lockedRef == other.lockedRef
&& originalRef == other.originalRef
&& info == other.info
&& inputs == other.inputs
&& isFlake == other.isFlake;
}
nlohmann::json toJson() const;
LockedNode(const nlohmann::json & json);
StorePath computeStorePath(Store & store) const;
};
/* An entire lock file. Note that this cannot be a FlakeInput for the
top-level flake, because then the lock file would need to contain
the hash of the top-level flake, but committing the lock file
would invalidate that hash. */
struct LockFile : LockedInputs
struct LockFile
{
bool operator ==(const LockFile & other) const
{
return inputs == other.inputs;
}
std::shared_ptr<Node> root = std::make_shared<Node>();
LockFile() {}
LockFile(const nlohmann::json & json) : LockedInputs(json) {}
LockFile(LockedInput && dep)
{
inputs = std::move(dep.inputs);
}
LockFile() {};
LockFile(const nlohmann::json & json, const Path & path);
nlohmann::json toJson() const;
std::string to_string() const;
static LockFile read(const Path & path);
void write(const Path & path) const;
bool isImmutable() const;
bool operator ==(const LockFile & other) const;
};
std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile);
InputPath parseInputPath(std::string_view s);
std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks);
}

View file

@ -199,24 +199,25 @@ struct CmdFlakeListInputs : FlakeCommand, MixJSON
stopProgressBar();
if (json)
std::cout << ((LockedInputs &) flake.lockFile).toJson() << "\n";
std::cout << flake.lockFile.toJson() << "\n";
else {
std::cout << fmt("%s\n", flake.flake.lockedRef);
std::function<void(const LockedInputs & inputs, const std::string & prefix)> recurse;
std::function<void(const Node & node, const std::string & prefix)> recurse;
recurse = [&](const LockedInputs & inputs, const std::string & prefix)
recurse = [&](const Node & node, const std::string & prefix)
{
for (const auto & [i, input] : enumerate(inputs.inputs)) {
for (const auto & [i, input] : enumerate(node.inputs)) {
//auto tree2 = tree.child(i + 1 == inputs.inputs.size());
bool last = i + 1 == inputs.inputs.size();
bool last = i + 1 == node.inputs.size();
std::cout << fmt("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s\n",
prefix + (last ? treeLast : treeConn), input.first, input.second.lockedRef);
recurse(input.second, prefix + (last ? treeNull : treeLine));
prefix + (last ? treeLast : treeConn), input.first,
std::dynamic_pointer_cast<const LockedNode>(input.second)->lockedRef);
recurse(*input.second, prefix + (last ? treeNull : treeLine));
}
};
recurse(flake.lockFile, "");
recurse(*flake.lockFile.root, "");
}
}
};
@ -664,23 +665,26 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
if (jsonRoot)
jsonRoot->attr("path", store->printStorePath(flake.flake.sourceInfo->storePath));
std::function<void(const LockedInputs & inputs, std::optional<JSONObject> & jsonObj)> traverse;
traverse = [&](const LockedInputs & inputs, std::optional<JSONObject> & jsonObj)
// FIXME: use graph output, handle cycles.
std::function<void(const Node & node, std::optional<JSONObject> & jsonObj)> traverse;
traverse = [&](const Node & node, std::optional<JSONObject> & jsonObj)
{
auto jsonObj2 = jsonObj ? jsonObj->object("inputs") : std::optional<JSONObject>();
for (auto & input : inputs.inputs) {
for (auto & input : node.inputs) {
auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input.second);
assert(lockedInput);
auto jsonObj3 = jsonObj2 ? jsonObj2->object(input.first) : std::optional<JSONObject>();
if (!dryRun)
input.second.lockedRef.input->fetchTree(store);
auto storePath = input.second.computeStorePath(*store);
lockedInput->lockedRef.input->fetchTree(store);
auto storePath = lockedInput->computeStorePath(*store);
if (jsonObj3)
jsonObj3->attr("path", store->printStorePath(storePath));
sources.insert(std::move(storePath));
traverse(input.second, jsonObj3);
traverse(*lockedInput, jsonObj3);
}
};
traverse(flake.lockFile, jsonRoot);
traverse(*flake.lockFile.root, jsonRoot);
if (!dryRun && !dstUri.empty()) {
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);

View file

@ -517,7 +517,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF
nix flake update $flake3Dir
[[ $(jq .inputs.foo.locked $flake3Dir/flake.lock) = $(jq .inputs.bar.locked $flake3Dir/flake.lock) ]]
[[ $(jq .nodes.foo.locked $flake3Dir/flake.lock) = $(jq .nodes.bar.locked $flake3Dir/flake.lock) ]]
cat > $flake3Dir/flake.nix <<EOF
{
@ -531,7 +531,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF
nix flake update $flake3Dir
[[ $(jq .inputs.bar.locked.url $flake3Dir/flake.lock) =~ flake1 ]]
[[ $(jq .nodes.bar.locked.url $flake3Dir/flake.lock) =~ flake1 ]]
cat > $flake3Dir/flake.nix <<EOF
{
@ -545,7 +545,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF
nix flake update $flake3Dir
[[ $(jq .inputs.bar.locked.url $flake3Dir/flake.lock) =~ flake2 ]]
[[ $(jq .nodes.bar.locked.url $flake3Dir/flake.lock) =~ flake2 ]]
# Test overriding inputs of inputs.
cat > $flake3Dir/flake.nix <<EOF
@ -563,7 +563,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF
nix flake update $flake3Dir
[[ $(jq .inputs.flake2.inputs.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
[[ $(jq .nodes.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
cat > $flake3Dir/flake.nix <<EOF
{
@ -578,7 +578,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF
nix flake update $flake3Dir --recreate-lock-file
[[ $(jq .inputs.flake2.inputs.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
[[ $(jq .nodes.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
# Test Mercurial flakes.
rm -rf $flake5Dir
@ -636,21 +636,21 @@ nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ
# Test --override-input.
git -C $flake3Dir reset --hard
nix flake update $flake3Dir --override-input flake2/flake1 flake5
[[ $(jq .inputs.flake2.inputs.flake1.locked.url $flake3Dir/flake.lock) =~ flake5 ]]
nix flake update $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
[[ $(jq .nodes.flake1_2.locked.url $flake3Dir/flake.lock) =~ flake5 ]]
nix flake update $flake3Dir --override-input flake2/flake1 flake1
[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
nix flake update $flake3Dir --override-input flake2/flake1 flake1/master/$hash1
[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) =~ $hash1 ]]
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash1 ]]
# Test --update-input.
nix flake update $flake3Dir
[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) = $hash1 ]]
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) = $hash1 ]]
nix flake update $flake3Dir --update-input flake2/flake1
[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
# Test 'nix flake list-inputs'.
[[ $(nix flake list-inputs $flake3Dir | wc -l) == 5 ]]