Change the lock file to a graph

This enables support for cycles between flakes.
Eelco Dolstra 2020-03-12 22:06:57 +01:00
parent e188fe7c6d
commit ae9119167e
7 changed files with 303 additions and 253 deletions
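
For orientation (this example is not part of the commit), the new graph-shaped lock file written by LockFile::toJson() looks roughly like this: a flat "nodes" table keyed by name, a "root" attribute naming the entry node, and each node's "inputs" mapping an input name to the key of another node. An input that "follows" another input simply points at the same node key, so shared inputs appear once and cycles become representable. The field names inside "original", "locked" and "info" below are illustrative:

{
  "version": 5,
  "root": "root",
  "nodes": {
    "root": {
      "inputs": { "flake1": "flake1", "flake2": "flake2" }
    },
    "flake2": {
      "inputs": { "flake1": "flake1" },
      "original": { "id": "flake2", "type": "indirect" },
      "locked": { "type": "git", "url": "file:///tmp/flake2", "rev": "..." },
      "info": { "narHash": "sha256-..." }
    },
    "flake1": {
      "original": { "id": "flake1", "type": "indirect" },
      "locked": { "type": "git", "url": "file:///tmp/flake1", "rev": "..." },
      "info": { "narHash": "sha256-..." }
    }
  }
}

When two distinct nodes would otherwise claim the same key, toJson() disambiguates with a numeric suffix ("flake1_2"), which is why the updated tests at the end of this commit query .nodes.flake1_2 instead of .inputs.flake2.inputs.flake1.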

View file

@@ -1,22 +1,27 @@
-locks: rootSrc: rootSubdir:
-
-let
-
-  callFlake = sourceInfo: subdir: locks:
-    let
-      flake = import (sourceInfo + "/" + subdir + "/flake.nix");
-
-      inputs = builtins.mapAttrs (n: v:
-        if v.flake or true
-        then callFlake (fetchTree (removeAttrs v.locked ["dir"])) (v.locked.dir or "") v.inputs
-        else fetchTree v.locked) locks;
-
-      outputs = flake.outputs (inputs // { self = result; });
-
-      result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
-    in
-      assert flake.edition == 201909;
-
-      result;
-
-in callFlake rootSrc rootSubdir (builtins.fromJSON locks).inputs
+lockFileStr: rootSrc: rootSubdir:
+
+let
+
+  lockFile = builtins.fromJSON lockFileStr;
+
+  allNodes =
+    builtins.mapAttrs
+      (key: node:
+        let
+          sourceInfo = if key == lockFile.root then rootSrc else fetchTree (removeAttrs node.locked ["dir"]);
+          subdir = if key == lockFile.root then rootSubdir else node.locked.dir or "";
+          flake = import (sourceInfo + (if subdir != "" then "/" else "") + subdir + "/flake.nix");
+          inputs = builtins.mapAttrs (inputName: key: allNodes.${key}) (node.inputs or {});
+          outputs = flake.outputs (inputs // { self = result; });
+          result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
+        in
+          if node.flake or true then
+            assert flake.edition or flake.epoch or 0 == 201909;
+            assert builtins.isFunction flake.outputs;
+            result
+          else
+            sourceInfo
+      )
+      lockFile.nodes;
+
+in allNodes.${lockFile.root}
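
Why this supports cycles: allNodes maps every node key to its result, and each node's inputs are looked up lazily via allNodes.${key}, so two nodes may reference each other as long as evaluation never forces an infinite path. A minimal sketch of the same idea in plain Nix (names are illustrative, not from the commit):

let
  nodes = {
    a = { name = "a"; inputs = { b = nodes.b; }; };
    b = { name = "b"; inputs = { a = nodes.a; }; };
  };
in
  # Follows the cycle a -> b -> a and still terminates, because only
  # the attributes that are actually demanded get evaluated.
  nodes.a.inputs.b.inputs.a.name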

View file

@@ -293,52 +293,6 @@ Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup
    return getFlake(state, originalRef, {}, allowLookup, flakeCache);
}
static void flattenLockFile(
const LockedInputs & inputs,
const InputPath & prefix,
std::map<InputPath, const LockedInput *> & res)
{
for (auto &[id, input] : inputs.inputs) {
auto inputPath(prefix);
inputPath.push_back(id);
res.emplace(inputPath, &input);
flattenLockFile(input, inputPath, res);
}
}
static std::string diffLockFiles(const LockedInputs & oldLocks, const LockedInputs & newLocks)
{
std::map<InputPath, const LockedInput *> oldFlat, newFlat;
flattenLockFile(oldLocks, {}, oldFlat);
flattenLockFile(newLocks, {}, newFlat);
auto i = oldFlat.begin();
auto j = newFlat.begin();
std::string res;
while (i != oldFlat.end() || j != newFlat.end()) {
if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef);
++j;
} else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first));
++i;
} else {
if (!(i->second->lockedRef == j->second->lockedRef)) {
assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string());
res += fmt("* Updated '%s': '%s' -> '%s'\n",
concatStringsSep("/", i->first),
i->second->lockedRef,
j->second->lockedRef);
}
++i;
++j;
}
}
return res;
}
/* Compute an in-memory lock file for the specified top-level flake,
   and optionally write it to file, it the flake is writable. */
LockedFlake lockFlake(
@@ -380,8 +334,8 @@ LockedFlake lockFlake(
    /* Recurse into the flake inputs. */
    std::function<void(
        const FlakeInputs & flakeInputs,
-       const LockedInputs & oldLocks,
-       LockedInputs & newLocks,
+       std::shared_ptr<const Node> oldLocks,
+       std::shared_ptr<Node> newLocks,
        const InputPath & inputPathPrefix)>
        updateLocks;
@@ -389,8 +343,8 @@ LockedFlake lockFlake(
    updateLocks = [&](
        const FlakeInputs & flakeInputs,
-       const LockedInputs & oldLocks,
-       LockedInputs & newLocks,
+       std::shared_ptr<const Node> oldLocks,
+       std::shared_ptr<Node> newLocks,
        const InputPath & inputPathPrefix)
    {
        /* Get the overrides (i.e. attributes of the form
@@ -428,14 +382,16 @@
               input of the dwarffs input of the root flake),
               but if it's from an override, it's relative to
               the *root* of the lock file. */
-            auto follows = (hasOverride ? newLockFile : newLocks).findInput(*input.follows);
+            auto follows = (hasOverride ? newLockFile.root : newLocks)->findInput(*input.follows);
            if (follows)
-                newLocks.inputs.insert_or_assign(id, **follows);
+                newLocks->inputs.insert_or_assign(id, follows);
            else
                /* We haven't processed the source of the
                   "follows" yet (e.g. "dwarffs/nixpkgs"). So
                   we'll need another round of the fixpoint
                   iteration. */
+                // FIXME: now that LockFile is a graph, we
+                // could pre-create the missing node.
                unresolved.push_back(inputPath);
            continue;
        }
@@ -443,16 +399,25 @@
        /* Do we have an entry in the existing lock file? And
           we don't have a --update-input flag for this
           input? */
-        auto oldLock =
+        auto oldLockIt =
            lockFlags.inputUpdates.count(inputPath)
-            ? oldLocks.inputs.end()
-            : oldLocks.inputs.find(id);
+            ? oldLocks->inputs.end()
+            : oldLocks->inputs.find(id);

-        if (oldLock != oldLocks.inputs.end() && oldLock->second.originalRef == input.ref && !hasOverride) {
+        std::shared_ptr<const LockedNode> oldLock;
+        if (oldLockIt != oldLocks->inputs.end()) {
+            oldLock = std::dynamic_pointer_cast<const LockedNode>(oldLockIt->second);
+            assert(oldLock);
+        }
+
+        if (oldLock
+            && oldLock->originalRef == input.ref
+            && !hasOverride)
+        {
            /* Copy the input from the old lock file if its
               flakeref didn't change and there is no override
               from a higher level flake. */
-            newLocks.inputs.insert_or_assign(id, oldLock->second);
+            newLocks->inputs.insert_or_assign(id, std::make_shared<LockedNode>(*oldLock));

            /* If we have an --update-input flag for an input
               of this input, then we must fetch the flake to
@@ -466,11 +431,11 @@
            if (hasChildUpdate) {
                auto inputFlake = getFlake(
-                    state, oldLock->second.lockedRef, oldLock->second.info, false, flakeCache);
+                    state, oldLock->lockedRef, oldLock->info, false, flakeCache);
                updateLocks(inputFlake.inputs,
-                    (const LockedInputs &) oldLock->second,
-                    newLocks.inputs.find(id)->second,
+                    oldLock,
+                    newLocks->inputs.find(id)->second,
                    inputPath);
            } else {
@@ -480,12 +445,14 @@
                   check those. */
                FlakeInputs fakeInputs;
-                for (auto & i : oldLock->second.inputs)
-                    fakeInputs.emplace(i.first, FlakeInput { .ref = i.second.originalRef });
+                for (auto & i : oldLock->inputs)
+                    fakeInputs.emplace(i.first, FlakeInput {
+                        .ref = std::dynamic_pointer_cast<LockedNode>(i.second)->originalRef
+                    });
                updateLocks(fakeInputs,
-                    oldLock->second,
-                    newLocks.inputs.find(id)->second,
+                    oldLock,
+                    newLocks->inputs.find(id)->second,
                    inputPath);
            }
@@ -499,8 +466,8 @@
            if (input.isFlake) {
                auto inputFlake = getFlake(state, input.ref, {}, lockFlags.useRegistries, flakeCache);

-                newLocks.inputs.insert_or_assign(id,
-                    LockedInput(inputFlake.lockedRef, inputFlake.originalRef, inputFlake.sourceInfo->info));
+                newLocks->inputs.insert_or_assign(id,
+                    std::make_shared<LockedNode>(inputFlake.lockedRef, inputFlake.originalRef, inputFlake.sourceInfo->info));

                /* Recursively process the inputs of this
                   flake. Also, unless we already have this
@@ -515,25 +482,25 @@
                Finally cleanup([&]() { parents.pop_back(); });

                updateLocks(inputFlake.inputs,
-                    oldLock != oldLocks.inputs.end()
-                    ? (const LockedInputs &) oldLock->second
+                    oldLock
+                    ? std::dynamic_pointer_cast<const Node>(oldLock)
                    : LockFile::read(
-                        inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock"),
-                    newLocks.inputs.find(id)->second,
+                        inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
+                    newLocks->inputs.find(id)->second,
                    inputPath);
            }

            else {
                auto [sourceInfo, lockedRef] = fetchOrSubstituteTree(
                    state, input.ref, {}, lockFlags.useRegistries, flakeCache);
-                newLocks.inputs.insert_or_assign(id,
-                    LockedInput(lockedRef, input.ref, sourceInfo.info, false));
+                newLocks->inputs.insert_or_assign(id,
+                    std::make_shared<LockedNode>(lockedRef, input.ref, sourceInfo.info, false));
            }
        }
    }
    };

-    updateLocks(flake.inputs, oldLockFile, newLockFile, {});
+    updateLocks(flake.inputs, oldLockFile.root, newLockFile.root, {});

    /* Check if there is a cycle in the "follows" inputs. */
    if (!unresolved.empty() && unresolved == prevUnresolved) {
@@ -619,8 +586,7 @@ LockedFlake lockFlake(
}

void callFlake(EvalState & state,
-    const Flake & flake,
-    const LockedInputs & lockedInputs,
+    const LockedFlake & lockedFlake,
    Value & vRes)
{
    auto vLocks = state.allocValue();
@@ -629,11 +595,11 @@ void callFlake(EvalState & state,
    auto vTmp1 = state.allocValue();
    auto vTmp2 = state.allocValue();

-    mkString(*vLocks, lockedInputs.to_string());
+    mkString(*vLocks, lockedFlake.lockFile.to_string());

-    emitTreeAttrs(state, *flake.sourceInfo, flake.lockedRef.input, *vRootSrc);
+    emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc);

-    mkString(*vRootSubdir, flake.lockedRef.subdir);
+    mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);

    static Value * vCallFlake = nullptr;
@@ -649,13 +615,6 @@ void callFlake(EvalState & state,
    state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
}

-void callFlake(EvalState & state,
-    const LockedFlake & lockedFlake,
-    Value & v)
-{
-    callFlake(state, lockedFlake.flake, lockedFlake.lockFile, v);
-}

static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    callFlake(state,

View file

@@ -96,13 +96,7 @@ LockedFlake lockFlake(
void callFlake(
    EvalState & state,
-    const Flake & flake,
-    const LockedInputs & inputs,
-    Value & v);
-
-void callFlake(
-    EvalState & state,
-    const LockedFlake & resFlake,
+    const LockedFlake & lockedFlake,
    Value & v);

}

View file

@@ -67,9 +67,8 @@ static TreeInfo parseTreeInfo(const nlohmann::json & json)
    throw Error("attribute 'info' missing in lock file");
}

-LockedInput::LockedInput(const nlohmann::json & json)
-    : LockedInputs(json)
-    , lockedRef(getFlakeRef(json, "url", "uri", "locked"))
+LockedNode::LockedNode(const nlohmann::json & json)
+    : lockedRef(getFlakeRef(json, "url", "uri", "locked"))
    , originalRef(getFlakeRef(json, "originalUrl", "originalUri", "original"))
    , info(parseTreeInfo(json))
    , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
@@ -90,103 +89,139 @@ static nlohmann::json treeInfoToJson(const TreeInfo & info)
    return json;
}

-nlohmann::json LockedInput::toJson() const
-{
-    auto json = LockedInputs::toJson();
-    json["original"] = fetchers::attrsToJson(originalRef.toAttrs());
-    json["locked"] = fetchers::attrsToJson(lockedRef.toAttrs());
-    json["info"] = treeInfoToJson(info);
-    if (!isFlake) json["flake"] = false;
-    return json;
-}
-
-StorePath LockedInput::computeStorePath(Store & store) const
+StorePath LockedNode::computeStorePath(Store & store) const
{
    return info.computeStorePath(store);
}

-LockedInputs::LockedInputs(const nlohmann::json & json)
+std::shared_ptr<Node> Node::findInput(const InputPath & path)
-{
-    for (auto & i : json["inputs"].items())
-        inputs.insert_or_assign(i.key(), LockedInput(i.value()));
-}
-
-nlohmann::json LockedInputs::toJson() const
-{
-    nlohmann::json json;
-    {
-        auto j = nlohmann::json::object();
-        for (auto & i : inputs)
-            j[i.first] = i.second.toJson();
-        json["inputs"] = std::move(j);
-    }
-    return json;
-}
-
-std::string LockedInputs::to_string() const
-{
-    return toJson().dump(2);
-}
-
-bool LockedInputs::isImmutable() const
-{
-    for (auto & i : inputs)
-        if (!i.second.lockedRef.input->isImmutable() || !i.second.isImmutable()) return false;
-    return true;
-}
-
-std::optional<LockedInput *> LockedInputs::findInput(const InputPath & path)
{
    assert(!path.empty());

-    LockedInputs * pos = this;
+    auto pos = shared_from_this();

    for (auto & elem : path) {
        auto i = pos->inputs.find(elem);
        if (i == pos->inputs.end())
            return {};
-        pos = &i->second;
+        pos = i->second;
    }

-    return (LockedInput *) pos;
+    return pos;
}
-void LockedInputs::removeInput(const InputPath & path)
-{
-    assert(!path.empty());
-
-    LockedInputs * pos = this;
-
-    for (size_t n = 0; n < path.size(); n++) {
-        auto i = pos->inputs.find(path[n]);
-        if (i == pos->inputs.end()) return;
-        if (n + 1 == path.size())
-            pos->inputs.erase(i);
-        else
-            pos = &i->second;
-    }
-}
+LockFile::LockFile(const nlohmann::json & json, const Path & path)
+{
+    auto version = json.value("version", 0);
+    if (version < 3 || version > 5)
+        throw Error("lock file '%s' has unsupported version %d", path, version);
+
+    if (version < 5) {
+        std::function<void(Node & node, const nlohmann::json & json)> getInputs;
+
+        getInputs = [&](Node & node, const nlohmann::json & json)
+        {
+            for (auto & i : json["inputs"].items()) {
+                auto input = std::make_shared<LockedNode>(i.value());
+                getInputs(*input, i.value());
+                node.inputs.insert_or_assign(i.key(), input);
+            }
+        };
+
+        getInputs(*root, json);
+    }
+
+    else {
+        std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
+
+        std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
+
+        getInputs = [&](Node & node, const nlohmann::json & jsonNode)
+        {
+            if (jsonNode.find("inputs") == jsonNode.end()) return;
+            for (auto & i : jsonNode["inputs"].items()) {
+                std::string inputKey = i.value();
+                auto k = nodeMap.find(inputKey);
+                if (k == nodeMap.end()) {
+                    auto jsonNode2 = json["nodes"][inputKey];
+                    auto input = std::make_shared<LockedNode>(jsonNode2);
+                    k = nodeMap.insert_or_assign(inputKey, input).first;
+                    getInputs(*input, jsonNode2);
+                }
+                node.inputs.insert_or_assign(i.key(), k->second);
+            }
+        };
+
+        std::string rootKey = json["root"];
+        nodeMap.insert_or_assign(rootKey, root);
+        getInputs(*root, json["nodes"][rootKey]);
+    }
+}
nlohmann::json LockFile::toJson() const
{
-    auto json = LockedInputs::toJson();
-    json["version"] = 4;
+    nlohmann::json nodes;
+    std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
std::unordered_set<std::string> keys;
std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode;
dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
{
auto k = nodeKeys.find(node);
if (k != nodeKeys.end())
return k->second;
if (!keys.insert(key).second) {
for (int n = 2; ; ++n) {
auto k = fmt("%s_%d", key, n);
if (keys.insert(k).second) {
key = k;
break;
}
}
}
nodeKeys.insert_or_assign(node, key);
auto n = nlohmann::json::object();
if (!node->inputs.empty()) {
auto inputs = nlohmann::json::object();
for (auto & i : node->inputs)
inputs[i.first] = dumpNode(i.first, i.second);
n["inputs"] = std::move(inputs);
}
if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
n["original"] = fetchers::attrsToJson(lockedNode->originalRef.toAttrs());
n["locked"] = fetchers::attrsToJson(lockedNode->lockedRef.toAttrs());
n["info"] = treeInfoToJson(lockedNode->info);
if (!lockedNode->isFlake) n["flake"] = false;
}
nodes[key] = std::move(n);
return key;
};
nlohmann::json json;
json["version"] = 5;
json["root"] = dumpNode("root", root);
json["nodes"] = std::move(nodes);
    return json;
}
std::string LockFile::to_string() const
{
return toJson().dump(2);
}
LockFile LockFile::read(const Path & path)
{
-    if (pathExists(path)) {
-        auto json = nlohmann::json::parse(readFile(path));
-        auto version = json.value("version", 0);
-        if (version != 3 && version != 4)
-            throw Error("lock file '%s' has unsupported version %d", path, version);
-        return LockFile(json);
-    } else
-        return LockFile();
+    if (!pathExists(path)) return LockFile();
+    return LockFile(nlohmann::json::parse(readFile(path)), path);
}

std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
@@ -201,6 +236,35 @@ void LockFile::write(const Path & path) const
    writeFile(path, fmt("%s\n", *this));
}
bool LockFile::isImmutable() const
{
std::unordered_set<std::shared_ptr<const Node>> nodes;
std::function<void(std::shared_ptr<const Node> node)> visit;
visit = [&](std::shared_ptr<const Node> node)
{
if (!nodes.insert(node).second) return;
for (auto & i : node->inputs) visit(i.second);
};
visit(root);
for (auto & i : nodes) {
if (i == root) continue;
auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
if (lockedNode && !lockedNode->lockedRef.input->isImmutable()) return false;
}
return true;
}
bool LockFile::operator ==(const LockFile & other) const
{
// FIXME: slow
return toJson() == other.toJson();
}
InputPath parseInputPath(std::string_view s)
{
    InputPath path;
@@ -217,4 +281,52 @@ InputPath parseInputPath(std::string_view s)
    return path;
}
static void flattenLockFile(
std::shared_ptr<const Node> node,
const InputPath & prefix,
std::map<InputPath, std::shared_ptr<const LockedNode>> & res)
{
// FIXME: handle cycles
for (auto &[id, input] : node->inputs) {
auto inputPath(prefix);
inputPath.push_back(id);
if (auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input))
res.emplace(inputPath, lockedInput);
flattenLockFile(input, inputPath, res);
}
}
std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks)
{
std::map<InputPath, std::shared_ptr<const LockedNode>> oldFlat, newFlat;
flattenLockFile(oldLocks.root, {}, oldFlat);
flattenLockFile(newLocks.root, {}, newFlat);
auto i = oldFlat.begin();
auto j = newFlat.begin();
std::string res;
while (i != oldFlat.end() || j != newFlat.end()) {
if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef);
++j;
} else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first));
++i;
} else {
if (!(i->second->lockedRef == j->second->lockedRef)) {
assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string());
res += fmt("* Updated '%s': '%s' -> '%s'\n",
concatStringsSep("/", i->first),
i->second->lockedRef,
j->second->lockedRef);
}
++i;
++j;
}
}
return res;
}
}

View file

@@ -15,35 +15,26 @@ using namespace fetchers;
typedef std::vector<FlakeId> InputPath;

-struct LockedInput;
-
-/* Lock file information about the dependencies of a flake. */
-struct LockedInputs
+/* A node in the lock file. It has outgoing edges to other nodes (its
+   inputs). Only the root node has this type; all other nodes have
+   type LockedNode. */
+struct Node : std::enable_shared_from_this<Node>
{
-    std::map<FlakeId, LockedInput> inputs;
+    std::map<FlakeId, std::shared_ptr<Node>> inputs;

-    LockedInputs() {};
-    LockedInputs(const nlohmann::json & json);
+    virtual ~Node() { }

-    nlohmann::json toJson() const;
-    std::string to_string() const;
-    bool isImmutable() const;
-    std::optional<LockedInput *> findInput(const InputPath & path);
-    void removeInput(const InputPath & path);
+    std::shared_ptr<Node> findInput(const InputPath & path);
};

-/* Lock file information about a flake input. */
-struct LockedInput : LockedInputs
+/* A non-root node in the lock file. */
+struct LockedNode : Node
{
    FlakeRef lockedRef, originalRef;
    TreeInfo info;
    bool isFlake = true;

-    LockedInput(
+    LockedNode(
        const FlakeRef & lockedRef,
        const FlakeRef & originalRef,
        const TreeInfo & info,
@@ -51,51 +42,36 @@ struct LockedInput : LockedInputs
        : lockedRef(lockedRef), originalRef(originalRef), info(info), isFlake(isFlake)
    { }

-    LockedInput(const nlohmann::json & json);
-
-    bool operator ==(const LockedInput & other) const
-    {
-        return
-            lockedRef == other.lockedRef
-            && originalRef == other.originalRef
-            && info == other.info
-            && inputs == other.inputs
-            && isFlake == other.isFlake;
-    }
-
-    nlohmann::json toJson() const;
+    LockedNode(const nlohmann::json & json);

    StorePath computeStorePath(Store & store) const;
};

-/* An entire lock file. Note that this cannot be a FlakeInput for the
-   top-level flake, because then the lock file would need to contain
-   the hash of the top-level flake, but committing the lock file
-   would invalidate that hash. */
-struct LockFile : LockedInputs
+struct LockFile
{
-    bool operator ==(const LockFile & other) const
-    {
-        return inputs == other.inputs;
-    }
+    std::shared_ptr<Node> root = std::make_shared<Node>();

-    LockFile() {}
-    LockFile(const nlohmann::json & json) : LockedInputs(json) {}
-    LockFile(LockedInput && dep)
-    {
-        inputs = std::move(dep.inputs);
-    }
+    LockFile() {};
+    LockFile(const nlohmann::json & json, const Path & path);

    nlohmann::json toJson() const;
+
+    std::string to_string() const;

    static LockFile read(const Path & path);

    void write(const Path & path) const;
+
+    bool isImmutable() const;
+
+    bool operator ==(const LockFile & other) const;
};

std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile);

InputPath parseInputPath(std::string_view s);

+std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks);

}

View file

@@ -199,24 +199,25 @@ struct CmdFlakeListInputs : FlakeCommand, MixJSON
        stopProgressBar();

        if (json)
-            std::cout << ((LockedInputs &) flake.lockFile).toJson() << "\n";
+            std::cout << flake.lockFile.toJson() << "\n";
        else {
            std::cout << fmt("%s\n", flake.flake.lockedRef);

-            std::function<void(const LockedInputs & inputs, const std::string & prefix)> recurse;
+            std::function<void(const Node & node, const std::string & prefix)> recurse;

-            recurse = [&](const LockedInputs & inputs, const std::string & prefix)
+            recurse = [&](const Node & node, const std::string & prefix)
            {
-                for (const auto & [i, input] : enumerate(inputs.inputs)) {
+                for (const auto & [i, input] : enumerate(node.inputs)) {
                    //auto tree2 = tree.child(i + 1 == inputs.inputs.size());
-                    bool last = i + 1 == inputs.inputs.size();
+                    bool last = i + 1 == node.inputs.size();
                    std::cout << fmt("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s\n",
-                        prefix + (last ? treeLast : treeConn), input.first, input.second.lockedRef);
-                    recurse(input.second, prefix + (last ? treeNull : treeLine));
+                        prefix + (last ? treeLast : treeConn), input.first,
+                        std::dynamic_pointer_cast<const LockedNode>(input.second)->lockedRef);
+                    recurse(*input.second, prefix + (last ? treeNull : treeLine));
                }
            };

-            recurse(flake.lockFile, "");
+            recurse(*flake.lockFile.root, "");
        }
    }
};
@@ -664,23 +665,26 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
        if (jsonRoot)
            jsonRoot->attr("path", store->printStorePath(flake.flake.sourceInfo->storePath));

-        std::function<void(const LockedInputs & inputs, std::optional<JSONObject> & jsonObj)> traverse;
-        traverse = [&](const LockedInputs & inputs, std::optional<JSONObject> & jsonObj)
+        // FIXME: use graph output, handle cycles.
+        std::function<void(const Node & node, std::optional<JSONObject> & jsonObj)> traverse;
+        traverse = [&](const Node & node, std::optional<JSONObject> & jsonObj)
        {
            auto jsonObj2 = jsonObj ? jsonObj->object("inputs") : std::optional<JSONObject>();
-            for (auto & input : inputs.inputs) {
+            for (auto & input : node.inputs) {
+                auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input.second);
+                assert(lockedInput);
                auto jsonObj3 = jsonObj2 ? jsonObj2->object(input.first) : std::optional<JSONObject>();
                if (!dryRun)
-                    input.second.lockedRef.input->fetchTree(store);
-                auto storePath = input.second.computeStorePath(*store);
+                    lockedInput->lockedRef.input->fetchTree(store);
+                auto storePath = lockedInput->computeStorePath(*store);
                if (jsonObj3)
                    jsonObj3->attr("path", store->printStorePath(storePath));
                sources.insert(std::move(storePath));
-                traverse(input.second, jsonObj3);
+                traverse(*lockedInput, jsonObj3);
            }
        };

-        traverse(flake.lockFile, jsonRoot);
+        traverse(*flake.lockFile.root, jsonRoot);

        if (!dryRun && !dstUri.empty()) {
            ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);

View file

@@ -517,7 +517,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF

nix flake update $flake3Dir
-[[ $(jq .inputs.foo.locked $flake3Dir/flake.lock) = $(jq .inputs.bar.locked $flake3Dir/flake.lock) ]]
+[[ $(jq .nodes.foo.locked $flake3Dir/flake.lock) = $(jq .nodes.bar.locked $flake3Dir/flake.lock) ]]

cat > $flake3Dir/flake.nix <<EOF
{
@@ -531,7 +531,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF

nix flake update $flake3Dir
-[[ $(jq .inputs.bar.locked.url $flake3Dir/flake.lock) =~ flake1 ]]
+[[ $(jq .nodes.bar.locked.url $flake3Dir/flake.lock) =~ flake1 ]]

cat > $flake3Dir/flake.nix <<EOF
{
@@ -545,7 +545,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF

nix flake update $flake3Dir
-[[ $(jq .inputs.bar.locked.url $flake3Dir/flake.lock) =~ flake2 ]]
+[[ $(jq .nodes.bar.locked.url $flake3Dir/flake.lock) =~ flake2 ]]

# Test overriding inputs of inputs.
cat > $flake3Dir/flake.nix <<EOF
@@ -563,7 +563,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF

nix flake update $flake3Dir
-[[ $(jq .inputs.flake2.inputs.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
+[[ $(jq .nodes.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]

cat > $flake3Dir/flake.nix <<EOF
{
@@ -578,7 +578,7 @@ cat > $flake3Dir/flake.nix <<EOF
EOF

nix flake update $flake3Dir --recreate-lock-file
-[[ $(jq .inputs.flake2.inputs.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
+[[ $(jq .nodes.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]

# Test Mercurial flakes.
rm -rf $flake5Dir
@@ -636,21 +636,21 @@ nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ
# Test --override-input.
git -C $flake3Dir reset --hard
-nix flake update $flake3Dir --override-input flake2/flake1 flake5
-[[ $(jq .inputs.flake2.inputs.flake1.locked.url $flake3Dir/flake.lock) =~ flake5 ]]
+nix flake update $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
+[[ $(jq .nodes.flake1_2.locked.url $flake3Dir/flake.lock) =~ flake5 ]]

nix flake update $flake3Dir --override-input flake2/flake1 flake1
-[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
+[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]

nix flake update $flake3Dir --override-input flake2/flake1 flake1/master/$hash1
-[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) =~ $hash1 ]]
+[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash1 ]]

# Test --update-input.
nix flake update $flake3Dir
-[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) = $hash1 ]]
+[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) = $hash1 ]]

nix flake update $flake3Dir --update-input flake2/flake1
-[[ $(jq -r .inputs.flake2.inputs.flake1.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
+[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]

# Test 'nix flake list-inputs'.
[[ $(nix flake list-inputs $flake3Dir | wc -l) == 5 ]]