forked from lix-project/lix
Remove support for old lockfiles and the epoch/uri attributes
parent b0e9b07e80
commit 1ad71bc62c
diff --git a/flake.nix b/flake.nix
@@ -3,7 +3,7 @@
 
   edition = 201909; # FIXME: remove
 
-  inputs.nixpkgs.uri = "nixpkgs/nixos-20.03-small";
+  inputs.nixpkgs.url = "nixpkgs/nixos-20.03-small";
 
   outputs = { self, nixpkgs }:
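After this change a flake input must be declared with the url attribute; uri is no longer recognized. A minimal flake.nix sketch using the new spelling (the outputs body is a hypothetical illustration, not part of this commit):

{
  edition = 201909; # still parsed, but deprecated

  inputs.nixpkgs.url = "nixpkgs/nixos-20.03-small";

  outputs = { self, nixpkgs }: {
    # hypothetical output, for illustration only
    defaultPackage.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.hello;
  };
}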
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
@@ -129,7 +129,6 @@ static FlakeInput parseFlakeInput(EvalState & state,
 
     auto sInputs = state.symbols.create("inputs");
     auto sUrl = state.symbols.create("url");
-    auto sUri = state.symbols.create("uri"); // FIXME: remove soon
     auto sFlake = state.symbols.create("flake");
     auto sFollows = state.symbols.create("follows");
@@ -138,7 +137,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
 
     for (nix::Attr attr : *(value->attrs)) {
         try {
-            if (attr.name == sUrl || attr.name == sUri) {
+            if (attr.name == sUrl) {
                 expectType(state, tString, *attr.value, *attr.pos);
                 url = attr.value->string.s;
                 attrs.emplace("url", *url);
@@ -232,14 +231,10 @@ static Flake getFlake(
     expectType(state, tAttrs, vInfo, Pos(state.symbols.create(flakeFile), 0, 0));
 
     auto sEdition = state.symbols.create("edition"); // FIXME: remove soon
-    auto sEpoch = state.symbols.create("epoch"); // FIXME: remove soon
 
     if (vInfo.attrs->get(sEdition))
         warn("flake '%s' has deprecated attribute 'edition'", lockedRef);
 
-    if (vInfo.attrs->get(sEpoch))
-        warn("flake '%s' has deprecated attribute 'epoch'", lockedRef);
-
     if (auto description = vInfo.attrs->get(state.sDescription)) {
         expectType(state, tString, *description->value, *description->pos);
         flake.description = description->value->string.s;
@@ -270,7 +265,6 @@ static Flake getFlake(
 
     for (auto & attr : *vInfo.attrs) {
         if (attr.name != sEdition &&
-            attr.name != sEpoch &&
             attr.name != state.sDescription &&
             attr.name != sInputs &&
             attr.name != sOutputs)
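With sEpoch removed, edition still only triggers the deprecation warning above, but epoch now falls through to this unknown-attribute check and presumably fails evaluation. For example, a flake.nix like the following (a hypothetical illustration) would no longer be accepted:

{
  epoch = 201906; # no longer a recognized attribute after this commit

  inputs.nixpkgs.url = "nixpkgs/nixos-20.03-small";

  outputs = { self, nixpkgs }: { };
}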
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
@@ -12,29 +12,18 @@ FlakeRef flakeRefFromJson(const nlohmann::json & json)
 
 FlakeRef getFlakeRef(
     const nlohmann::json & json,
-    const char * version3Attr1,
-    const char * version3Attr2,
-    const char * version4Attr)
+    const char * attr)
 {
-    auto i = json.find(version4Attr);
+    auto i = json.find(attr);
     if (i != json.end())
         return flakeRefFromJson(*i);
 
-    // FIXME: remove these.
-    i = json.find(version3Attr1);
-    if (i != json.end())
-        return parseFlakeRef(*i);
-
-    i = json.find(version3Attr2);
-    if (i != json.end())
-        return parseFlakeRef(*i);
-
-    throw Error("attribute '%s' missing in lock file", version4Attr);
+    throw Error("attribute '%s' missing in lock file", attr);
 }
 
 LockedNode::LockedNode(const nlohmann::json & json)
-    : lockedRef(getFlakeRef(json, "url", "uri", "locked"))
-    , originalRef(getFlakeRef(json, "originalUrl", "originalUri", "original"))
+    : lockedRef(getFlakeRef(json, "locked"))
+    , originalRef(getFlakeRef(json, "original"))
     , info(TreeInfo::fromJson(json))
    , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
    {
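getFlakeRef now accepts only the structured attribute form handled by flakeRefFromJson; the old string-valued url/uri (and originalUrl/originalUri) fallbacks that went through parseFlakeRef are gone. A single lock-file node therefore looks roughly like this sketch (the field values are illustrative, and the exact attribute set depends on the fetcher type):

{
  "locked": {
    "type": "github",
    "owner": "NixOS",
    "repo": "nixpkgs",
    "rev": "0000000000000000000000000000000000000000"
  },
  "original": {
    "type": "indirect",
    "id": "nixpkgs"
  },
  "flake": true
}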
@@ -64,49 +53,32 @@ std::shared_ptr<Node> Node::findInput(const InputPath & path)
 
 LockFile::LockFile(const nlohmann::json & json, const Path & path)
 {
     auto version = json.value("version", 0);
-    if (version < 3 || version > 5)
+    if (version != 5)
         throw Error("lock file '%s' has unsupported version %d", path, version);
 
-    if (version < 5) {
-        std::function<void(Node & node, const nlohmann::json & json)> getInputs;
-
-        getInputs = [&](Node & node, const nlohmann::json & json)
-        {
-            for (auto & i : json["inputs"].items()) {
-                auto input = std::make_shared<LockedNode>(i.value());
-                getInputs(*input, i.value());
-                node.inputs.insert_or_assign(i.key(), input);
-            }
-        };
-
-        getInputs(*root, json);
-    }
-
-    else {
-        std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
-
-        std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
-
-        getInputs = [&](Node & node, const nlohmann::json & jsonNode)
-        {
-            if (jsonNode.find("inputs") == jsonNode.end()) return;
-            for (auto & i : jsonNode["inputs"].items()) {
-                std::string inputKey = i.value();
-                auto k = nodeMap.find(inputKey);
-                if (k == nodeMap.end()) {
-                    auto jsonNode2 = json["nodes"][inputKey];
-                    auto input = std::make_shared<LockedNode>(jsonNode2);
-                    k = nodeMap.insert_or_assign(inputKey, input).first;
-                    getInputs(*input, jsonNode2);
-                }
-                node.inputs.insert_or_assign(i.key(), k->second);
-            }
-        };
-
-        std::string rootKey = json["root"];
-        nodeMap.insert_or_assign(rootKey, root);
-        getInputs(*root, json["nodes"][rootKey]);
-    }
+    std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
+
+    std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
+
+    getInputs = [&](Node & node, const nlohmann::json & jsonNode)
+    {
+        if (jsonNode.find("inputs") == jsonNode.end()) return;
+        for (auto & i : jsonNode["inputs"].items()) {
+            std::string inputKey = i.value();
+            auto k = nodeMap.find(inputKey);
+            if (k == nodeMap.end()) {
+                auto jsonNode2 = json["nodes"][inputKey];
+                auto input = std::make_shared<LockedNode>(jsonNode2);
+                k = nodeMap.insert_or_assign(inputKey, input).first;
+                getInputs(*input, jsonNode2);
+            }
+            node.inputs.insert_or_assign(i.key(), k->second);
+        }
+    };
+
+    std::string rootKey = json["root"];
+    nodeMap.insert_or_assign(rootKey, root);
+    getInputs(*root, json["nodes"][rootKey]);
 }
 
 nlohmann::json LockFile::toJson() const
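The constructor now accepts only version-5 lock files, which store a flat nodes table keyed by strings: the entry point is named by root, and each node's inputs maps an input name to another node's key. A minimal sketch of such a file (the node keys, hash, and fetcher attributes are illustrative; the per-node info object is an assumption based on the TreeInfo::fromJson call above):

{
  "version": 5,
  "root": "root",
  "nodes": {
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    },
    "nixpkgs": {
      "locked": {
        "type": "github",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "0000000000000000000000000000000000000000"
      },
      "original": {
        "type": "indirect",
        "id": "nixpkgs"
      },
      "info": {
        "narHash": "sha256-0000000000000000000000000000000000000000000000000000"
      }
    }
  }
}

Because the shared nodeMap deduplicates nodes by key, two inputs that reference the same key resolve to the same Node object, so the lock file describes a graph rather than the nested tree of the old version-3/4 formats.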