Rename dep -> input
Also use nlohmann::json range-based for.
commit 5fe7be2409
parent 9e99b5205c
4 changed files with 54 additions and 56 deletions
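
For context on the second half of the commit message: nlohmann::json lets you iterate an object either with explicit iterators (key via i.key(), value via *i) or with a range-based for over .items() (key via i.key(), value via i.value()), which is what the lock-file parsing below is switched to. A minimal, self-contained sketch of the two styles (the field names mirror this diff; the concrete values are made up for illustration):

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    // A toy object shaped like the "inputs" map in the lock files handled below.
    nlohmann::json inputs = {
        {"nixpkgs", {{"uri", "github:NixOS/nixpkgs/<rev>"}, {"narHash", "sha256-<hash>"}}},
    };

    // Old style: explicit iterators; the key comes from i.key(), the value from *i.
    for (auto i = inputs.begin(); i != inputs.end(); ++i)
        std::cout << i.key() << " -> " << (*i)["uri"] << "\n";

    // New style: range-based for over items(); key via i.key(), value via i.value().
    for (auto & i : inputs.items())
        std::cout << i.key() << " -> " << i.value()["uri"] << "\n";
}

Both loops print the same thing; the range-based form just drops the manual begin()/end() bookkeeping.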
@@ -316,7 +316,7 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef)
     else assert(false);
 }
 
-static std::pair<Flake, FlakeDep> updateLocks(
+static std::pair<Flake, FlakeInput> updateLocks(
     EvalState & state,
     const FlakeRef & flakeRef,
     HandleLockFile handleLockFile,
@@ -325,7 +325,7 @@ static std::pair<Flake, FlakeDep> updateLocks(
 {
     auto flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef));
 
-    FlakeDep newEntry(
+    FlakeInput newEntry(
         flake.id,
         flake.sourceInfo.resolvedRef,
         flake.sourceInfo.narHash);
@@ -333,28 +333,28 @@ static std::pair<Flake, FlakeDep> updateLocks(
     for (auto & input : flake.nonFlakeInputs) {
         auto & id = input.first;
         auto & ref = input.second;
-        auto i = oldEntry.nonFlakeDeps.find(id);
-        if (i != oldEntry.nonFlakeDeps.end()) {
-            newEntry.nonFlakeDeps.insert_or_assign(i->first, i->second);
+        auto i = oldEntry.nonFlakeInputs.find(id);
+        if (i != oldEntry.nonFlakeInputs.end()) {
+            newEntry.nonFlakeInputs.insert_or_assign(i->first, i->second);
         } else {
             if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                 throw Error("cannot update non-flake dependency '%s' in pure mode", id);
             auto nonFlake = getNonFlake(state, ref, id, allowedToUseRegistries(handleLockFile, false));
-            newEntry.nonFlakeDeps.insert_or_assign(id,
-                NonFlakeDep(
+            newEntry.nonFlakeInputs.insert_or_assign(id,
+                NonFlakeInput(
                     nonFlake.sourceInfo.resolvedRef,
                     nonFlake.sourceInfo.narHash));
         }
     }
 
     for (auto & inputRef : flake.inputs) {
-        auto i = oldEntry.flakeDeps.find(inputRef);
-        if (i != oldEntry.flakeDeps.end()) {
-            newEntry.flakeDeps.insert_or_assign(inputRef, i->second);
+        auto i = oldEntry.flakeInputs.find(inputRef);
+        if (i != oldEntry.flakeInputs.end()) {
+            newEntry.flakeInputs.insert_or_assign(inputRef, i->second);
         } else {
             if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                 throw Error("cannot update flake dependency '%s' in pure mode", inputRef);
-            newEntry.flakeDeps.insert_or_assign(inputRef,
+            newEntry.flakeInputs.insert_or_assign(inputRef,
                 updateLocks(state, inputRef, handleLockFile, {}, false).second);
         }
     }
@@ -434,7 +434,7 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo
    it doesn't appear in 'builtins'. */
 static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
-    auto lazyFlake = (FlakeDep *) args[0]->attrs;
+    auto lazyFlake = (FlakeInput *) args[0]->attrs;
     auto flake = getFlake(state, lazyFlake->ref, false);
     callFlake(state, flake, *lazyFlake, v);
 }
@@ -448,10 +448,10 @@ void callFlake(EvalState & state,
     // ...}'. This attrset is passed lazily as an argument to 'outputs'.
 
     state.mkAttrs(v,
-        inputs.flakeDeps.size() +
-        inputs.nonFlakeDeps.size() + 8);
+        inputs.flakeInputs.size() +
+        inputs.nonFlakeInputs.size() + 8);
 
-    for (auto & dep : inputs.flakeDeps) {
+    for (auto & dep : inputs.flakeInputs) {
         auto vFlake = state.allocAttr(v, dep.second.id);
         auto vPrimOp = state.allocValue();
         static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake"));
@@ -460,11 +460,11 @@ void callFlake(EvalState & state,
         auto vArg = state.allocValue();
         vArg->type = tNull;
         // FIXME: leak
-        vArg->attrs = (Bindings *) new FlakeDep(dep.second); // evil! also inefficient
+        vArg->attrs = (Bindings *) new FlakeInput(dep.second); // evil! also inefficient
         mkApp(*vFlake, *vPrimOp, *vArg);
     }
 
-    for (auto & dep : inputs.nonFlakeDeps) {
+    for (auto & dep : inputs.nonFlakeInputs) {
         auto vNonFlake = state.allocAttr(v, dep.first);
         state.mkAttrs(*vNonFlake, 8);
 
@@ -3,7 +3,7 @@
 
 namespace nix::flake {
 
-AbstractDep::AbstractDep(const nlohmann::json & json)
+AbstractInput::AbstractInput(const nlohmann::json & json)
     : ref(json["uri"])
     , narHash(Hash((std::string) json["narHash"]))
 {
@@ -11,7 +11,7 @@ AbstractDep::AbstractDep(const nlohmann::json & json)
         throw Error("lockfile contains mutable flakeref '%s'", ref);
 }
 
-nlohmann::json AbstractDep::toJson() const
+nlohmann::json AbstractInput::toJson() const
 {
     nlohmann::json json;
     json["uri"] = ref.to_string();
@@ -19,35 +19,33 @@ nlohmann::json AbstractDep::toJson() const
     return json;
 }
 
-Path AbstractDep::computeStorePath(Store & store) const
+Path AbstractInput::computeStorePath(Store & store) const
 {
     return store.makeFixedOutputPath(true, narHash, "source");
 }
 
-FlakeDep::FlakeDep(const nlohmann::json & json)
+FlakeInput::FlakeInput(const nlohmann::json & json)
     : FlakeInputs(json)
-    , AbstractDep(json)
+    , AbstractInput(json)
     , id(json["id"])
 {
 }
 
-nlohmann::json FlakeDep::toJson() const
+nlohmann::json FlakeInput::toJson() const
 {
     auto json = FlakeInputs::toJson();
-    json.update(AbstractDep::toJson());
+    json.update(AbstractInput::toJson());
     json["id"] = id;
     return json;
 }
 
 FlakeInputs::FlakeInputs(const nlohmann::json & json)
 {
-    auto nonFlakeInputs = json["nonFlakeInputs"];
-    for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i)
-        nonFlakeDeps.insert_or_assign(i.key(), NonFlakeDep(*i));
+    for (auto & i : json["nonFlakeInputs"].items())
+        nonFlakeInputs.insert_or_assign(i.key(), NonFlakeInput(i.value()));
 
-    auto inputs = json["inputs"];
-    for (auto i = inputs.begin(); i != inputs.end(); ++i)
-        flakeDeps.insert_or_assign(i.key(), FlakeDep(*i));
+    for (auto & i : json["inputs"].items())
+        flakeInputs.insert_or_assign(i.key(), FlakeInput(i.value()));
 }
 
 nlohmann::json FlakeInputs::toJson() const
@@ -55,13 +53,13 @@ nlohmann::json FlakeInputs::toJson() const
     nlohmann::json json;
     {
         auto j = nlohmann::json::object();
-        for (auto & i : nonFlakeDeps)
+        for (auto & i : nonFlakeInputs)
             j[i.first] = i.second.toJson();
         json["nonFlakeInputs"] = std::move(j);
     }
     {
         auto j = nlohmann::json::object();
-        for (auto & i : flakeDeps)
+        for (auto & i : flakeInputs)
             j[i.first.to_string()] = i.second.toJson();
         json["inputs"] = std::move(j);
     }
@@ -12,18 +12,18 @@ namespace nix::flake {
 
 /* Common lock file information about a flake input, namely the
    immutable ref and the NAR hash. */
-struct AbstractDep
+struct AbstractInput
 {
     FlakeRef ref;
     Hash narHash;
 
-    AbstractDep(const FlakeRef & flakeRef, const Hash & narHash)
+    AbstractInput(const FlakeRef & flakeRef, const Hash & narHash)
         : ref(flakeRef), narHash(narHash)
     {
         assert(ref.isImmutable());
     };
 
-    AbstractDep(const nlohmann::json & json);
+    AbstractInput(const nlohmann::json & json);
 
     nlohmann::json toJson() const;
 
@@ -31,23 +31,23 @@ struct AbstractDep
 };
 
 /* Lock file information about a non-flake input. */
-struct NonFlakeDep : AbstractDep
+struct NonFlakeInput : AbstractInput
 {
-    using AbstractDep::AbstractDep;
+    using AbstractInput::AbstractInput;
 
-    bool operator ==(const NonFlakeDep & other) const
+    bool operator ==(const NonFlakeInput & other) const
     {
         return ref == other.ref && narHash == other.narHash;
     }
 };
 
-struct FlakeDep;
+struct FlakeInput;
 
 /* Lock file information about the dependencies of a flake. */
 struct FlakeInputs
 {
-    std::map<FlakeRef, FlakeDep> flakeDeps;
-    std::map<FlakeAlias, NonFlakeDep> nonFlakeDeps;
+    std::map<FlakeRef, FlakeInput> flakeInputs;
+    std::map<FlakeAlias, NonFlakeInput> nonFlakeInputs;
 
     FlakeInputs() {};
     FlakeInputs(const nlohmann::json & json);
@@ -56,29 +56,29 @@ struct FlakeInputs
 };
 
 /* Lock file information about a flake input. */
-struct FlakeDep : FlakeInputs, AbstractDep
+struct FlakeInput : FlakeInputs, AbstractInput
 {
     FlakeId id;
 
-    FlakeDep(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash)
-        : AbstractDep(flakeRef, narHash), id(id) {};
+    FlakeInput(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash)
+        : AbstractInput(flakeRef, narHash), id(id) {};
 
-    FlakeDep(const nlohmann::json & json);
+    FlakeInput(const nlohmann::json & json);
 
-    bool operator ==(const FlakeDep & other) const
+    bool operator ==(const FlakeInput & other) const
     {
         return
             id == other.id
             && ref == other.ref
             && narHash == other.narHash
-            && flakeDeps == other.flakeDeps
-            && nonFlakeDeps == other.nonFlakeDeps;
+            && flakeInputs == other.flakeInputs
+            && nonFlakeInputs == other.nonFlakeInputs;
     }
 
     nlohmann::json toJson() const;
 };
 
-/* An entire lock file. Note that this cannot be a FlakeDep for the
+/* An entire lock file. Note that this cannot be a FlakeInput for the
    top-level flake, because then the lock file would need to contain
    the hash of the top-level flake, but committing the lock file
    would invalidate that hash. */
@@ -87,16 +87,16 @@ struct LockFile : FlakeInputs
     bool operator ==(const LockFile & other) const
     {
         return
-            flakeDeps == other.flakeDeps
-            && nonFlakeDeps == other.nonFlakeDeps;
+            flakeInputs == other.flakeInputs
+            && nonFlakeInputs == other.nonFlakeInputs;
     }
 
     LockFile() {}
     LockFile(const nlohmann::json & json) : FlakeInputs(json) {}
-    LockFile(FlakeDep && dep)
+    LockFile(FlakeInput && dep)
     {
-        flakeDeps = std::move(dep.flakeDeps);
-        nonFlakeDeps = std::move(dep.nonFlakeDeps);
+        flakeInputs = std::move(dep.flakeInputs);
+        nonFlakeInputs = std::move(dep.nonFlakeInputs);
     }
 
     nlohmann::json toJson() const;
@@ -206,11 +206,11 @@ void makeFlakeClosureGCRoot(Store & store,
         queue.pop();
         /* Note: due to lazy fetching, these paths might not exist
            yet. */
-        for (auto & dep : flake.flakeDeps) {
+        for (auto & dep : flake.flakeInputs) {
             closure.insert(dep.second.computeStorePath(store));
             queue.push(dep.second);
         }
-        for (auto & dep : flake.nonFlakeDeps)
+        for (auto & dep : flake.nonFlakeInputs)
             closure.insert(dep.second.computeStorePath(store));
     }
 
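
As a reading aid, the toJson() methods above (AbstractInput, FlakeInput, FlakeInputs) imply a lock file that nests roughly as in the sketch below. This is an assumption-laden reconstruction from the field names visible in the diff ("uri", "narHash", "id", "inputs", "nonFlakeInputs"); every concrete key, ref, and hash is an invented placeholder, not taken from a real lock file.

#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    // Rough shape of a lock file as implied by FlakeInputs::toJson, FlakeInput::toJson
    // and AbstractInput::toJson in this commit; all values are placeholders.
    nlohmann::json lockFile = {
        {"inputs", {                                    // FlakeInputs::flakeInputs, keyed by FlakeRef::to_string()
            {"nixpkgs", {
                {"id", "nixpkgs"},                      // FlakeInput::id
                {"uri", "github:NixOS/nixpkgs/<rev>"},  // AbstractInput::ref
                {"narHash", "sha256-<hash>"},           // AbstractInput::narHash
                {"inputs", nlohmann::json::object()},   // nested FlakeInputs of that input
                {"nonFlakeInputs", nlohmann::json::object()},
            }},
        }},
        {"nonFlakeInputs", {                            // FlakeInputs::nonFlakeInputs, keyed by FlakeAlias
            {"someSource", {
                {"uri", "github:some-owner/some-repo/<rev>"},
                {"narHash", "sha256-<hash>"},
            }},
        }},
    };

    std::cout << lockFile.dump(2) << std::endl;
}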