Respect lock files of inputs + fine-grained lock file control
When computing a lock file, we now respect the lock files of flake
inputs. This is important for usability / reproducibility. For
example, the 'nixops' flake depends on the 'nixops-aws' and
'nixops-hetzner' repositories. So when the 'nixops' flake is used in
another flake, we want the versions of 'nixops-aws' and
'nixops-hetzner' locked by the 'nixops' flake, because those
presumably have been tested.

This can lead to a proliferation of versions of flakes like 'nixpkgs'
(since every flake's lock file could depend on a different version of
'nixpkgs'). This is not a major issue when using Nixpkgs overlays or
NixOS modules, since then the top-level flake composes those
overlays/modules into *its* version of Nixpkgs and all other versions
are ignored. Lock file computation has been made a bit more lazy, so
it won't try to fetch all those versions of 'nixpkgs'.

However, in case it's necessary to minimize flake versions, there are
now two input attributes that allow this. First, you can copy an
input from another flake, as follows:

  inputs.nixpkgs.follows = "dwarffs/nixpkgs";

This states that the calling flake's 'nixpkgs' input shall be the
same as the 'nixpkgs' input of the 'dwarffs' input.

Second, you can override inputs of inputs:

  inputs.nixpkgs.url = github:edolstra/nixpkgs/<hash>;
  inputs.nixops.inputs.nixpkgs.url = github:edolstra/nixpkgs/<hash>;

or equivalently, using 'follows':

  inputs.nixpkgs.url = github:edolstra/nixpkgs/<hash>;
  inputs.nixops.inputs.nixpkgs.follows = "nixpkgs";

This states that the 'nixpkgs' input of the 'nixops' input shall be
the same as the calling flake's 'nixpkgs' input.

Finally, at '-v', Nix now prints the changes to the lock file, e.g.

  $ nix flake update ~/Misc/eelco-configurations/hagbard
  inputs of flake 'git+file:///home/eelco/Misc/eelco-configurations?subdir=hagbard' changed:
    updated 'nixpkgs': 'github:edolstra/nixpkgs/7845bf5f4b3013df1cf036e9c9c3a55a30331db9' -> 'github:edolstra/nixpkgs/03f3def66a104a221aac8b751eeb7075374848fd'
    removed 'nixops'
    removed 'nixops/nixops-aws'
    removed 'nixops/nixops-hetzner'
    removed 'nixops/nixpkgs'
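To make the two mechanisms above concrete, here is a minimal flake.nix
sketch (hypothetical: the 'nixpkgs'/'nixops' names and the <hash>
placeholder are taken from the examples above, and the overall layout
follows the test flakes added in this commit). It pins the calling
flake's own 'nixpkgs' and makes the 'nixops' input reuse it:

  {
    edition = 201909;

    inputs.nixpkgs.url = github:edolstra/nixpkgs/<hash>;
    inputs.nixops.inputs.nixpkgs.follows = "nixpkgs";

    outputs = { self, nixpkgs, nixops }: {
      # Only one copy of nixpkgs ends up in the lock file:
      # 'nixops/nixpkgs' is locked to the same revision as 'nixpkgs'.
    };
  }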
This commit is contained in:
parent 2b8ca654b0
commit cc22cf662b
@@ -4,6 +4,7 @@
#include "eval-inline.hh"
#include "store-api.hh"
#include "fetchers/fetchers.hh"
#include "fetchers/regex.hh"

#include <iostream>
#include <ctime>

@@ -61,6 +62,78 @@ static void expectType(EvalState & state, ValueType type,
        showType(type), showType(value.type), pos);
}

static InputPath parseInputPath(std::string_view s, const Pos & pos)
{
    InputPath path;

    for (auto & elem : tokenizeString<std::vector<std::string>>(s, "/")) {
        if (!std::regex_match(elem, fetchers::flakeIdRegex))
            throw Error("invalid flake input path element '%s' at %s", elem, pos);
        path.push_back(elem);
    }

    if (path.empty())
        throw Error("flake input path is empty at %s", pos);

    return path;
}

static std::map<FlakeId, FlakeInput> parseFlakeInputs(
    EvalState & state, Value * value, const Pos & pos);

static FlakeInput parseFlakeInput(EvalState & state,
    const std::string & inputName, Value * value, const Pos & pos)
{
    expectType(state, tAttrs, *value, pos);

    FlakeInput input {
        .ref = parseFlakeRef(inputName)
    };

    auto sInputs = state.symbols.create("inputs");
    auto sUrl = state.symbols.create("url");
    auto sUri = state.symbols.create("uri"); // FIXME: remove soon
    auto sFlake = state.symbols.create("flake");
    auto sFollows = state.symbols.create("follows");

    for (Attr attr : *(value->attrs)) {
        if (attr.name == sUrl || attr.name == sUri) {
            expectType(state, tString, *attr.value, *attr.pos);
            input.ref = parseFlakeRef(attr.value->string.s);
        } else if (attr.name == sFlake) {
            expectType(state, tBool, *attr.value, *attr.pos);
            input.isFlake = attr.value->boolean;
        } else if (attr.name == sInputs) {
            input.overrides = parseFlakeInputs(state, attr.value, *attr.pos);
        } else if (attr.name == sFollows) {
            expectType(state, tString, *attr.value, *attr.pos);
            input.follows = parseInputPath(attr.value->string.s, *attr.pos);
        } else
            throw Error("flake input '%s' has an unsupported attribute '%s', at %s",
                inputName, attr.name, *attr.pos);
    }

    return input;
}

static std::map<FlakeId, FlakeInput> parseFlakeInputs(
    EvalState & state, Value * value, const Pos & pos)
{
    std::map<FlakeId, FlakeInput> inputs;

    expectType(state, tAttrs, *value, pos);

    for (Attr & inputAttr : *(*value).attrs) {
        inputs.emplace(inputAttr.name,
            parseFlakeInput(state,
                inputAttr.name,
                inputAttr.value,
                *inputAttr.pos));
    }

    return inputs;
}

static Flake getFlake(EvalState & state, const FlakeRef & originalRef,
    bool allowLookup, RefMap & refMap)
{

@@ -72,7 +145,7 @@ static Flake getFlake(EvalState & state, const FlakeRef & originalRef,

    FlakeRef resolvedRef(resolvedInput, flakeRef.subdir);

    debug("got flake source '%s' from flake URL '%s'",
    debug("got flake source '%s' from '%s'",
        state.store->printStorePath(sourceInfo.storePath), resolvedRef);

    refMap.push_back({originalRef, resolvedRef});

@@ -124,33 +197,9 @@ static Flake getFlake(EvalState & state, const FlakeRef & originalRef,
    }

    auto sInputs = state.symbols.create("inputs");
    auto sUrl = state.symbols.create("url");
    auto sUri = state.symbols.create("uri"); // FIXME: remove soon
    auto sFlake = state.symbols.create("flake");

    if (std::optional<Attr *> inputs = vInfo.attrs->get(sInputs)) {
        expectType(state, tAttrs, *(**inputs).value, *(**inputs).pos);

        for (Attr inputAttr : *(*(**inputs).value).attrs) {
            expectType(state, tAttrs, *inputAttr.value, *inputAttr.pos);

            FlakeInput input(parseFlakeRef(inputAttr.name));

            for (Attr attr : *(inputAttr.value->attrs)) {
                if (attr.name == sUrl || attr.name == sUri) {
                    expectType(state, tString, *attr.value, *attr.pos);
                    input.ref = parseFlakeRef(attr.value->string.s);
                } else if (attr.name == sFlake) {
                    expectType(state, tBool, *attr.value, *attr.pos);
                    input.isFlake = attr.value->boolean;
                } else
                    throw Error("flake input '%s' has an unsupported attribute '%s', at %s",
                        inputAttr.name, attr.name, *attr.pos);
            }

            flake.inputs.emplace(inputAttr.name, input);
        }
    }
    if (std::optional<Attr *> inputs = vInfo.attrs->get(sInputs))
        flake.inputs = parseFlakeInputs(state, (**inputs).value, *(**inputs).pos);

    auto sOutputs = state.symbols.create("outputs");

@@ -161,7 +210,9 @@ static Flake getFlake(EvalState & state, const FlakeRef & originalRef,
    if (flake.vOutputs->lambda.fun->matchAttrs) {
        for (auto & formal : flake.vOutputs->lambda.fun->formals->formals) {
            if (formal.name != state.sSelf)
                flake.inputs.emplace(formal.name, FlakeInput(parseFlakeRef(formal.name)));
                flake.inputs.emplace(formal.name, FlakeInput {
                    .ref = parseFlakeRef(formal.name)
                });
        }
    }

@@ -201,7 +252,7 @@ static std::pair<fetchers::Tree, FlakeRef> getNonFlake(

    FlakeRef resolvedRef(resolvedInput, flakeRef.subdir);

    debug("got non-flake source '%s' with flakeref %s",
    debug("got non-flake source '%s' from '%s'",
        state.store->printStorePath(sourceInfo.storePath), resolvedRef);

    refMap.push_back({originalRef, resolvedRef});

@@ -224,70 +275,48 @@ bool allowedToUseRegistries(LockFileMode handle, bool isTopRef)
    else assert(false);
}

/* Given a flakeref and its subtree of the lockfile, return an updated
   subtree of the lockfile. That is, if the 'flake.nix' of the
   referenced flake has inputs that don't have a corresponding entry
   in the lockfile, they're added to the lockfile; conversely, any
   lockfile entries that don't have a corresponding entry in flake.nix
   are removed.

   Note that this is lazy: we only recursively fetch inputs that are
   not in the lockfile yet. */
static std::pair<Flake, LockedInput> updateLocks(
    RefMap & refMap,
    const std::string & inputPath,
    EvalState & state,
    const Flake & flake,
    LockFileMode lockFileMode,
    const LockedInputs & oldEntry,
    bool topRef)
static void flattenLockFile(
    const LockedInputs & inputs,
    const InputPath & prefix,
    std::map<InputPath, const LockedInput *> & res)
{
    LockedInput newEntry(
        flake.resolvedRef,
        flake.originalRef,
        flake.sourceInfo->narHash);
    for (auto &[id, input] : inputs.inputs) {
        auto inputPath(prefix);
        inputPath.push_back(id);
        res.emplace(inputPath, &input);
        flattenLockFile(input, inputPath, res);
    }
}

    std::vector<std::function<void()>> postponed;
static std::string diffLockFiles(const LockedInputs & oldLocks, const LockedInputs & newLocks)
{
    std::map<InputPath, const LockedInput *> oldFlat, newFlat;
    flattenLockFile(oldLocks, {}, oldFlat);
    flattenLockFile(newLocks, {}, newFlat);

    for (auto & [id, input] : flake.inputs) {
        auto inputPath2 = (inputPath.empty() ? "" : inputPath + "/") + id;
        auto i = oldEntry.inputs.find(id);
        if (i != oldEntry.inputs.end() && i->second.originalRef == input.ref) {
            newEntry.inputs.insert_or_assign(id, i->second);
    auto i = oldFlat.begin();
    auto j = newFlat.begin();
    std::string res;

    while (i != oldFlat.end() || j != newFlat.end()) {
        if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
            res += fmt(" added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->ref);
            ++j;
        } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
            res += fmt(" removed '%s'\n", concatStringsSep("/", i->first));
            ++i;
        } else {
            if (lockFileMode == AllPure || lockFileMode == TopRefUsesRegistries)
                throw Error("cannot update flake input '%s' in pure mode", id);

            auto warn = [&](const FlakeRef & resolvedRef, const fetchers::Tree & sourceInfo) {
                if (i == oldEntry.inputs.end())
                    printInfo("mapped flake input '%s' to '%s'",
                        inputPath2, resolvedRef);
                else
                    printMsg(lvlWarn, "updated flake input '%s' from '%s' to '%s'",
                        inputPath2, i->second.originalRef, resolvedRef);
            };

            if (input.isFlake) {
                auto actualInput = getFlake(state, input.ref,
                    allowedToUseRegistries(lockFileMode, false), refMap);
                warn(actualInput.resolvedRef, *actualInput.sourceInfo);
                postponed.push_back([&, id{id}, inputPath2, actualInput]() {
                    newEntry.inputs.insert_or_assign(id,
                        updateLocks(refMap, inputPath2, state, actualInput, lockFileMode, {}, false).second);
                });
            } else {
                auto [sourceInfo, resolvedRef] = getNonFlake(state, input.ref,
                    allowedToUseRegistries(lockFileMode, false), refMap);
                warn(resolvedRef, sourceInfo);
                newEntry.inputs.insert_or_assign(id,
                    LockedInput(resolvedRef, input.ref, sourceInfo.narHash));
            }
            if (!(i->second->ref == j->second->ref))
                res += fmt(" updated '%s': '%s' -> '%s'\n",
                    concatStringsSep("/", i->first),
                    i->second->ref,
                    j->second->ref);
            ++i;
            ++j;
        }
    }

    for (auto & f : postponed) f();

    return {flake, newEntry};
    return res;
}

/* Compute an in-memory lock file for the specified top-level flake,

@@ -299,8 +328,10 @@ LockedFlake lockFlake(
{
    settings.requireExperimentalFeature("flakes");

    RefMap refMap;

    auto flake = getFlake(state, topRef,
        allowedToUseRegistries(lockFileMode, true));
        allowedToUseRegistries(lockFileMode, true), refMap);

    LockFile oldLockFile;

@@ -313,22 +344,171 @@ LockedFlake lockFlake(

    debug("old lock file: %s", oldLockFile);

    RefMap refMap;
    LockFile newLockFile, prevLockFile;
    std::vector<InputPath> prevUnresolved;

    LockFile newLockFile(updateLocks(
        refMap, "", state, flake, lockFileMode, oldLockFile, true).second);
    // FIXME: check whether all overrides are used.
    std::map<InputPath, FlakeInput> overrides;

    /* Compute the new lock file. This is done as a fixpoint
       iteration: we repeat until the new lock file no longer changes
       and there are no unresolved "follows" inputs. */
    while (true) {
        std::vector<InputPath> unresolved;

        /* Recurse into the flake inputs. */
        std::function<void(
            const FlakeInputs & flakeInputs,
            const LockedInputs & oldLocks,
            LockedInputs & newLocks,
            const InputPath & inputPathPrefix)>
            updateLocks;

        updateLocks = [&](
            const FlakeInputs & flakeInputs,
            const LockedInputs & oldLocks,
            LockedInputs & newLocks,
            const InputPath & inputPathPrefix)
        {
            /* Get the overrides (i.e. attributes of the form
               'inputs.nixops.inputs.nixpkgs.url = ...'). */
            for (auto & [id, input] : flake.inputs) {
                for (auto & [idOverride, inputOverride] : input.overrides) {
                    auto inputPath(inputPathPrefix);
                    inputPath.push_back(id);
                    inputPath.push_back(idOverride);
                    overrides.insert_or_assign(inputPath, inputOverride);
                }
            }

            /* Go over the flake inputs, resolve/fetch them if
               necessary (i.e. if they're new or the flakeref changed
               from what's in the lock file). */
            for (auto & [id, input2] : flakeInputs) {
                auto inputPath(inputPathPrefix);
                inputPath.push_back(id);
                auto inputPathS = concatStringsSep("/", inputPath);

                /* Do we have an override for this input from one of
                   the ancestors? */
                auto i = overrides.find(inputPath);
                bool hasOverride = i != overrides.end();
                auto & input = hasOverride ? i->second : input2;

                if (input.follows) {
                    /* This is a "follows" input
                       (i.e. 'inputs.nixpkgs.follows =
                       "dwarffs/nixpkgs"). Resolve the source and copy
                       its inputs. Note that the source is normally
                       relative to the current node of the lock file
                       (e.g. "dwarffs/nixpkgs" refers to the nixpkgs
                       input of the dwarffs input of the root flake),
                       but if it's from an override, it's relative to
                       the *root* of the lock file. */
                    auto follows = (hasOverride ? newLockFile : newLocks).findInput(*input.follows);
                    if (follows)
                        newLocks.inputs.insert_or_assign(id, **follows);
                    else
                        /* We haven't processed the source of the
                           "follows" yet (e.g. "dwarffs/nixpkgs"). So
                           we'll need another round of the fixpoint
                           iteration. */
                        unresolved.push_back(inputPath);
                    continue;
                }

                auto oldLock = oldLocks.inputs.find(id);

                if (oldLock != oldLocks.inputs.end() && oldLock->second.originalRef == input.ref && !hasOverride) {
                    /* Copy the input from the old lock file if its
                       flakeref didn't change and there is no override
                       from a higher level flake. */
                    newLocks.inputs.insert_or_assign(id, oldLock->second);

                    /* However there may be new overrides on the
                       inputs of this flake, so we need to check those
                       (without fetching this flake - we need to be
                       lazy). */
                    FlakeInputs fakeInputs;

                    for (auto & i : oldLock->second.inputs) {
                        fakeInputs.emplace(i.first, FlakeInput {
                            .ref = i.second.originalRef
                        });
                    }

                    updateLocks(fakeInputs,
                        oldLock->second,
                        newLocks.inputs.find(id)->second,
                        inputPath);

                } else {
                    /* We need to update/create a new lock file
                       entry. So fetch the flake/non-flake. */
                    if (lockFileMode == AllPure || lockFileMode == TopRefUsesRegistries)
                        throw Error("cannot update flake input '%s' in pure mode", inputPathS);

                    if (input.isFlake) {
                        auto inputFlake = getFlake(state, input.ref,
                            allowedToUseRegistries(lockFileMode, false), refMap);

                        newLocks.inputs.insert_or_assign(id,
                            LockedInput(inputFlake.resolvedRef, inputFlake.originalRef, inputFlake.sourceInfo->narHash));

                        /* Recursively process the inputs of this
                           flake. Also, unless we already have this
                           flake in the top-level lock file, use this
                           flake's own lock file. */
                        updateLocks(inputFlake.inputs,
                            oldLock != oldLocks.inputs.end()
                            ? (const LockedInputs &) oldLock->second
                            : LockFile::read(
                                inputFlake.sourceInfo->actualPath + "/" + inputFlake.resolvedRef.subdir + "/flake.lock"),
                            newLocks.inputs.find(id)->second,
                            inputPath);
                    }

                    else {
                        auto [sourceInfo, resolvedRef] = getNonFlake(state, input.ref,
                            allowedToUseRegistries(lockFileMode, false), refMap);
                        newLocks.inputs.insert_or_assign(id,
                            LockedInput(resolvedRef, input.ref, sourceInfo.narHash));
                    }
                }
            }
        };

        updateLocks(flake.inputs, oldLockFile, newLockFile, {});

        /* Check if there is a cycle in the "follows" inputs. */
        if (!unresolved.empty() && unresolved == prevUnresolved) {
            std::vector<std::string> ss;
            for (auto & i : unresolved)
                ss.push_back(concatStringsSep("/", i));
            throw Error("cycle or missing input detected in flake inputs: %s", concatStringsSep(", ", ss));
        }

        std::swap(unresolved, prevUnresolved);

        /* Done with the fixpoint iteration? */
        if (newLockFile == prevLockFile) break;
        prevLockFile = newLockFile;
    };

    debug("new lock file: %s", newLockFile);

    /* Check whether we need to / can write the new lock file. */
    if (!(newLockFile == oldLockFile)) {

        if (!(oldLockFile == LockFile()))
            printInfo("inputs of flake '%s' changed:\n%s", topRef, chomp(diffLockFiles(oldLockFile, newLockFile)));

        if (lockFileMode == UpdateLockFile || lockFileMode == RecreateLockFile) {
            if (auto sourcePath = topRef.input->getSourcePath()) {
                if (!newLockFile.isImmutable()) {
                    if (settings.warnDirty)
                        warn("will not write lock file of flake '%s' because it has a mutable input", topRef);
                } else {
                    warn("updated lock file of flake '%s'", topRef);

                    newLockFile.write(*sourcePath + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");

                    // FIXME: rewriting the lockfile changed the

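As context for the "follows" comment in the hunk above: a 'follows'
path written directly in a flake is resolved relative to that flake's
own node in the lock file, while a 'follows' that arrives via an
override ('inputs.X.inputs.Y.follows = ...') is resolved relative to
the root of the lock file. A minimal sketch, reusing the hypothetical
input names from the commit message:

  # In the top-level flake.nix:
  inputs.nixpkgs.follows = "dwarffs/nixpkgs";        # relative to this (root) node
  inputs.nixops.inputs.nixpkgs.follows = "nixpkgs";  # override: "nixpkgs" means the root flake's input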
@@ -22,11 +22,16 @@ enum LockFileMode : unsigned int
    , UseNewLockFile // `RecreateLockFile` without writing to file
};

struct FlakeInput;

typedef std::map<FlakeId, FlakeInput> FlakeInputs;

struct FlakeInput
{
    FlakeRef ref;
    bool isFlake = true;
    FlakeInput(const FlakeRef & ref) : ref(ref) {};
    std::optional<InputPath> follows;
    FlakeInputs overrides;
};

struct Flake

@@ -35,7 +40,7 @@ struct Flake
    FlakeRef resolvedRef;
    std::optional<std::string> description;
    std::shared_ptr<const fetchers::Tree> sourceInfo;
    std::map<FlakeId, FlakeInput> inputs;
    FlakeInputs inputs;
    Value * vOutputs; // FIXME: gc
    unsigned int edition;
    ~Flake();

@@ -77,7 +77,7 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
        std::regex::ECMAScript);

    static std::regex flakeRegex(
        "((" + flakeId + ")(?:/(?:" + refAndOrRevRegex + "))?)"
        "((" + flakeIdRegexS + ")(?:/(?:" + refAndOrRevRegex + "))?)"
        + "(?:#(" + queryRegex + "))?",
        std::regex::ECMAScript);

@@ -55,6 +55,22 @@ bool LockedInputs::isImmutable() const
    return true;
}

std::optional<LockedInput *> LockedInputs::findInput(const InputPath & path)
{
    assert(!path.empty());

    LockedInputs * pos = this;

    for (auto & elem : path) {
        auto i = pos->inputs.find(elem);
        if (i == pos->inputs.end())
            return {};
        pos = &i->second;
    }

    return (LockedInput *) pos;
}

nlohmann::json LockFile::toJson() const
{
    auto json = LockedInputs::toJson();

@@ -10,6 +10,8 @@ class Store;

namespace nix::flake {

typedef std::vector<FlakeId> InputPath;

struct LockedInput;

/* Lock file information about the dependencies of a flake. */

@@ -23,6 +25,8 @@ struct LockedInputs
    nlohmann::json toJson() const;

    bool isImmutable() const;

    std::optional<LockedInput *> findInput(const InputPath & path);
};

/* Lock file information about a flake input. */

@@ -6,6 +6,7 @@ namespace nix::fetchers {

std::regex refRegex(refRegexS, std::regex::ECMAScript);
std::regex revRegex(revRegexS, std::regex::ECMAScript);
std::regex flakeIdRegex(flakeIdRegexS, std::regex::ECMAScript);

ParsedURL parseURL(const std::string & url)
{

@@ -27,6 +27,7 @@ extern std::regex revRegex;
// A ref or revision, or a ref followed by a revision.
const static std::string refAndOrRevRegex = "(?:(" + revRegexS + ")|(?:(" + refRegexS + ")(?:/(" + revRegexS + "))?))";

const static std::string flakeId = "[a-zA-Z][a-zA-Z0-9_-]*";
const static std::string flakeIdRegexS = "[a-zA-Z][a-zA-Z0-9_-]*";
extern std::regex flakeIdRegex;

}

@@ -354,7 +354,9 @@ nix flake remove --flake-registry $registry flake1
(cd $flake7Dir && nix flake init)
git -C $flake7Dir add flake.nix
nix flake --flake-registry $registry check $flake7Dir
git -C $flake7Dir commit -m 'Initial'

# Test 'nix flake clone'.
rm -rf $TEST_ROOT/flake1-v2
nix flake clone --flake-registry $registry flake1 --dest $TEST_ROOT/flake1-v2
[ -e $TEST_ROOT/flake1-v2/flake.nix ]

@@ -443,3 +445,77 @@ cat > $flake3Dir/flake.nix <<EOF
EOF

(! nix flake check --flake-registry $registry $flake3Dir)

# Test 'follows' inputs.
cat > $flake3Dir/flake.nix <<EOF
{
  edition = 201909;

  inputs.foo.url = flake:flake1;
  inputs.bar.follows = "foo";

  outputs = { self, foo, bar }: {
  };
}
EOF

nix flake update --flake-registry $registry $flake3Dir
[[ $(jq .inputs.foo.url $flake3Dir/flake.lock) = $(jq .inputs.bar.url $flake3Dir/flake.lock) ]]

cat > $flake3Dir/flake.nix <<EOF
{
  edition = 201909;

  inputs.bar.follows = "flake2/flake1";

  outputs = { self, flake2, bar }: {
  };
}
EOF

nix flake update --flake-registry $registry $flake3Dir
[[ $(jq .inputs.bar.url $flake3Dir/flake.lock) =~ flake1 ]]

cat > $flake3Dir/flake.nix <<EOF
{
  edition = 201909;

  inputs.bar.follows = "flake2";

  outputs = { self, flake2, bar }: {
  };
}
EOF

nix flake update --flake-registry $registry $flake3Dir
[[ $(jq .inputs.bar.url $flake3Dir/flake.lock) =~ flake2 ]]

# Test overriding inputs of inputs.
cat > $flake3Dir/flake.nix <<EOF
{
  edition = 201909;

  inputs.flake2.inputs.flake1.url = git+file://$flake7Dir;

  outputs = { self, flake2 }: {
  };
}
EOF

nix flake update --flake-registry $registry $flake3Dir
[[ $(jq .inputs.flake2.inputs.flake1.url $flake3Dir/flake.lock) =~ flake7 ]]

cat > $flake3Dir/flake.nix <<EOF
{
  edition = 201909;

  inputs.flake2.inputs.flake1.follows = "foo";
  inputs.foo.url = git+file://$flake7Dir;

  outputs = { self, flake2 }: {
  };
}
EOF

nix flake update --flake-registry $registry $flake3Dir --recreate-lock-file
[[ $(jq .inputs.flake2.inputs.flake1.url $flake3Dir/flake.lock) =~ flake7 ]]