forked from lix-project/lix
Merge pull request #5253 from edolstra/flake-ifd
Don't allow IFD in flakes by default
commit 362d8f925e
@@ -92,7 +92,7 @@ doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli
           lowdown -sT man -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
           rm $$tmpFile; \
         done
-        touch $@
+        @touch $@
 
 $(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md $(call rwildcard, $(d)/src, *.md)
         $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual

@@ -52,7 +52,8 @@ void EvalState::realiseContext(const PathSet & context)
     if (drvs.empty()) return;
 
     if (!evalSettings.enableImportFromDerivation)
-        throw EvalError("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false",
+        throw Error(
+            "cannot build '%1%' during evaluation because the option 'allow-import-from-derivation' is disabled",
             store->printStorePath(drvs.begin()->drvPath));
 
     /* For performance, prefetch all substitute info. */

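The hunk above is the core of the change: evaluation-time builds (import from derivation) now fail unless the 'allow-import-from-derivation' option is enabled. Below is a minimal, self-contained C++ sketch of that guard, using stand-in types and names rather than the real EvalState/EvalSettings API:

    #include <stdexcept>
    #include <string>
    #include <vector>

    // Stand-in for the real EvalSettings; only the flag we care about.
    struct EvalSettingsSketch { bool enableImportFromDerivation = true; };

    // Hypothetical counterpart of EvalState::realiseContext(): if evaluating an
    // expression would require building a derivation while IFD is disabled,
    // fail early and name the offending store path.
    void realiseContextSketch(const EvalSettingsSketch & settings,
                              const std::vector<std::string> & drvPaths)
    {
        if (drvPaths.empty()) return;

        if (!settings.enableImportFromDerivation)
            throw std::runtime_error(
                "cannot build '" + drvPaths.front() +
                "' during evaluation because the option 'allow-import-from-derivation' is disabled");

        // ...otherwise the derivations would be built/substituted here...
    }
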
@@ -52,9 +52,9 @@ void BinaryCacheStore::init()
                     throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'",
                         getUri(), value, storeDir);
             } else if (name == "WantMassQuery") {
-                wantMassQuery.setDefault(value == "1" ? "true" : "false");
+                wantMassQuery.setDefault(value == "1");
             } else if (name == "Priority") {
-                priority.setDefault(fmt("%d", std::stoi(value)));
+                priority.setDefault(std::stoi(value));
             }
         }
     }

@@ -57,8 +57,8 @@ public:
     {
         // FIXME: do this lazily?
         if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
-            wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false");
-            priority.setDefault(fmt("%d", cacheInfo->priority));
+            wantMassQuery.setDefault(cacheInfo->wantMassQuery);
+            priority.setDefault(cacheInfo->priority);
         } else {
             try {
                 BinaryCacheStore::init();

@@ -232,8 +232,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
     void init() override
     {
         if (auto cacheInfo = diskCache->cacheExists(getUri())) {
-            wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false");
-            priority.setDefault(fmt("%d", cacheInfo->priority));
+            wantMassQuery.setDefault(cacheInfo->wantMassQuery);
+            priority.setDefault(cacheInfo->priority);
         } else {
             BinaryCacheStore::init();
             diskCache->createCache(getUri(), storeDir, wantMassQuery, priority);

@@ -177,11 +177,6 @@ AbstractSetting::AbstractSetting(
 {
 }
 
-void AbstractSetting::setDefault(const std::string & str)
-{
-    if (!overridden) set(str);
-}
-
 nlohmann::json AbstractSetting::toJSON()
 {
     return nlohmann::json(toJSONObject());

@@ -194,8 +194,6 @@ public:
 
     bool overridden = false;
 
-    void setDefault(const std::string & str);
-
 protected:
 
     AbstractSetting(

@@ -253,6 +251,7 @@ public:
     bool operator !=(const T & v2) const { return value != v2; }
     void operator =(const T & v) { assign(v); }
     virtual void assign(const T & v) { value = v; }
+    void setDefault(const T & v) { if (!overridden) value = v; }
 
     void set(const std::string & str, bool append = false) override;
 

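The old AbstractSetting::setDefault (removed above) went through the string parser; the new one is a typed inline method that only applies while the user has not overridden the setting. The following is a small, self-contained sketch of that behaviour, assuming a simplified Setting class rather than the real Config/BaseSetting machinery:

    #include <iostream>
    #include <utility>

    // Minimal sketch of a typed setting with a "soft" default:
    // setDefault() only changes the value while the user has not
    // explicitly assigned one (overridden == false).
    template<typename T>
    struct Setting
    {
        T value;
        bool overridden = false;

        explicit Setting(T def) : value(std::move(def)) { }

        // Explicit assignment (e.g. from the command line or nix.conf)
        // marks the setting as overridden.
        void assign(const T & v) { value = v; overridden = true; }

        // Programmatic default: ignored once the user has overridden it.
        void setDefault(const T & v) { if (!overridden) value = v; }

        operator const T & () const { return value; }
    };

    int main()
    {
        Setting<bool> enableImportFromDerivation{true};

        // A command can turn IFD off by default...
        enableImportFromDerivation.setDefault(false);
        std::cout << std::boolalpha << bool(enableImportFromDerivation) << "\n"; // false

        // ...but an explicit user override wins over later setDefault() calls.
        enableImportFromDerivation.assign(true);
        enableImportFromDerivation.setDefault(false);
        std::cout << bool(enableImportFromDerivation) << "\n";                   // true
    }

This is the mechanism that lets the commands below disable IFD by default while still respecting an explicit allow-import-from-derivation setting from the user.
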
@@ -131,8 +131,18 @@ static void enumerateOutputs(EvalState & state, Value & vFlake,
 
     state.forceAttrs(*aOutputs->value);
 
-    for (auto & attr : *aOutputs->value->attrs)
-        callback(attr.name, *attr.value, *attr.pos);
+    auto sHydraJobs = state.symbols.create("hydraJobs");
+
+    /* Hack: ensure that hydraJobs is evaluated before anything
+       else. This way we can disable IFD for hydraJobs and then enable
+       it for other outputs. */
+    if (auto attr = aOutputs->value->attrs->get(sHydraJobs))
+        callback(attr->name, *attr->value, *attr->pos);
+
+    for (auto & attr : *aOutputs->value->attrs) {
+        if (attr.name != sHydraJobs)
+            callback(attr.name, *attr.value, *attr.pos);
+    }
 }
 
 struct CmdFlakeMetadata : FlakeCommand, MixJSON

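The in-diff comment explains the ordering trick: hydraJobs is evaluated first so IFD can be disabled for it and then re-enabled for the remaining outputs (see the setDefault(name != "hydraJobs") call in a later hunk). Here is a rough illustration of that "one key first, then the rest" enumeration, using a plain std::map and a callback instead of the real EvalState/attribute-set types:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    // Rough illustration (not the real flake.cc types): visit the "hydraJobs"
    // output first so a per-output flag can be flipped before anything else
    // is evaluated.
    void enumerateOutputs(const std::map<std::string, int> & outputs,
                          const std::function<void(const std::string &, int)> & callback)
    {
        static const std::string hydraJobs = "hydraJobs";

        // Evaluate hydraJobs before anything else, mirroring the hack above.
        if (auto it = outputs.find(hydraJobs); it != outputs.end())
            callback(it->first, it->second);

        for (auto & [name, value] : outputs)
            if (name != hydraJobs)
                callback(name, value);
    }

    int main()
    {
        std::map<std::string, int> outputs{{"checks", 1}, {"hydraJobs", 2}, {"packages", 3}};
        bool importFromDerivation = true;  // stand-in for the real eval setting

        enumerateOutputs(outputs, [&](const std::string & name, int) {
            importFromDerivation = (name != "hydraJobs");  // per-output default, as in CmdFlakeCheck
            std::cout << name << ": IFD " << (importFromDerivation ? "on" : "off") << "\n";
        });
    }
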
@@ -269,7 +279,10 @@ struct CmdFlakeCheck : FlakeCommand
 
     void run(nix::ref<nix::Store> store) override
     {
-        settings.readOnlyMode = !build;
+        if (!build) {
+            settings.readOnlyMode = true;
+            evalSettings.enableImportFromDerivation.setDefault(false);
+        }
 
         auto state = getEvalState();
 

@@ -381,9 +394,13 @@ struct CmdFlakeCheck : FlakeCommand
 
                 for (auto & attr : *v.attrs) {
                     state->forceAttrs(*attr.value, *attr.pos);
-                    if (!state->isDerivation(*attr.value))
-                        checkHydraJobs(attrPath + "." + (std::string) attr.name,
-                            *attr.value, *attr.pos);
+                    auto attrPath2 = attrPath + "." + (std::string) attr.name;
+                    if (state->isDerivation(*attr.value)) {
+                        Activity act(*logger, lvlChatty, actUnknown,
+                            fmt("checking Hydra job '%s'", attrPath2));
+                        checkDerivation(attrPath2, *attr.value, *attr.pos);
+                    } else
+                        checkHydraJobs(attrPath2, *attr.value, *attr.pos);
                 }
 
             } catch (Error & e) {

@@ -447,8 +464,8 @@ struct CmdFlakeCheck : FlakeCommand
                 if (!v.isLambda())
                     throw Error("bundler must be a function");
                 if (!v.lambda.fun->formals ||
-                    v.lambda.fun->formals->argNames.find(state->symbols.create("program")) == v.lambda.fun->formals->argNames.end() ||
-                    v.lambda.fun->formals->argNames.find(state->symbols.create("system")) == v.lambda.fun->formals->argNames.end())
+                    !v.lambda.fun->formals->argNames.count(state->symbols.create("program")) ||
+                    !v.lambda.fun->formals->argNames.count(state->symbols.create("system")))
                     throw Error("bundler must take formal arguments 'program' and 'system'");
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath));

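The bundler check above is a pure readability refactor: for the argNames set, !set.count(x) is equivalent to set.find(x) == set.end(). A trivial sketch of the equivalence, with a std::set<std::string> standing in for the real symbol set:

    #include <cassert>
    #include <set>
    #include <string>

    int main()
    {
        std::set<std::string> argNames{"program", "system"};

        // The two membership tests are equivalent; count() just reads better.
        assert((argNames.find("program") == argNames.end()) == (argNames.count("program") == 0));
        assert(!argNames.count("nope") && argNames.find("nope") == argNames.end());
    }
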
@@ -469,6 +486,8 @@ struct CmdFlakeCheck : FlakeCommand
                     fmt("checking flake output '%s'", name));
 
                 try {
+                    evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs");
+
                     state->forceValue(vOutput, pos);
 
                     if (name == "checks") {

@@ -603,7 +622,7 @@ struct CmdFlakeCheck : FlakeCommand
             store->buildPaths(drvPaths);
         }
         if (hasErrors)
-            throw Error("Some errors were encountered during the evaluation");
+            throw Error("some errors were encountered during the evaluation");
     }
 };
 

@@ -873,6 +892,8 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
 
     void run(nix::ref<nix::Store> store) override
     {
+        evalSettings.enableImportFromDerivation.setDefault(false);
+
         auto state = getEvalState();
         auto flake = std::make_shared<LockedFlake>(lockFlake());
 

@@ -62,6 +62,7 @@ struct CmdSearch : InstallableCommand, MixJSON
     void run(ref<Store> store) override
     {
         settings.readOnlyMode = true;
+        evalSettings.enableImportFromDerivation.setDefault(false);
 
         // Empty search string should match all packages
         // Use "^" here instead of ".*" due to differences in resulting highlighting